sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
agno-agi/agno:libs/agno/tests/unit/app/test_agui_app.py | from unittest.mock import MagicMock
import pytest
from ag_ui.core import EventType
from agno.os.interfaces.agui.utils import EventBuffer, async_stream_agno_response_as_agui_events
from agno.run.agent import RunContentEvent, ToolCallCompletedEvent, ToolCallStartedEvent
def test_event_buffer_initial_state():
    """A freshly constructed EventBuffer tracks no tool calls at all."""
    fresh = EventBuffer()
    # Both the active and the ended sets start out empty.
    assert len(fresh.active_tool_call_ids) == 0
    assert len(fresh.ended_tool_call_ids) == 0
def test_event_buffer_tool_call_lifecycle():
    """A tool call moves from active to ended as it is started and finished."""
    tracker = EventBuffer()
    assert len(tracker.active_tool_call_ids) == 0  # nothing in flight yet
    # Starting a call registers it as active.
    tracker.start_tool_call("tool_1")
    assert "tool_1" in tracker.active_tool_call_ids
    # Ending it moves the id from the active set to the ended set.
    tracker.end_tool_call("tool_1")
    assert "tool_1" in tracker.ended_tool_call_ids
    assert "tool_1" not in tracker.active_tool_call_ids
def test_event_buffer_multiple_tool_calls():
    """Two overlapping tool calls are tracked independently of each other."""
    tracker = EventBuffer()
    # Open the first call.
    tracker.start_tool_call("tool_1")
    assert "tool_1" in tracker.active_tool_call_ids
    # Open a second call while the first is still running.
    tracker.start_tool_call("tool_2")
    assert len(tracker.active_tool_call_ids) == 2
    assert "tool_1" in tracker.active_tool_call_ids
    assert "tool_2" in tracker.active_tool_call_ids
    # Finish tool_2 first: tool_1 must stay active.
    tracker.end_tool_call("tool_2")
    assert "tool_2" in tracker.ended_tool_call_ids
    assert "tool_2" not in tracker.active_tool_call_ids
    assert "tool_1" in tracker.active_tool_call_ids  # still running
    # Finish tool_1 as well: nothing should remain active.
    tracker.end_tool_call("tool_1")
    assert "tool_1" in tracker.ended_tool_call_ids
    assert "tool_1" not in tracker.active_tool_call_ids
    assert len(tracker.active_tool_call_ids) == 0
def test_event_buffer_end_nonexistent_tool_call():
    """Ending an id that was never started still records it as ended."""
    tracker = EventBuffer()
    # No matching start_tool_call() ever happened for this id.
    tracker.end_tool_call("nonexistent_tool")
    assert "nonexistent_tool" in tracker.ended_tool_call_ids
def test_event_buffer_duplicate_start_tool_call():
    """Starting the same id twice leaves exactly one active entry."""
    tracker = EventBuffer()
    tracker.start_tool_call("tool_1")
    tracker.start_tool_call("tool_1")  # a duplicate start must be harmless
    assert len(tracker.active_tool_call_ids) == 1
    assert "tool_1" in tracker.active_tool_call_ids
def test_event_buffer_duplicate_end_tool_call():
    """Ending the same id twice is a no-op on the second call."""
    tracker = EventBuffer()
    tracker.start_tool_call("tool_1")
    tracker.end_tool_call("tool_1")
    tracker.end_tool_call("tool_1")  # repeated end must not raise or corrupt state
    assert "tool_1" in tracker.ended_tool_call_ids
    assert "tool_1" not in tracker.active_tool_call_ids
def test_event_buffer_complex_sequence():
    """Out-of-order starts and ends leave the buffer in a consistent state."""
    tracker = EventBuffer()
    # Open three calls back to back.
    for tool_id in ("tool_1", "tool_2", "tool_3"):
        tracker.start_tool_call(tool_id)
    assert len(tracker.active_tool_call_ids) == 3
    # Close the middle call first ...
    tracker.end_tool_call("tool_2")
    assert "tool_2" in tracker.ended_tool_call_ids
    assert len(tracker.active_tool_call_ids) == 2
    # ... then the first ...
    tracker.end_tool_call("tool_1")
    assert "tool_1" in tracker.ended_tool_call_ids
    # ... and finally the last remaining one.
    tracker.end_tool_call("tool_3")
    assert "tool_3" in tracker.ended_tool_call_ids
    # Everything ended, nothing left active.
    assert len(tracker.active_tool_call_ids) == 0
    assert len(tracker.ended_tool_call_ids) == 3
def test_event_buffer_edge_cases():
    """An empty-string tool_call_id is handled like any other id."""
    tracker = EventBuffer()
    # Start with "" as the id.
    tracker.start_tool_call("")
    assert "" in tracker.active_tool_call_ids
    # And end it again.
    tracker.end_tool_call("")
    assert "" in tracker.ended_tool_call_ids
    assert "" not in tracker.active_tool_call_ids
@pytest.mark.asyncio
async def test_stream_basic():
    """A single content chunk plus completion yields the four canonical AG-UI events."""
    from agno.run.agent import RunEvent

    async def fake_stream():
        # One text chunk ...
        chunk = RunContentEvent()
        chunk.event = RunEvent.run_content
        chunk.content = "Hello world"
        yield chunk
        # ... followed by the run-completed marker.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    assert len(collected) == 4
    # Exact expected sequence: start, delta, end, finished.
    assert collected[0].type == EventType.TEXT_MESSAGE_START
    assert collected[1].type == EventType.TEXT_MESSAGE_CONTENT
    assert collected[1].delta == "Hello world"
    assert collected[2].type == EventType.TEXT_MESSAGE_END
    assert collected[3].type == EventType.RUN_FINISHED
@pytest.mark.asyncio
async def test_stream_with_tool_call_blocking():
    """Content arriving while a tool call is open is buffered and still emitted."""
    from agno.run.agent import RunEvent

    async def fake_stream():
        # Leading text content.
        intro = RunContentEvent()
        intro.event = RunEvent.run_content
        intro.content = "I'll help you"
        yield intro
        # A tool call opens; a MagicMock stands in for the tool execution object.
        tool = MagicMock()
        tool.tool_call_id = "tool_1"
        tool.tool_name = "search"
        tool.tool_args = {"query": "test"}
        started = ToolCallStartedEvent()
        started.event = RunEvent.tool_call_started
        started.content = ""
        started.tool = tool
        yield started
        # Text produced while the tool is still running (should be buffered).
        midway = RunContentEvent()
        midway.event = RunEvent.run_content
        midway.content = "Searching..."
        yield midway
        # The tool call completes.
        finished = ToolCallCompletedEvent()
        finished.event = RunEvent.tool_call_completed
        finished.content = ""
        finished.tool = tool
        yield finished
        # The run completes.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    seen_types = [e.type for e in collected]
    # Every expected AG-UI event type must show up at least once.
    for expected in (
        EventType.TEXT_MESSAGE_START,
        EventType.TEXT_MESSAGE_CONTENT,
        EventType.TOOL_CALL_START,
        EventType.TOOL_CALL_ARGS,
        EventType.TOOL_CALL_END,
        EventType.TEXT_MESSAGE_END,
        EventType.RUN_FINISHED,
    ):
        assert expected in seen_types
    # The tool call must open before it closes.
    assert seen_types.index(EventType.TOOL_CALL_START) < seen_types.index(EventType.TOOL_CALL_END)
@pytest.mark.asyncio
async def test_concurrent_tool_calls_no_infinite_loop():
    """Three overlapping tool calls stream to completion without looping forever."""
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    symbols = ("TSLA", "AAPL", "MSFT")
    call_ids = {"TSLA": "call_TSLA_123", "AAPL": "call_AAPL_456", "MSFT": "call_MSFT_789"}
    prices = {"TSLA": 250.50, "AAPL": 175.25, "MSFT": 320.75}

    async def fake_stream():
        intro = RunContentEvent()
        intro.event = RunEvent.run_content
        intro.content = "I'll check multiple stocks for you"
        yield intro
        # Open all three tool calls before completing any of them — this
        # concurrent pattern previously triggered an infinite loop.
        executions = []
        for symbol in symbols:
            execution = ToolExecution(
                tool_call_id=call_ids[symbol], tool_name="get_stock_price", tool_args={"symbol": symbol}
            )
            executions.append(execution)
            started = ToolCallStartedEvent()
            started.tool = execution
            yield started
        # Content produced while the tools are all in flight.
        midway = RunContentEvent()
        midway.event = RunEvent.run_content
        midway.content = "Fetching stock data..."
        yield midway
        # Complete the calls in the order they were opened.
        for symbol, execution in zip(symbols, executions):
            execution.result = {"price": prices[symbol], "symbol": symbol}
            finished = ToolCallCompletedEvent()
            finished.tool = execution
            yield finished
        # Final content and run completion.
        summary = RunContentEvent()
        summary.event = RunEvent.run_content
        summary.content = "Here are the stock prices"
        yield summary
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = []
    async for event in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1"):
        collected.append(event)
        # Safety valve: a runaway event count means the loop is back.
        if len(collected) > 50:
            pytest.fail("Too many events generated - possible infinite loop detected")

    assert len(collected) >= 15, f"Expected at least 15 events, got {len(collected)}"
    seen_types = [event.type for event in collected]
    # All expected event types are present.
    for expected in (
        EventType.TEXT_MESSAGE_START,
        EventType.TEXT_MESSAGE_CONTENT,
        EventType.TEXT_MESSAGE_END,
        EventType.TOOL_CALL_START,
        EventType.TOOL_CALL_ARGS,
        EventType.TOOL_CALL_END,
        EventType.TOOL_CALL_RESULT,
        EventType.RUN_FINISHED,
    ):
        assert expected in seen_types
    # Each of the three tool calls produced a start and an end.
    starts = [e for e in collected if e.type == EventType.TOOL_CALL_START]
    ends = [e for e in collected if e.type == EventType.TOOL_CALL_END]
    assert len(starts) == 3, f"Expected 3 tool starts, got {len(starts)}"
    assert len(ends) == 3, f"Expected 3 tool ends, got {len(ends)}"
@pytest.mark.asyncio
async def test_text_message_end_before_tool_call_start():
    """
    Regression test for Issues #3554 and #4601.

    A TOOL_CALL_START emitted while a text message is still open violates the
    AG-UI protocol ("Cannot send event type 'TOOL_CALL_START' after
    'TEXT_MESSAGE_START': Send 'TEXT_MESSAGE_END' first."), so the stream must
    close the open text message before starting the tool call.
    """
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    async def fake_stream():
        # Open a text message ...
        intro = RunContentEvent()
        intro.event = RunEvent.run_content
        intro.content = "Let me check that for you"
        yield intro
        # ... then immediately begin a tool call, forcing the message to close.
        execution = ToolExecution(
            tool_call_id="call_search_123", tool_name="search_tool", tool_args={"query": "test query"}
        )
        started = ToolCallStartedEvent()
        started.tool = execution
        yield started
        # The tool call completes with a result.
        execution.result = {"results": "search results"}
        finished = ToolCallCompletedEvent()
        finished.tool = execution
        yield finished
        # Text resumes after the tool call.
        outro = RunContentEvent()
        outro.event = RunEvent.run_content
        outro.content = "Based on the search results..."
        yield outro
        # The run completes.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    seen_types = [e.type for e in collected]
    first_of = seen_types.index
    # The message lifecycle must be ordered and fully closed before the tool call opens.
    assert first_of(EventType.TEXT_MESSAGE_START) < first_of(EventType.TEXT_MESSAGE_CONTENT), (
        "TEXT_MESSAGE_START should come before TEXT_MESSAGE_CONTENT"
    )
    assert first_of(EventType.TEXT_MESSAGE_CONTENT) < first_of(EventType.TEXT_MESSAGE_END), (
        "TEXT_MESSAGE_CONTENT should come before TEXT_MESSAGE_END"
    )
    assert first_of(EventType.TEXT_MESSAGE_END) < first_of(EventType.TOOL_CALL_START), (
        "TEXT_MESSAGE_END should come before TOOL_CALL_START (Issue #3554/#4601 fix)"
    )
    # And every expected event type is present.
    for expected in (
        EventType.TEXT_MESSAGE_START,
        EventType.TEXT_MESSAGE_CONTENT,
        EventType.TEXT_MESSAGE_END,
        EventType.TOOL_CALL_START,
        EventType.TOOL_CALL_ARGS,
        EventType.TOOL_CALL_END,
        EventType.TOOL_CALL_RESULT,
        EventType.RUN_FINISHED,
    ):
        assert expected in seen_types
@pytest.mark.asyncio
async def test_missing_text_message_content_events():
    """
    Regression test for Issue #3554: Missing TEXT_MESSAGE_CONTENT events.

    Every chunk of agent text must surface as a TEXT_MESSAGE_CONTENT event so
    the frontend UI actually renders the response.
    """
    from agno.run.agent import RunEvent

    async def fake_stream():
        # Two consecutive text chunks.
        for text in ("Hello! How can I help you today?", " I'm here to assist with any questions."):
            chunk = RunContentEvent()
            chunk.event = RunEvent.run_content
            chunk.content = text
            yield chunk
        # Then the run completes.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    deltas = [e.delta for e in collected if e.type == EventType.TEXT_MESSAGE_CONTENT]
    # The original bug produced no content events at all.
    assert len(deltas) > 0, "Should have TEXT_MESSAGE_CONTENT events (Issue #3554 fix)"
    # Both chunks must appear in the reassembled text.
    combined = "".join(deltas)
    assert "Hello! How can I help you today?" in combined
    assert "I'm here to assist with any questions." in combined
    # The full message lifecycle is present.
    seen_types = [e.type for e in collected]
    for expected in (
        EventType.TEXT_MESSAGE_START,
        EventType.TEXT_MESSAGE_CONTENT,
        EventType.TEXT_MESSAGE_END,
        EventType.RUN_FINISHED,
    ):
        assert expected in seen_types
@pytest.mark.asyncio
async def test_duplicate_tool_call_result_events():
    """
    Regression test for Issue #3554: duplicated ToolCallResultEvent.

    Each completed tool call must produce exactly one TOOL_CALL_RESULT event,
    never duplicates with differing message ids.
    """
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    async def fake_stream():
        # One tool call that starts ...
        execution = ToolExecution(tool_call_id="call_unique_123", tool_name="test_tool", tool_args={"param": "value"})
        started = ToolCallStartedEvent()
        started.tool = execution
        yield started
        # ... and completes with a result payload.
        execution.result = {"unique_result": "test_data", "id": "unique_123"}
        finished = ToolCallCompletedEvent()
        finished.tool = execution
        yield finished
        # Then the run completes.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    result_events = [e for e in collected if e.type == EventType.TOOL_CALL_RESULT]
    assert len(result_events) == 1, (
        f"Expected exactly 1 TOOL_CALL_RESULT event, got {len(result_events)} (Issue #3554 fix)"
    )
    # The single result event carries the right id and payload.
    only_result = result_events[0]
    assert only_result.tool_call_id == "call_unique_123"
    assert "unique_result" in only_result.content
    assert "test_data" in only_result.content
@pytest.mark.asyncio
async def test_empty_content_chunks_handling():
    """Empty-string and None content chunks must not produce delta events."""
    from agno.run.agent import RunEvent

    async def fake_stream():
        # Two unusable chunks ("" and None) followed by one real chunk.
        for text in ("", None, "Valid content"):
            chunk = RunContentEvent()
            chunk.event = RunEvent.run_content
            chunk.content = text
            yield chunk
        # Then the run completes.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    # Only the non-empty chunk should have become a TEXT_MESSAGE_CONTENT event.
    content_events = [e for e in collected if e.type == EventType.TEXT_MESSAGE_CONTENT]
    assert len(content_events) == 1, f"Expected 1 content event for non-empty content, got {len(content_events)}"
    assert content_events[0].delta == "Valid content"
@pytest.mark.asyncio
async def test_stream_ends_without_completion_event():
    """A stream that stops abruptly still gets synthetic closing events."""
    from agno.run.agent import RunEvent

    async def fake_stream():
        chunk = RunContentEvent()
        chunk.event = RunEvent.run_content
        chunk.content = "Hello"
        yield chunk
        # The generator returns here without ever yielding a run_completed event.

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    seen_types = [e.type for e in collected]
    assert EventType.TEXT_MESSAGE_START in seen_types
    assert EventType.TEXT_MESSAGE_CONTENT in seen_types
    # The converter must synthesize the close and finish events itself.
    assert EventType.TEXT_MESSAGE_END in seen_types, "Should have synthetic TEXT_MESSAGE_END"
    assert EventType.RUN_FINISHED in seen_types, "Should have synthetic RUN_FINISHED"
@pytest.mark.asyncio
async def test_reasoning_events_handling():
    """Reasoning start/stop events map onto AG-UI step events."""
    from agno.run.agent import RunEvent

    async def fake_stream():
        # Reasoning phase opens.
        opening = RunContentEvent()
        opening.event = RunEvent.reasoning_started
        opening.content = ""
        yield opening
        # Content produced during the reasoning phase.
        thinking = RunContentEvent()
        thinking.event = RunEvent.run_content
        thinking.content = "Thinking about this problem..."
        yield thinking
        # Reasoning phase closes.
        closing = RunContentEvent()
        closing.event = RunEvent.reasoning_completed
        closing.content = ""
        yield closing
        # The run completes.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    seen_types = [e.type for e in collected]
    # Reasoning is surfaced as a step, with its text still streamed as deltas.
    assert EventType.STEP_STARTED in seen_types, "Should have STEP_STARTED for reasoning"
    assert EventType.STEP_FINISHED in seen_types, "Should have STEP_FINISHED for reasoning"
    assert EventType.TEXT_MESSAGE_CONTENT in seen_types
    assert EventType.RUN_FINISHED in seen_types
@pytest.mark.asyncio
async def test_tool_call_without_result():
    """A tool call completing with result=None must not emit TOOL_CALL_RESULT."""
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    async def fake_stream():
        # Start a tool call ...
        execution = ToolExecution(tool_call_id="call_no_result", tool_name="void_tool", tool_args={"action": "ping"})
        started = ToolCallStartedEvent()
        started.tool = execution
        yield started
        # ... and complete it without ever assigning execution.result.
        finished = ToolCallCompletedEvent()
        finished.tool = execution
        yield finished
        # Then the run completes.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    seen_types = [e.type for e in collected]
    # The call itself still produces its start/args/end triple.
    assert EventType.TOOL_CALL_START in seen_types
    assert EventType.TOOL_CALL_ARGS in seen_types
    assert EventType.TOOL_CALL_END in seen_types
    # But no result event, since there is no result to report.
    result_events = [e for e in collected if e.type == EventType.TOOL_CALL_RESULT]
    assert len(result_events) == 0, (
        f"Expected no TOOL_CALL_RESULT events for tool with no result, got {len(result_events)}"
    )
    assert EventType.RUN_FINISHED in seen_types
@pytest.mark.asyncio
async def test_mixed_content_and_tools_complex():
    """Interleaved text chunks and overlapping tool calls stream correctly end to end."""
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    def _content(text):
        # Build a run_content chunk carrying the given text.
        chunk = RunContentEvent()
        chunk.event = RunEvent.run_content
        chunk.content = text
        return chunk

    async def fake_stream():
        yield _content("Starting analysis...")
        # First tool opens.
        analyze = ToolExecution(tool_call_id="tool_1", tool_name="analyze", tool_args={"data": "A"})
        started_1 = ToolCallStartedEvent()
        started_1.tool = analyze
        yield started_1
        # Text while the first tool is running.
        yield _content("Processing data...")
        # Second tool opens concurrently.
        verify = ToolExecution(tool_call_id="tool_2", tool_name="verify", tool_args={"check": "B"})
        started_2 = ToolCallStartedEvent()
        started_2.tool = verify
        yield started_2
        # First tool completes.
        analyze.result = "Analysis complete"
        finished_1 = ToolCallCompletedEvent()
        finished_1.tool = analyze
        yield finished_1
        yield _content("First analysis done...")
        # Second tool completes.
        verify.result = "Verification passed"
        finished_2 = ToolCallCompletedEvent()
        finished_2.tool = verify
        yield finished_2
        yield _content("All tasks completed!")
        # The run completes.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    seen_types = [e.type for e in collected]
    # Every expected AG-UI event type appears.
    for expected in (
        EventType.TEXT_MESSAGE_START,
        EventType.TEXT_MESSAGE_CONTENT,
        EventType.TEXT_MESSAGE_END,
        EventType.TOOL_CALL_START,
        EventType.TOOL_CALL_ARGS,
        EventType.TOOL_CALL_END,
        EventType.TOOL_CALL_RESULT,
        EventType.RUN_FINISHED,
    ):
        assert expected in seen_types
    # Both tool calls produced their full start/end/result trio.
    starts = [e for e in collected if e.type == EventType.TOOL_CALL_START]
    ends = [e for e in collected if e.type == EventType.TOOL_CALL_END]
    results = [e for e in collected if e.type == EventType.TOOL_CALL_RESULT]
    assert len(starts) == 2, f"Expected 2 tool starts, got {len(starts)}"
    assert len(ends) == 2, f"Expected 2 tool ends, got {len(ends)}"
    assert len(results) == 2, f"Expected 2 tool results, got {len(results)}"
    # All four text fragments survived the interleaving.
    combined = "".join(e.delta for e in collected if e.type == EventType.TEXT_MESSAGE_CONTENT)
    for fragment in (
        "Starting analysis...",
        "Processing data...",
        "First analysis done...",
        "All tasks completed!",
    ):
        assert fragment in combined
    # The open text message must be closed before the run finishes.
    assert seen_types.index(EventType.TEXT_MESSAGE_END) < seen_types.index(EventType.RUN_FINISHED), (
        "TEXT_MESSAGE_END should come before RUN_FINISHED"
    )
@pytest.mark.asyncio
async def test_large_scale_concurrent_tools():
    """Ten concurrent tool calls all stream distinct start/end/result events."""
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    async def fake_stream():
        executions = []
        # Open ten tool calls before completing any of them.
        for index in range(10):
            execution = ToolExecution(
                tool_call_id=f"call_stress_{index}", tool_name=f"stress_tool_{index}", tool_args={"index": index}
            )
            executions.append(execution)
            started = ToolCallStartedEvent()
            started.tool = execution
            yield started
        # Then complete each one with a distinct result.
        for index, execution in enumerate(executions):
            execution.result = f"Result {index}"
            finished = ToolCallCompletedEvent()
            finished.tool = execution
            yield finished
        # Then the run completes.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    collected = []
    async for event in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1"):
        collected.append(event)
        # Safety valve against runaway event generation.
        if len(collected) > 100:
            pytest.fail("Too many events in stress test - possible infinite loop")

    starts = [e for e in collected if e.type == EventType.TOOL_CALL_START]
    ends = [e for e in collected if e.type == EventType.TOOL_CALL_END]
    results = [e for e in collected if e.type == EventType.TOOL_CALL_RESULT]
    assert len(starts) == 10, f"Expected 10 tool starts, got {len(starts)}"
    assert len(ends) == 10, f"Expected 10 tool ends, got {len(ends)}"
    assert len(results) == 10, f"Expected 10 tool results, got {len(results)}"
    # All ids are unique and consistent across the three event kinds.
    start_ids = {e.tool_call_id for e in starts}
    end_ids = {e.tool_call_id for e in ends}
    result_ids = {e.tool_call_id for e in results}
    assert len(start_ids) == 10, "All tool call IDs should be unique"
    assert start_ids == end_ids == result_ids, "Tool call IDs should match across event types"
@pytest.mark.asyncio
async def test_event_ordering_invariants():
    """Test critical event ordering invariants that must never be violated."""
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    async def mock_stream_for_ordering():
        # Text content before any tool activity.
        text_response = RunContentEvent()
        text_response.event = RunEvent.run_content
        text_response.content = "Processing your request"
        yield text_response
        # A single tool call that starts and then completes.
        tool_call = ToolExecution(tool_call_id="order_test", tool_name="process", tool_args={})
        tool_start = ToolCallStartedEvent()
        tool_start.tool = tool_call
        yield tool_start
        tool_call.result = "Done"
        tool_end = ToolCallCompletedEvent()
        tool_end.tool = tool_call
        yield tool_end
        # Final content after the tool call.
        final_text = RunContentEvent()
        final_text.event = RunEvent.run_content
        final_text.content = "Complete!"
        yield final_text
        # Complete the run.
        completed_response = RunContentEvent()
        completed_response.event = RunEvent.run_completed
        completed_response.content = ""
        yield completed_response

    events = []
    async for event in async_stream_agno_response_as_agui_events(mock_stream_for_ordering(), "thread_1", "run_1"):
        events.append(event)
    event_types = [event.type for event in events]
    # Invariant 1: TEXT_MESSAGE_START must come before any TEXT_MESSAGE_CONTENT
    # belonging to the same message.
    start_indices = [i for i, t in enumerate(event_types) if t == EventType.TEXT_MESSAGE_START]
    content_indices = [i for i, t in enumerate(event_types) if t == EventType.TEXT_MESSAGE_CONTENT]
    for start_idx in start_indices:
        related_content_indices = [i for i in content_indices if i > start_idx]
        if related_content_indices:
            # Scope the content events to this message by finding the matching
            # TEXT_MESSAGE_END; fall back to the end of the stream if none follows.
            next_end_idx = next(
                (i for i, t in enumerate(event_types[start_idx:], start_idx) if t == EventType.TEXT_MESSAGE_END),
                len(event_types),
            )
            related_content_indices = [i for i in related_content_indices if i < next_end_idx]
            for content_idx in related_content_indices:
                assert start_idx < content_idx, (
                    f"TEXT_MESSAGE_START at {start_idx} should come before TEXT_MESSAGE_CONTENT at {content_idx}"
                )
    # Invariant 2: TOOL_CALL_START must come before TOOL_CALL_ARGS for the same tool call id.
    tool_starts = [(i, e) for i, e in enumerate(events) if e.type == EventType.TOOL_CALL_START]
    tool_args = [(i, e) for i, e in enumerate(events) if e.type == EventType.TOOL_CALL_ARGS]
    for start_idx, start_event in tool_starts:
        matching_args = [(i, e) for i, e in tool_args if e.tool_call_id == start_event.tool_call_id]
        for args_idx, _ in matching_args:
            assert start_idx < args_idx, (
                f"TOOL_CALL_START at {start_idx} should come before TOOL_CALL_ARGS at {args_idx}"
            )
    # Invariant 3: RUN_FINISHED must be the last event in the stream.
    run_finished_idx = event_types.index(EventType.RUN_FINISHED)
    assert run_finished_idx == len(event_types) - 1, "RUN_FINISHED must be the last event"
@pytest.mark.asyncio
async def test_completion_event_race_condition():
    """Content carried on the completion event itself must not be re-emitted as a delta."""
    from agno.run.agent import RunEvent

    async def fake_stream():
        # Five ordinary content chunks.
        for index in range(5):
            chunk = RunContentEvent()
            chunk.event = RunEvent.run_content
            chunk.content = f"Chunk {index} "
            yield chunk
        # The completion event carries content of its own — the historical race.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = "Final content in completion event"
        yield done

    collected = [e async for e in async_stream_agno_response_as_agui_events(fake_stream(), "thread_1", "run_1")]
    seen_types = [e.type for e in collected]
    text_end_idx = seen_types.index(EventType.TEXT_MESSAGE_END)
    run_finished_idx = seen_types.index(EventType.RUN_FINISHED)
    # Every delta must arrive while the text message is still open.
    for content_idx in (i for i, t in enumerate(seen_types) if t == EventType.TEXT_MESSAGE_CONTENT):
        assert content_idx < text_end_idx, (
            f"TEXT_MESSAGE_CONTENT at {content_idx} should come before TEXT_MESSAGE_END at {text_end_idx}"
        )
    # And the message must be closed before the run is reported finished.
    assert text_end_idx < run_finished_idx, "TEXT_MESSAGE_END should come before RUN_FINISHED"
    # The completion event's content must not leak into the message body.
    combined = "".join(e.delta for e in collected if e.type == EventType.TEXT_MESSAGE_CONTENT)
    assert "Final content in completion event" not in combined, "Completion event content should not be duplicated"
    assert "Chunk 0 Chunk 1 Chunk 2 Chunk 3 Chunk 4 " == combined, (
        "Content should be properly sequenced without duplication"
    )
@pytest.mark.asyncio
async def test_message_id_separation_after_tool_calls():
    """Test for text messages after tool calls should have different message_ids."""
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    def _content_event(text):
        # Build a run_content event carrying the given text.
        ev = RunContentEvent()
        ev.event = RunEvent.run_content
        ev.content = text
        return ev

    async def mock_stream_with_separated_messages():
        # First text segment.
        yield _content_event("Let me search for that information.")

        # Tool call begins: should close the first message and force a new message_id.
        tool_call = ToolExecution(tool_call_id="search_123", tool_name="search_tool", tool_args={"query": "test"})
        started = ToolCallStartedEvent()
        started.tool = tool_call
        yield started

        # Tool finishes; result is attached only after the start event was emitted.
        tool_call.result = {"results": "Found information"}
        finished = ToolCallCompletedEvent()
        finished.tool = tool_call
        yield finished

        # Second text segment (must carry a DIFFERENT message_id).
        yield _content_event("Based on the search results, here's what I found.")

        # Run completion.
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    events = [
        event
        async for event in async_stream_agno_response_as_agui_events(
            mock_stream_with_separated_messages(), "thread_1", "run_1"
        )
    ]

    # Bucket the event kinds we care about.
    text_start_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_START]
    text_content_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_CONTENT]
    text_end_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_END]
    tool_call_start_events = [e for e in events if e.type == EventType.TOOL_CALL_START]

    assert len(text_start_events) == 2, f"Expected 2 TEXT_MESSAGE_START events, got {len(text_start_events)}"
    assert len(text_content_events) == 2, f"Expected 2 TEXT_MESSAGE_CONTENT events, got {len(text_content_events)}"
    assert len(text_end_events) == 2, f"Expected 2 TEXT_MESSAGE_END events, got {len(text_end_events)}"
    assert len(tool_call_start_events) == 1, f"Expected 1 TOOL_CALL_START event, got {len(tool_call_start_events)}"

    # CORE ASSERTION: the two text segments carry distinct message_ids.
    first_message_id, second_message_id = (e.message_id for e in text_start_events)
    assert first_message_id != second_message_id, "Different text message segments should have different message_ids"

    assert text_content_events[0].message_id == first_message_id, "First content event should match first message_id"
    assert text_content_events[1].message_id == second_message_id, "Second content event should match second message_id"
    assert text_end_events[0].message_id == first_message_id, "First end event should match first message_id"
    assert text_end_events[1].message_id == second_message_id, "Second end event should match second message_id"

    # The tool call hangs off the message that was open when it started.
    assert tool_call_start_events[0].parent_message_id == first_message_id, (
        "Tool call should reference the first message as parent_message_id"
    )

    assert text_content_events[0].delta == "Let me search for that information."
    assert text_content_events[1].delta == "Based on the search results, here's what I found."
@pytest.mark.asyncio
async def test_multiple_tool_calls_message_id_separation():
    """
    Test that multiple tool calls properly separate messages with unique message_ids.
    """
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    def _text(content):
        # Build a run_content event carrying the given text.
        ev = RunContentEvent()
        ev.event = RunEvent.run_content
        ev.content = content
        return ev

    async def mock_stream_with_multiple_tools():
        yield _text("I'll need to use multiple tools for this.")

        # First tool round-trip (result attached only after the start event).
        search_tool = ToolExecution(tool_call_id="tool_1", tool_name="search", tool_args={"query": "A"})
        start_1 = ToolCallStartedEvent()
        start_1.tool = search_tool
        yield start_1
        search_tool.result = "Result A"
        end_1 = ToolCallCompletedEvent()
        end_1.tool = search_tool
        yield end_1

        yield _text("Now let me try another approach.")

        # Second tool round-trip.
        calc_tool = ToolExecution(tool_call_id="tool_2", tool_name="calculate", tool_args={"expr": "2+2"})
        start_2 = ToolCallStartedEvent()
        start_2.tool = calc_tool
        yield start_2
        calc_tool.result = "4"
        end_2 = ToolCallCompletedEvent()
        end_2.tool = calc_tool
        yield end_2

        yield _text("Based on both results, here's my conclusion.")

        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    events = [
        event
        async for event in async_stream_agno_response_as_agui_events(
            mock_stream_with_multiple_tools(), "thread_1", "run_1"
        )
    ]

    text_start_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_START]
    text_content_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_CONTENT]
    tool_call_start_events = [e for e in events if e.type == EventType.TOOL_CALL_START]

    # Three text segments: initial, between the tools, and final.
    assert len(text_start_events) == 3, f"Expected 3 text messages, got {len(text_start_events)}"
    assert len(text_content_events) == 3, f"Expected 3 text content events, got {len(text_content_events)}"
    assert len(tool_call_start_events) == 2, f"Expected 2 tool calls, got {len(tool_call_start_events)}"

    # Every segment gets its own message_id.
    message_ids = [event.message_id for event in text_start_events]
    assert len(set(message_ids)) == 3, f"All message_ids should be unique, got: {message_ids}"

    # Each tool call points back at the message that preceded it.
    first_tool_parent = tool_call_start_events[0].parent_message_id
    second_tool_parent = tool_call_start_events[1].parent_message_id
    assert first_tool_parent == message_ids[0], "First tool should reference first message"
    assert second_tool_parent == message_ids[1], "Second tool should reference second message"

    expected_content = [
        "I'll need to use multiple tools for this.",
        "Now let me try another approach.",
        "Based on both results, here's my conclusion.",
    ]
    actual_content = [event.delta for event in text_content_events]
    assert actual_content == expected_content, f"Content mismatch: {actual_content}"
@pytest.mark.asyncio
async def test_message_id_consistency_within_message():
    """
    Test that all events within a single message (start, content chunks, end) use the same message_id.
    """
    from agno.run.agent import RunEvent

    async def mock_stream_with_chunked_content():
        # One logical message split across three deltas, then a completion event.
        for piece in ("This is ", "a long message ", "with multiple chunks."):
            chunk = RunContentEvent()
            chunk.event = RunEvent.run_content
            chunk.content = piece
            yield chunk
        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    events = [
        event
        async for event in async_stream_agno_response_as_agui_events(
            mock_stream_with_chunked_content(), "thread_1", "run_1"
        )
    ]

    text_start_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_START]
    text_content_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_CONTENT]
    text_end_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_END]

    assert len(text_start_events) == 1, "Should have exactly one message"
    assert len(text_content_events) == 3, "Should have 3 content chunks"
    assert len(text_end_events) == 1, "Should have exactly one end event"

    # Every chunk and the end event must share the start event's message_id.
    message_id = text_start_events[0].message_id
    for event in text_content_events:
        assert event.message_id == message_id, (
            f"Content event message_id {event.message_id} should match start event {message_id}"
        )
    assert text_end_events[0].message_id == message_id, f"End event message_id should match start event {message_id}"

    # The deltas concatenate back into the original text.
    total_content = "".join(e.delta for e in text_content_events)
    assert total_content == "This is a long message with multiple chunks."
@pytest.mark.asyncio
async def test_message_id_regression_prevention():
    """
    Regression test for message_id separation across tool call boundaries.
    Ensures that text messages separated by tool calls maintain unique message_ids,
    preventing improper message grouping in AG-UI frontends.
    """
    from agno.models.response import ToolExecution
    from agno.run.agent import RunEvent

    async def mock_complex_message_tool_sequence():
        # Text before the first tool call.
        intro = RunContentEvent()
        intro.event = RunEvent.run_content
        intro.content = "Let me help you with that."
        yield intro

        # First tool round-trip (result attached only after the start event).
        helper_tool = ToolExecution(tool_call_id="bug_test", tool_name="helper", tool_args={})
        helper_start = ToolCallStartedEvent()
        helper_start.tool = helper_tool
        yield helper_start
        helper_tool.result = "success"
        helper_end = ToolCallCompletedEvent()
        helper_end.tool = helper_tool
        yield helper_end

        # Text after the first tool call — should receive a fresh message_id.
        answer = RunContentEvent()
        answer.event = RunEvent.run_content
        answer.content = "Based on the results, here's your answer."
        yield answer

        # Second tool round-trip.
        finalizer_tool = ToolExecution(tool_call_id="bug_test_2", tool_name="finalizer", tool_args={})
        finalizer_start = ToolCallStartedEvent()
        finalizer_start.tool = finalizer_tool
        yield finalizer_start
        finalizer_tool.result = "done"
        finalizer_end = ToolCallCompletedEvent()
        finalizer_end.tool = finalizer_tool
        yield finalizer_end

        # Final text — again a fresh message_id.
        outro = RunContentEvent()
        outro.event = RunEvent.run_content
        outro.content = "All done!"
        yield outro

        done = RunContentEvent()
        done.event = RunEvent.run_completed
        done.content = ""
        yield done

    events = [
        event
        async for event in async_stream_agno_response_as_agui_events(
            mock_complex_message_tool_sequence(), "thread_1", "run_1"
        )
    ]

    text_start_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_START]
    tool_call_events = [e for e in events if e.type == EventType.TOOL_CALL_START]

    assert len(text_start_events) == 3, f"Expected 3 text messages, got {len(text_start_events)}"
    assert len(tool_call_events) == 2, f"Expected 2 tool calls, got {len(tool_call_events)}"

    message_ids = [event.message_id for event in text_start_events]

    # CRITICAL: no message_id may be reused across tool call boundaries.
    assert len(set(message_ids)) == 3, (
        f"All text message_ids should be unique across tool call boundaries. Got message_ids: {message_ids}"
    )

    # Each tool call references the message that was open when it started.
    tool_1_parent = tool_call_events[0].parent_message_id
    tool_2_parent = tool_call_events[1].parent_message_id
    assert tool_1_parent == message_ids[0], (
        f"First tool call should reference first message. Expected {message_ids[0]}, got {tool_1_parent}"
    )
    assert tool_2_parent == message_ids[1], (
        f"Second tool call should reference second message. Expected {message_ids[1]}, got {tool_2_parent}"
    )

    # Parent references introduce no additional IDs beyond the three messages.
    all_referenced_ids = set(message_ids + [tool_1_parent, tool_2_parent])
    assert len(all_referenced_ids) == 3, (
        f"Should have exactly 3 unique message IDs in the conversation. Found: {sorted(all_referenced_ids)}"
    )
def test_validate_agui_state_with_valid_dict():
    """Test validate_agui_state with valid dict."""
    from agno.os.interfaces.agui.utils import validate_agui_state

    state = {"user_name": "Alice", "counter": 5}
    assert validate_agui_state(state, "test_thread") == {"user_name": "Alice", "counter": 5}
def test_validate_agui_state_with_none():
    """Test validate_agui_state with None state."""
    from agno.os.interfaces.agui.utils import validate_agui_state

    assert validate_agui_state(None, "test_thread") is None
def test_validate_agui_state_with_invalid_type():
    """Test validate_agui_state with non-dict type returns None."""
    from agno.os.interfaces.agui.utils import validate_agui_state

    # Strings, lists, and numbers are all rejected.
    for invalid_state in ("invalid_string", [1, 2, 3], 42):
        assert validate_agui_state(invalid_state, "test_thread") is None
def test_validate_agui_state_with_basemodel():
    """Test validate_agui_state with Pydantic BaseModel."""
    from pydantic import BaseModel

    from agno.os.interfaces.agui.utils import validate_agui_state

    class TestModel(BaseModel):
        name: str
        count: int

    # A BaseModel is converted to its dict representation.
    assert validate_agui_state(TestModel(name="test", count=10), "test_thread") == {"name": "test", "count": 10}
def test_validate_agui_state_with_dataclass():
    """Test validate_agui_state with dataclass."""
    from dataclasses import dataclass

    from agno.os.interfaces.agui.utils import validate_agui_state

    @dataclass
    class TestDataclass:
        name: str
        count: int

    # A dataclass instance is converted to its dict representation.
    assert validate_agui_state(TestDataclass(name="test", count=10), "test_thread") == {"name": "test", "count": 10}
def test_validate_agui_state_with_to_dict_method():
    """Test validate_agui_state with object having to_dict method."""
    from agno.os.interfaces.agui.utils import validate_agui_state

    class TestClass:
        def __init__(self, name: str, count: int):
            self.name = name
            self.count = count

        def to_dict(self):
            return {"name": self.name, "count": self.count}

    # Objects exposing to_dict() are converted via that method.
    assert validate_agui_state(TestClass(name="test", count=10), "test_thread") == {"name": "test", "count": 10}
def test_validate_agui_state_with_invalid_to_dict():
    """Test validate_agui_state with to_dict method returning non-dict."""
    from agno.os.interfaces.agui.utils import validate_agui_state

    class TestClass:
        def to_dict(self):
            return "not_a_dict"

    # A to_dict() that does not produce a dict is treated as invalid state.
    assert validate_agui_state(TestClass(), "test_thread") is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/app/test_agui_app.py",
"license": "Apache License 2.0",
"lines": 1074,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/bravesearch.py | import json
from os import getenv
from typing import Optional
from agno.tools import Toolkit
from agno.utils.log import log_info
try:
from brave import Brave
except ImportError:
raise ImportError("`brave-search` not installed. Please install using `pip install brave-search`")
class BraveSearchTools(Toolkit):
    """
    BraveSearch is a toolkit for searching Brave easily.

    Args:
        api_key (str, optional): Brave API key. If not provided, will use BRAVE_API_KEY environment variable.
        fixed_max_results (Optional[int]): A fixed number of maximum results that overrides the
            per-call ``max_results`` argument of ``brave_search``.
        fixed_language (Optional[str]): A fixed language that overrides the per-call
            ``search_lang`` argument of ``brave_search``.
        enable_brave_search (bool): Whether to register the ``brave_search`` tool. Default is True.
        all (bool): Register every available tool regardless of the individual enable flags.

    Raises:
        ValueError: If no API key is given and BRAVE_API_KEY is not set in the environment.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        fixed_max_results: Optional[int] = None,
        fixed_language: Optional[str] = None,
        enable_brave_search: bool = True,
        all: bool = False,
        **kwargs,
    ):
        # An explicit api_key argument wins over the environment variable.
        self.api_key = api_key or getenv("BRAVE_API_KEY")
        if not self.api_key:
            raise ValueError("BRAVE_API_KEY is required. Please set the BRAVE_API_KEY environment variable.")

        # Fixed values, when set, take precedence over per-call arguments in brave_search().
        self.fixed_max_results = fixed_max_results
        self.fixed_language = fixed_language

        self.brave_client = Brave(api_key=self.api_key)

        tools = []
        if all or enable_brave_search:
            tools.append(self.brave_search)

        super().__init__(
            name="brave_search",
            tools=tools,
            **kwargs,
        )

    def brave_search(
        self,
        query: str,
        max_results: int = 5,
        country: str = "US",
        search_lang: str = "en",
    ) -> str:
        """
        Search Brave for the specified query and return the results.

        Args:
            query (str): The query to search for.
            max_results (int, optional): The maximum number of results to return. Default is 5.
                Ignored when ``fixed_max_results`` was set on the toolkit.
            country (str, optional): The country code for search results. Default is "US".
            search_lang (str, optional): The language of the search results. Default is "en".
                Ignored when ``fixed_language`` was set on the toolkit.

        Returns:
            str: A JSON formatted string containing the search results. An empty/falsy query
            yields a JSON error payload instead of performing a search.
        """
        # Toolkit-level fixed settings override the call arguments.
        final_max_results = self.fixed_max_results if self.fixed_max_results is not None else max_results
        final_search_lang = self.fixed_language if self.fixed_language is not None else search_lang

        if not query:
            return json.dumps({"error": "Please provide a query to search for"})

        log_info(f"Searching Brave for: {query}")

        search_params = {
            "q": query,
            "count": final_max_results,
            "country": country,
            "search_lang": final_search_lang,
            # Only web results are requested from the API.
            "result_filter": "web",
        }

        search_results = self.brave_client.search(**search_params)

        filtered_results = {
            "web_results": [],
            "query": query,
            "total_results": 0,
        }

        # A missing or falsy `web` section leaves the defaults (empty results) in place.
        if hasattr(search_results, "web") and search_results.web:
            web_results = []
            for result in search_results.web.results:
                web_result = {
                    "title": result.title,
                    # str() because the client may return a URL object rather than a string.
                    "url": str(result.url),
                    "description": result.description,
                }
                web_results.append(web_result)

            filtered_results["web_results"] = web_results
            filtered_results["total_results"] = len(web_results)

        return json.dumps(filtered_results, indent=2)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/bravesearch.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_brave_search.py | import json
import os
from unittest.mock import MagicMock, patch
import pytest
from agno.tools.bravesearch import BraveSearchTools
@pytest.fixture
def mock_brave_client():
    """Yield a mocked Brave client instance patched into the toolkit module."""
    with patch("agno.tools.bravesearch.Brave") as mock_brave:
        # This instance stands in for whatever Brave() would return.
        client = MagicMock()

        # search() answers with an object carrying an empty web-results section.
        empty_search = MagicMock()
        empty_search.web = MagicMock()
        empty_search.web.results = []
        client.search.return_value = empty_search

        # _get() answers with a successful, empty HTTP-style response.
        http_response = MagicMock()
        http_response.status_code = 200
        http_response.json.return_value = {"web": {"results": []}}
        client._get.return_value = http_response

        mock_brave.return_value = client
        yield client
@pytest.fixture
def brave_search_tools(mock_brave_client):
    """Build a BraveSearchTools instance backed by the mocked Brave client.

    Sets BRAVE_API_KEY for the duration of the test and restores the previous
    value afterwards, so the environment mutation does not leak into other tests
    (the original fixture left the variable set permanently).
    """
    previous = os.environ.get("BRAVE_API_KEY")
    os.environ["BRAVE_API_KEY"] = "test_api_key"
    try:
        yield BraveSearchTools()
    finally:
        # Restore the pre-test environment exactly.
        if previous is None:
            os.environ.pop("BRAVE_API_KEY", None)
        else:
            os.environ["BRAVE_API_KEY"] = previous
def test_init_with_api_key():
    """An explicit api_key is stored and the fixed settings default to None."""
    with patch("agno.tools.bravesearch.Brave"):
        toolkit = BraveSearchTools(api_key="test_key")

    assert toolkit.api_key == "test_key"
    assert toolkit.fixed_max_results is None
    assert toolkit.fixed_language is None
def test_init_with_env_var():
    """The api key falls back to BRAVE_API_KEY when no argument is given.

    The original test set the environment variable and never restored it,
    leaking state into later tests; this version restores the previous value.
    """
    previous = os.environ.get("BRAVE_API_KEY")
    os.environ["BRAVE_API_KEY"] = "env_test_key"
    try:
        with patch("agno.tools.bravesearch.Brave"):
            tools = BraveSearchTools()
        assert tools.api_key == "env_test_key"
    finally:
        # Undo the mutation so later tests see the original environment.
        if previous is None:
            os.environ.pop("BRAVE_API_KEY", None)
        else:
            os.environ["BRAVE_API_KEY"] = previous
def test_init_without_api_key():
    """Construction fails fast when no key is given and the env var is unset.

    The original test deleted BRAVE_API_KEY without restoring it; this version
    restores any pre-existing value so other tests are unaffected.
    """
    previous = os.environ.pop("BRAVE_API_KEY", None)
    try:
        with pytest.raises(ValueError, match="BRAVE_API_KEY is required"):
            BraveSearchTools()
    finally:
        if previous is not None:
            os.environ["BRAVE_API_KEY"] = previous
def test_init_with_fixed_params():
    """fixed_max_results and fixed_language are stored verbatim."""
    with patch("agno.tools.bravesearch.Brave"):
        toolkit = BraveSearchTools(api_key="test_key", fixed_max_results=10, fixed_language="fr")

    assert toolkit.fixed_max_results == 10
    assert toolkit.fixed_language == "fr"
def test_toolkit_integration():
    """Test that the toolkit is properly initialized with name and tools"""
    with patch("agno.tools.bravesearch.Brave"):
        toolkit = BraveSearchTools(api_key="test_key")

    assert toolkit.name == "brave_search"
    assert len(toolkit.tools) == 1
    assert toolkit.tools[0].__name__ == "brave_search"
def test_brave_search_empty_query(brave_search_tools):
    """An empty query short-circuits into an error payload."""
    payload = json.loads(brave_search_tools.brave_search(""))
    assert payload == {"error": "Please provide a query to search for"}
def test_brave_search_none_query(brave_search_tools):
    """Test with None query"""
    payload = json.loads(brave_search_tools.brave_search(None))
    assert payload == {"error": "Please provide a query to search for"}
def test_brave_search_whitespace_query(brave_search_tools, mock_brave_client):
    """Test with whitespace-only query - currently treated as valid query"""
    # The implementation does not strip queries, so whitespace is searched as-is.
    # Stripping/validating queries could be a future improvement.
    empty_result = MagicMock()
    empty_result.web.results = []
    mock_brave_client.search.return_value = empty_result

    payload = json.loads(brave_search_tools.brave_search("   "))

    assert payload["query"] == "   "
    assert payload["web_results"] == []
    assert payload["total_results"] == 0
def test_brave_search_successful(brave_search_tools, mock_brave_client):
    """A single web hit is reflected in the filtered JSON payload."""
    hit = MagicMock()
    hit.title = "Test Title"
    hit.url = "https://test.com"
    hit.description = "Test Description"

    search_result = MagicMock()
    search_result.web.results = [hit]
    mock_brave_client.search.return_value = search_result

    payload = json.loads(brave_search_tools.brave_search("test query"))

    assert payload["query"] == "test query"
    assert payload["total_results"] == 1
    assert len(payload["web_results"]) == 1
    entry = payload["web_results"][0]
    assert entry["title"] == "Test Title"
    assert entry["url"] == "https://test.com"
    assert entry["description"] == "Test Description"
def test_brave_search_with_multiple_results(brave_search_tools, mock_brave_client):
    """Test search with multiple results"""
    hits = []
    for idx in range(3):
        hit = MagicMock()
        hit.title = f"Title {idx}"
        hit.url = f"https://test{idx}.com"
        hit.description = f"Description {idx}"
        hits.append(hit)

    search_result = MagicMock()
    search_result.web.results = hits
    mock_brave_client.search.return_value = search_result

    payload = json.loads(brave_search_tools.brave_search("test query"))

    assert payload["query"] == "test query"
    assert payload["total_results"] == 3
    assert len(payload["web_results"]) == 3
    for idx, entry in enumerate(payload["web_results"]):
        assert entry["title"] == f"Title {idx}"
        assert entry["url"] == f"https://test{idx}.com"
        assert entry["description"] == f"Description {idx}"
def test_brave_search_with_malformed_results(brave_search_tools, mock_brave_client):
    """Test search with results missing attributes"""
    broken_hit = MagicMock()
    broken_hit.title = None
    broken_hit.url = None
    broken_hit.description = None

    search_result = MagicMock()
    search_result.web.results = [broken_hit]
    mock_brave_client.search.return_value = search_result

    payload = json.loads(brave_search_tools.brave_search("test query"))

    assert payload["query"] == "test query"
    assert payload["total_results"] == 1
    assert len(payload["web_results"]) == 1
    entry = payload["web_results"][0]
    assert entry["title"] is None
    assert entry["url"] == "None"  # the URL always passes through str()
    assert entry["description"] is None
def test_brave_search_with_custom_params(brave_search_tools, mock_brave_client):
    """Caller-supplied parameters are forwarded to the Brave client unchanged."""
    empty_result = MagicMock()
    empty_result.web.results = []
    mock_brave_client.search.return_value = empty_result

    brave_search_tools.brave_search(query="test query", max_results=3, country="UK", search_lang="fr")

    mock_brave_client.search.assert_called_once_with(
        q="test query", count=3, country="UK", search_lang="fr", result_filter="web"
    )
def test_brave_search_with_default_params(brave_search_tools, mock_brave_client):
    """Test that default parameters are used when not specified"""
    empty_result = MagicMock()
    empty_result.web.results = []
    mock_brave_client.search.return_value = empty_result

    brave_search_tools.brave_search(query="test query")

    # Defaults: count=5, country="US", search_lang="en".
    mock_brave_client.search.assert_called_once_with(
        q="test query", count=5, country="US", search_lang="en", result_filter="web"
    )
def test_brave_search_with_none_params(brave_search_tools, mock_brave_client):
    """Test search with None parameters - should use defaults"""
    empty_result = MagicMock()
    empty_result.web.results = []
    mock_brave_client.search.return_value = empty_result

    # country=None is forwarded as-is; max_results and search_lang keep their defaults.
    brave_search_tools.brave_search(query="test query", country=None)

    mock_brave_client.search.assert_called_once_with(
        q="test query", count=5, country=None, search_lang="en", result_filter="web"
    )
def test_brave_search_with_fixed_params():
    """Toolkit-level fixed settings override the per-call arguments."""
    with patch("agno.tools.bravesearch.Brave") as mock_brave:
        client = MagicMock()

        # Empty search result.
        empty_search = MagicMock()
        empty_search.web = MagicMock()
        empty_search.web.results = []
        client.search.return_value = empty_search

        # Successful, empty HTTP-style response for _get().
        http_response = MagicMock()
        http_response.status_code = 200
        http_response.json.return_value = {"web": {"results": []}}
        client._get.return_value = http_response

        mock_brave.return_value = client

        toolkit = BraveSearchTools(api_key="test_key", fixed_max_results=5, fixed_language="fr")
        payload = json.loads(toolkit.brave_search(query="test query", max_results=10, search_lang="en"))

        # Response structure is intact.
        assert payload["query"] == "test query"
        assert payload["web_results"] == []
        assert payload["total_results"] == 0

        client.search.assert_called_once_with(
            q="test query",
            count=5,  # fixed_max_results wins over the provided 10
            country="US",  # default value
            search_lang="fr",  # fixed_language wins over the provided "en"
            result_filter="web",
        )
def test_brave_search_no_web_results(brave_search_tools, mock_brave_client):
    """A falsy `web` section produces an empty result payload."""
    search_result = MagicMock()
    search_result.web = None
    mock_brave_client.search.return_value = search_result

    payload = json.loads(brave_search_tools.brave_search("test query"))

    assert payload["query"] == "test query"
    assert payload["web_results"] == []
    assert payload["total_results"] == 0
def test_brave_search_web_attribute_missing(brave_search_tools, mock_brave_client):
    """Test when search results object doesn't have 'web' attribute"""
    search_result = MagicMock()
    del search_result.web  # hasattr(search_result, "web") is now False
    mock_brave_client.search.return_value = search_result

    payload = json.loads(brave_search_tools.brave_search("test query"))

    assert payload["query"] == "test query"
    assert payload["web_results"] == []
    assert payload["total_results"] == 0
def test_brave_search_empty_web_results(brave_search_tools, mock_brave_client):
    """Test when web.results is empty list"""
    search_result = MagicMock()
    search_result.web.results = []
    mock_brave_client.search.return_value = search_result

    payload = json.loads(brave_search_tools.brave_search("test query"))

    assert payload["query"] == "test query"
    assert payload["web_results"] == []
    assert payload["total_results"] == 0
def test_brave_search_exception_handling(brave_search_tools, mock_brave_client):
    """Test that exceptions from Brave client are handled gracefully"""
    # The toolkit does not swallow client errors — they propagate to the caller.
    mock_brave_client.search.side_effect = Exception("API Error")

    with pytest.raises(Exception, match="API Error"):
        brave_search_tools.brave_search("test query")
@patch("agno.tools.bravesearch.log_info")
def test_brave_search_logging(mock_log_info, brave_search_tools, mock_brave_client):
    """Test that logging is called correctly"""
    empty_result = MagicMock()
    empty_result.web.results = []
    mock_brave_client.search.return_value = empty_result

    brave_search_tools.brave_search("test query")

    mock_log_info.assert_called_once_with("Searching Brave for: test query")
def test_brave_search_result_filter_always_web(brave_search_tools, mock_brave_client):
    """Test that result_filter is always set to 'web'"""
    empty_result = MagicMock()
    empty_result.web.results = []
    mock_brave_client.search.return_value = empty_result

    brave_search_tools.brave_search("test query")

    # Inspect the keyword arguments of the recorded call.
    _, kwargs = mock_brave_client.search.call_args
    assert kwargs["result_filter"] == "web"
def test_brave_search_url_conversion(brave_search_tools, mock_brave_client):
    """Test that URL is converted to string using str()"""
    hit = MagicMock()
    hit.title = "Test Title"
    hit.url = 12345  # deliberately not a string
    hit.description = "Test Description"

    search_result = MagicMock()
    search_result.web.results = [hit]
    mock_brave_client.search.return_value = search_result

    payload = json.loads(brave_search_tools.brave_search("test query"))

    assert payload["web_results"][0]["url"] == "12345"
def test_json_serialization_integrity(brave_search_tools, mock_brave_client):
    """Test that the returned JSON is valid and can be parsed"""
    hit = MagicMock()
    hit.title = "Test Title"
    hit.url = "https://test.com"
    hit.description = "Test Description"

    search_result = MagicMock()
    search_result.web.results = [hit]
    mock_brave_client.search.return_value = search_result

    raw = brave_search_tools.brave_search("test query")

    # Parsing must succeed and expose the expected top-level keys.
    parsed = json.loads(raw)
    for key in ("web_results", "query", "total_results"):
        assert key in parsed

    # Round-trip: the parsed payload must itself be serializable again.
    json.dumps(parsed)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_brave_search.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/visualization.py | import json
import os
from typing import Any, Dict, List, Optional, Union
from agno.tools import Toolkit
from agno.utils.log import log_info, logger
class VisualizationTools(Toolkit):
    """Toolkit that renders charts with matplotlib and saves them as PNG files.

    Every tool returns a JSON string describing the outcome: on success it
    carries the chart type, title, saved file path and data-point count with
    ``"status": "success"``; on failure it carries the error text with
    ``"status": "error"`` (tools never raise to the caller).
    """
    def __init__(
        self,
        output_dir: str = "charts",
        enable_create_bar_chart: bool = True,
        enable_create_line_chart: bool = True,
        enable_create_pie_chart: bool = True,
        enable_create_scatter_plot: bool = True,
        enable_create_histogram: bool = True,
        all: bool = False,
        **kwargs,
    ):
        """
        Initialize the VisualizationTools toolkit.
        Args:
            output_dir (str): Directory to save charts. Default is "charts".
                Created on construction if it does not exist.
            enable_create_bar_chart (bool): Register the bar chart tool.
            enable_create_line_chart (bool): Register the line chart tool.
            enable_create_pie_chart (bool): Register the pie chart tool.
            enable_create_scatter_plot (bool): Register the scatter plot tool.
            enable_create_histogram (bool): Register the histogram tool.
            all (bool): Register every chart tool regardless of the individual
                flags. NOTE(review): shadows the ``all`` builtin; kept as-is
                for interface compatibility.
        Raises:
            ImportError: If matplotlib is not installed.
        """
        # Check if matplotlib is available
        try:
            import matplotlib
            # Use non-interactive backend to avoid display issues
            matplotlib.use("Agg")
        except ImportError:
            raise ImportError("matplotlib is not installed. Please install it using: `pip install matplotlib`")
        # Create output directory if it doesn't exist
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        self.output_dir = output_dir
        tools: List[Any] = []
        if enable_create_bar_chart or all:
            tools.append(self.create_bar_chart)
        if enable_create_line_chart or all:
            tools.append(self.create_line_chart)
        if enable_create_pie_chart or all:
            tools.append(self.create_pie_chart)
        if enable_create_scatter_plot or all:
            tools.append(self.create_scatter_plot)
        if enable_create_histogram or all:
            tools.append(self.create_histogram)
        super().__init__(name="visualization_tools", tools=tools, **kwargs)
    def _normalize_data_for_charts(
        self, data: Union[Dict[str, Any], List[Dict[str, Any]], List[Any], str]
    ) -> Dict[str, Union[int, float]]:
        """
        Normalize various data formats into a simple dictionary format for charts.
        Args:
            data: Can be a dict, list of dicts, or list of values
        Returns:
            Dict with string keys and numeric values. Non-numeric values are
            coerced to 0 rather than raising.
        """
        if isinstance(data, dict):
            # Already in the right format, just ensure values are numeric
            return {str(k): float(v) if isinstance(v, (int, float)) else 0 for k, v in data.items()}
        elif isinstance(data, list) and len(data) > 0:
            # NOTE(review): only data[0] is inspected to choose the branch, so a
            # mixed list (dicts and scalars) is handled by its first element's type.
            if isinstance(data[0], dict):
                # List of dictionaries - try to find key-value pairs
                result = {}
                for item in data:
                    if isinstance(item, dict):
                        # Look for common key patterns
                        keys = list(item.keys())
                        if len(keys) >= 2:
                            # Use first key as label, second as value
                            # (relies on dict insertion order; items with fewer
                            # than two keys are silently skipped)
                            label_key = keys[0]
                            value_key = keys[1]
                            result[str(item[label_key])] = (
                                float(item[value_key]) if isinstance(item[value_key], (int, float)) else 0
                            )
                return result
            else:
                # List of values - create numbered keys
                return {f"Item {i + 1}": float(v) if isinstance(v, (int, float)) else 0 for i, v in enumerate(data)}
        # Fallback
        # Anything unrecognized (e.g. an unparseable string, empty list) yields a
        # single placeholder data point instead of raising.
        return {"Data": 1.0}
    def create_bar_chart(
        self,
        data: Union[Dict[str, Union[int, float]], List[Dict[str, Any]], str],
        title: str = "Bar Chart",
        x_label: str = "Categories",
        y_label: str = "Values",
        filename: Optional[str] = None,
    ) -> str:
        """
        Create a bar chart from the provided data.
        Args:
            data: Dictionary with categories as keys and values as numbers,
                or list of dictionaries, or JSON string
            title (str): Title of the chart
            x_label (str): Label for x-axis
            y_label (str): Label for y-axis
            filename (Optional[str]): Custom filename for the chart image
        Returns:
            str: JSON string with chart information and file path
        """
        try:
            import matplotlib.pyplot as plt
            # Handle string input (JSON)
            if isinstance(data, str):
                try:
                    data = json.loads(data)
                except json.JSONDecodeError:
                    # Unparseable string falls through to the normalizer's fallback.
                    pass
            # Normalize data format
            normalized_data = self._normalize_data_for_charts(data)
            # Prepare data
            categories = list(normalized_data.keys())
            values = list(normalized_data.values())
            # Create the chart
            plt.figure(figsize=(10, 6))
            plt.bar(categories, values)
            plt.title(title)
            plt.xlabel(x_label)
            plt.ylabel(y_label)
            plt.xticks(rotation=45, ha="right")
            plt.tight_layout()
            # Save the chart
            if filename is None:
                # NOTE(review): filename is derived from the current file count, so
                # deleting files from output_dir can make a later chart overwrite
                # an earlier one.
                filename = f"bar_chart_{len(os.listdir(self.output_dir)) + 1}.png"
            file_path = os.path.join(self.output_dir, filename)
            plt.savefig(file_path, dpi=300, bbox_inches="tight")
            plt.close()
            log_info(f"Bar chart created and saved to {file_path}")
            return json.dumps(
                {
                    "chart_type": "bar_chart",
                    "title": title,
                    "file_path": file_path,
                    "data_points": len(normalized_data),
                    "status": "success",
                }
            )
        except Exception as e:
            # Errors are reported in the JSON payload instead of being raised.
            logger.error(f"Error creating bar chart: {str(e)}")
            return json.dumps({"chart_type": "bar_chart", "error": str(e), "status": "error"})
    def create_line_chart(
        self,
        data: Union[Dict[str, Union[int, float]], List[Dict[str, Any]], str],
        title: str = "Line Chart",
        x_label: str = "X-axis",
        y_label: str = "Y-axis",
        filename: Optional[str] = None,
    ) -> str:
        """
        Create a line chart from the provided data.
        Args:
            data: Dictionary with x-values as keys and y-values as numbers,
                or list of dictionaries, or JSON string
            title (str): Title of the chart
            x_label (str): Label for x-axis
            y_label (str): Label for y-axis
            filename (Optional[str]): Custom filename for the chart image
        Returns:
            str: JSON string with chart information and file path
        """
        try:
            import matplotlib.pyplot as plt
            # Handle string input (JSON)
            if isinstance(data, str):
                try:
                    data = json.loads(data)
                except json.JSONDecodeError:
                    pass
            # Normalize data format
            normalized_data = self._normalize_data_for_charts(data)
            # Prepare data
            x_values = list(normalized_data.keys())
            y_values = list(normalized_data.values())
            # Create the chart
            plt.figure(figsize=(10, 6))
            plt.plot(x_values, y_values, marker="o", linewidth=2, markersize=6)
            plt.title(title)
            plt.xlabel(x_label)
            plt.ylabel(y_label)
            plt.xticks(rotation=45, ha="right")
            plt.grid(True, alpha=0.3)
            plt.tight_layout()
            # Save the chart
            if filename is None:
                # Same count-based naming scheme as create_bar_chart (see note there).
                filename = f"line_chart_{len(os.listdir(self.output_dir)) + 1}.png"
            file_path = os.path.join(self.output_dir, filename)
            plt.savefig(file_path, dpi=300, bbox_inches="tight")
            plt.close()
            log_info(f"Line chart created and saved to {file_path}")
            return json.dumps(
                {
                    "chart_type": "line_chart",
                    "title": title,
                    "file_path": file_path,
                    "data_points": len(normalized_data),
                    "status": "success",
                }
            )
        except Exception as e:
            logger.error(f"Error creating line chart: {str(e)}")
            return json.dumps({"chart_type": "line_chart", "error": str(e), "status": "error"})
    def create_pie_chart(
        self,
        data: Union[Dict[str, Union[int, float]], List[Dict[str, Any]], str],
        title: str = "Pie Chart",
        filename: Optional[str] = None,
    ) -> str:
        """
        Create a pie chart from the provided data.
        Args:
            data: Dictionary with categories as keys and values as numbers,
                or list of dictionaries, or JSON string
            title (str): Title of the chart
            filename (Optional[str]): Custom filename for the chart image
        Returns:
            str: JSON string with chart information and file path
        """
        try:
            import matplotlib.pyplot as plt
            # Handle string input (JSON)
            if isinstance(data, str):
                try:
                    data = json.loads(data)
                except json.JSONDecodeError:
                    pass
            # Normalize data format
            normalized_data = self._normalize_data_for_charts(data)
            # Prepare data
            labels = list(normalized_data.keys())
            values = list(normalized_data.values())
            # Create the chart
            plt.figure(figsize=(10, 8))
            plt.pie(values, labels=labels, autopct="%1.1f%%", startangle=90)
            plt.title(title)
            plt.axis("equal")  # Equal aspect ratio ensures that pie is drawn as a circle
            # Save the chart
            if filename is None:
                filename = f"pie_chart_{len(os.listdir(self.output_dir)) + 1}.png"
            file_path = os.path.join(self.output_dir, filename)
            plt.savefig(file_path, dpi=300, bbox_inches="tight")
            plt.close()
            log_info(f"Pie chart created and saved to {file_path}")
            return json.dumps(
                {
                    "chart_type": "pie_chart",
                    "title": title,
                    "file_path": file_path,
                    "data_points": len(normalized_data),
                    "status": "success",
                }
            )
        except Exception as e:
            logger.error(f"Error creating pie chart: {str(e)}")
            return json.dumps({"chart_type": "pie_chart", "error": str(e), "status": "error"})
    def create_scatter_plot(
        self,
        x_data: Optional[List[Union[int, float]]] = None,
        y_data: Optional[List[Union[int, float]]] = None,
        title: str = "Scatter Plot",
        x_label: str = "X-axis",
        y_label: str = "Y-axis",
        filename: Optional[str] = None,
        # Alternative parameter names that agents might use
        x: Optional[List[Union[int, float]]] = None,
        y: Optional[List[Union[int, float]]] = None,
        data: Optional[Union[List[List[Union[int, float]]], Dict[str, List[Union[int, float]]]]] = None,
    ) -> str:
        """
        Create a scatter plot from the provided data.
        Args:
            x_data: List of x-values (can also use 'x' parameter)
            y_data: List of y-values (can also use 'y' parameter)
            title (str): Title of the chart
            x_label (str): Label for x-axis
            y_label (str): Label for y-axis
            filename (Optional[str]): Custom filename for the chart image
            data: Alternative format - list of [x,y] pairs or dict with 'x' and 'y' keys
        Returns:
            str: JSON string with chart information and file path.
                The error payload is returned when no usable x/y data is
                supplied or the two series differ in length.
        """
        try:
            import matplotlib.pyplot as plt
            # Handle different parameter formats
            # Precedence: explicit x_data/y_data, then x/y aliases, then `data`.
            if x_data is None:
                x_data = x
            if y_data is None:
                y_data = y
            # Handle data parameter
            if data is not None:
                if isinstance(data, dict):
                    # NOTE(review): a dict missing either "x" or "y" is silently
                    # ignored here and caught by the validation below.
                    if "x" in data and "y" in data:
                        x_data = data["x"]
                        y_data = data["y"]
                elif isinstance(data, list) and len(data) > 0:
                    if isinstance(data[0], list) and len(data[0]) == 2:
                        # List of [x,y] pairs
                        x_data = [point[0] for point in data]
                        y_data = [point[1] for point in data]
            # Validate that we have data
            if x_data is None or y_data is None:
                raise ValueError("Missing x_data and y_data parameters")
            if len(x_data) != len(y_data):
                raise ValueError("x_data and y_data must have the same length")
            # Create the chart
            plt.figure(figsize=(10, 6))
            plt.scatter(x_data, y_data, alpha=0.7, s=50)
            plt.title(title)
            plt.xlabel(x_label)
            plt.ylabel(y_label)
            plt.grid(True, alpha=0.3)
            plt.tight_layout()
            # Save the chart
            if filename is None:
                filename = f"scatter_plot_{len(os.listdir(self.output_dir)) + 1}.png"
            file_path = os.path.join(self.output_dir, filename)
            plt.savefig(file_path, dpi=300, bbox_inches="tight")
            plt.close()
            log_info(f"Scatter plot created and saved to {file_path}")
            return json.dumps(
                {
                    "chart_type": "scatter_plot",
                    "title": title,
                    "file_path": file_path,
                    "data_points": len(x_data),
                    "status": "success",
                }
            )
        except Exception as e:
            logger.error(f"Error creating scatter plot: {str(e)}")
            return json.dumps({"chart_type": "scatter_plot", "error": str(e), "status": "error"})
    def create_histogram(
        self,
        data: List[Union[int, float]],
        bins: int = 10,
        title: str = "Histogram",
        x_label: str = "Values",
        y_label: str = "Frequency",
        filename: Optional[str] = None,
    ) -> str:
        """
        Create a histogram from the provided data.
        Args:
            data: List of numeric values to plot. Values that cannot be
                converted to float are silently dropped.
            bins (int): Number of bins for the histogram
            title (str): Title of the chart
            x_label (str): Label for x-axis
            y_label (str): Label for y-axis
            filename (Optional[str]): Custom filename for the chart image
        Returns:
            str: JSON string with chart information and file path
        """
        try:
            import matplotlib.pyplot as plt
            # Validate data
            if not isinstance(data, list) or len(data) == 0:
                raise ValueError("Data must be a non-empty list of numbers")
            # Convert to numeric values
            numeric_data = []
            for value in data:
                try:
                    numeric_data.append(float(value))
                except (ValueError, TypeError):
                    # Skip values that are not convertible (e.g. arbitrary strings).
                    continue
            if len(numeric_data) == 0:
                raise ValueError("No valid numeric data found")
            # Create the chart
            plt.figure(figsize=(10, 6))
            plt.hist(numeric_data, bins=bins, alpha=0.7, edgecolor="black")
            plt.title(title)
            plt.xlabel(x_label)
            plt.ylabel(y_label)
            plt.grid(True, alpha=0.3)
            plt.tight_layout()
            # Save the chart
            if filename is None:
                filename = f"histogram_{len(os.listdir(self.output_dir)) + 1}.png"
            file_path = os.path.join(self.output_dir, filename)
            plt.savefig(file_path, dpi=300, bbox_inches="tight")
            plt.close()
            log_info(f"Histogram created and saved to {file_path}")
            return json.dumps(
                {
                    "chart_type": "histogram",
                    "title": title,
                    "file_path": file_path,
                    "data_points": len(numeric_data),
                    "bins": bins,
                    "status": "success",
                }
            )
        except Exception as e:
            logger.error(f"Error creating histogram: {str(e)}")
            return json.dumps({"chart_type": "histogram", "error": str(e), "status": "error"})
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/visualization.py",
"license": "Apache License 2.0",
"lines": 392,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_visualization.py | """Unit tests for VisualizationTools class."""
import json
import os
import tempfile
from unittest.mock import patch
import pytest
from agno.tools.visualization import VisualizationTools
@pytest.fixture
def temp_output_dir():
    """Create a temporary directory for test outputs."""
    with tempfile.TemporaryDirectory() as temp_dir:
        # Yield inside the context manager so the directory lives for the test
        # and is removed automatically on teardown.
        yield temp_dir
@pytest.fixture
def viz_tools(temp_output_dir):
    """Create a VisualizationTools instance with all chart types enabled."""
    return VisualizationTools(output_dir=temp_output_dir)
@pytest.fixture
def basic_viz_tools(temp_output_dir):
    """Create a VisualizationTools instance with only basic chart types."""
    # NOTE(review): currently identical to viz_tools -- no chart type is disabled.
    return VisualizationTools(output_dir=temp_output_dir)
def test_initialization_with_selective_charts(temp_output_dir):
    """Test initialization with only selected chart types."""
    tools = VisualizationTools(
        output_dir=temp_output_dir,
        enable_create_bar_chart=True,
        enable_create_line_chart=True,
        enable_create_pie_chart=False,
        enable_create_scatter_plot=False,
        enable_create_histogram=True,
    )
    # Only the enabled chart functions should be registered on the toolkit.
    function_names = [func.name for func in tools.functions.values()]
    assert "create_bar_chart" in function_names
    assert "create_line_chart" in function_names
    assert "create_pie_chart" not in function_names
    assert "create_scatter_plot" not in function_names
    assert "create_histogram" in function_names
def test_initialization_with_all_charts(viz_tools):
    """Test initialization with all chart types enabled."""
    function_names = [func.name for func in viz_tools.functions.values()]
    assert "create_bar_chart" in function_names
    assert "create_line_chart" in function_names
    assert "create_pie_chart" in function_names
    assert "create_scatter_plot" in function_names
    assert "create_histogram" in function_names
def test_output_directory_creation(temp_output_dir):
    """Test that output directory is created if it doesn't exist."""
    non_existent_dir = os.path.join(temp_output_dir, "charts")
    assert not os.path.exists(non_existent_dir)
    # Constructing the toolkit creates the output directory as a side effect.
    VisualizationTools(output_dir=non_existent_dir)
    assert os.path.exists(non_existent_dir)
@patch("matplotlib.pyplot.savefig")
@patch("matplotlib.pyplot.close")
@patch("matplotlib.pyplot.figure")
@patch("matplotlib.pyplot.bar")
@patch("matplotlib.pyplot.title")
@patch("matplotlib.pyplot.xlabel")
@patch("matplotlib.pyplot.ylabel")
@patch("matplotlib.pyplot.xticks")
@patch("matplotlib.pyplot.tight_layout")
def test_create_bar_chart_success(
mock_tight_layout,
mock_xticks,
mock_ylabel,
mock_xlabel,
mock_title,
mock_bar,
mock_figure,
mock_close,
mock_savefig,
viz_tools,
):
"""Test successful bar chart creation."""
test_data = {"A": 10, "B": 20, "C": 15}
result = viz_tools.create_bar_chart(data=test_data, title="Test Chart", x_label="Categories", y_label="Values")
result_dict = json.loads(result)
assert result_dict["status"] == "success"
assert result_dict["chart_type"] == "bar_chart"
assert result_dict["title"] == "Test Chart"
assert result_dict["data_points"] == 3
assert "file_path" in result_dict
# Verify matplotlib functions were called
mock_figure.assert_called_once_with(figsize=(10, 6))
mock_bar.assert_called_once()
mock_title.assert_called_once_with("Test Chart")
mock_xlabel.assert_called_once_with("Categories")
mock_ylabel.assert_called_once_with("Values")
mock_savefig.assert_called_once()
mock_close.assert_called_once()
@patch("matplotlib.pyplot.savefig")
@patch("matplotlib.pyplot.close")
@patch("matplotlib.pyplot.figure")
@patch("matplotlib.pyplot.bar")
@patch("matplotlib.pyplot.title")
@patch("matplotlib.pyplot.xlabel")
@patch("matplotlib.pyplot.ylabel")
@patch("matplotlib.pyplot.xticks")
@patch("matplotlib.pyplot.tight_layout")
def test_create_bar_chart_with_list_of_dicts(
mock_tight_layout,
mock_xticks,
mock_ylabel,
mock_xlabel,
mock_title,
mock_bar,
mock_figure,
mock_close,
mock_savefig,
viz_tools,
):
"""Test bar chart creation with list of dictionaries data."""
test_data = [
{"Month": "January", "Sales": 10000},
{"Month": "February", "Sales": 15000},
{"Month": "March", "Sales": 12000},
]
result = viz_tools.create_bar_chart(data=test_data, title="Sales Chart", x_label="Month", y_label="Sales")
result_dict = json.loads(result)
assert result_dict["status"] == "success"
assert result_dict["chart_type"] == "bar_chart"
assert result_dict["title"] == "Sales Chart"
assert result_dict["data_points"] == 3
assert "file_path" in result_dict
# Verify matplotlib functions were called
mock_figure.assert_called_once_with(figsize=(10, 6))
mock_bar.assert_called_once()
mock_title.assert_called_once_with("Sales Chart")
@patch("matplotlib.pyplot.savefig")
@patch("matplotlib.pyplot.close")
@patch("matplotlib.pyplot.figure")
@patch("matplotlib.pyplot.plot")
@patch("matplotlib.pyplot.title")
@patch("matplotlib.pyplot.xlabel")
@patch("matplotlib.pyplot.ylabel")
@patch("matplotlib.pyplot.xticks")
@patch("matplotlib.pyplot.grid")
@patch("matplotlib.pyplot.tight_layout")
def test_create_line_chart_success(
mock_tight_layout,
mock_grid,
mock_xticks,
mock_ylabel,
mock_xlabel,
mock_title,
mock_plot,
mock_figure,
mock_close,
mock_savefig,
viz_tools,
):
"""Test successful line chart creation."""
test_data = {"Jan": 100, "Feb": 150, "Mar": 120}
result = viz_tools.create_line_chart(data=test_data, title="Monthly Trend", x_label="Month", y_label="Sales")
result_dict = json.loads(result)
assert result_dict["status"] == "success"
assert result_dict["chart_type"] == "line_chart"
assert result_dict["title"] == "Monthly Trend"
assert result_dict["data_points"] == 3
# Verify matplotlib functions were called
mock_figure.assert_called_once_with(figsize=(10, 6))
mock_plot.assert_called_once()
mock_grid.assert_called_once_with(True, alpha=0.3)
@patch("matplotlib.pyplot.savefig")
@patch("matplotlib.pyplot.close")
@patch("matplotlib.pyplot.figure")
@patch("matplotlib.pyplot.pie")
@patch("matplotlib.pyplot.title")
@patch("matplotlib.pyplot.axis")
def test_create_pie_chart_success(
mock_axis,
mock_title,
mock_pie,
mock_figure,
mock_close,
mock_savefig,
viz_tools,
):
"""Test successful pie chart creation."""
test_data = {"Red": 30, "Blue": 25, "Green": 20, "Yellow": 25}
result = viz_tools.create_pie_chart(data=test_data, title="Color Distribution")
result_dict = json.loads(result)
assert result_dict["status"] == "success"
assert result_dict["chart_type"] == "pie_chart"
assert result_dict["title"] == "Color Distribution"
assert result_dict["data_points"] == 4
# Verify matplotlib functions were called
mock_figure.assert_called_once_with(figsize=(10, 8))
mock_pie.assert_called_once()
mock_title.assert_called_once_with("Color Distribution")
mock_axis.assert_called_once_with("equal")
@patch("matplotlib.pyplot.savefig")
@patch("matplotlib.pyplot.close")
@patch("matplotlib.pyplot.figure")
@patch("matplotlib.pyplot.scatter")
@patch("matplotlib.pyplot.title")
@patch("matplotlib.pyplot.xlabel")
@patch("matplotlib.pyplot.ylabel")
@patch("matplotlib.pyplot.grid")
@patch("matplotlib.pyplot.tight_layout")
def test_create_scatter_plot_success(
mock_tight_layout,
mock_grid,
mock_ylabel,
mock_xlabel,
mock_title,
mock_scatter,
mock_figure,
mock_close,
mock_savefig,
viz_tools,
):
"""Test successful scatter plot creation."""
x_data = [1, 2, 3, 4, 5]
y_data = [2, 4, 6, 8, 10]
result = viz_tools.create_scatter_plot(
x_data=x_data, y_data=y_data, title="Correlation Analysis", x_label="X Values", y_label="Y Values"
)
result_dict = json.loads(result)
assert result_dict["status"] == "success"
assert result_dict["chart_type"] == "scatter_plot"
assert result_dict["title"] == "Correlation Analysis"
assert result_dict["data_points"] == 5
# Verify matplotlib functions were called
mock_figure.assert_called_once_with(figsize=(10, 6))
mock_scatter.assert_called_once()
mock_grid.assert_called_once_with(True, alpha=0.3)
def test_scatter_plot_missing_data(viz_tools):
"""Test scatter plot with missing data parameters."""
result = viz_tools.create_scatter_plot(title="Missing Data Test")
result_dict = json.loads(result)
assert result_dict["status"] == "error"
assert "Missing x_data and y_data" in result_dict["error"]
def test_scatter_plot_mismatched_data_length(viz_tools):
"""Test scatter plot with mismatched data lengths."""
x_data = [1, 2, 3]
y_data = [1, 2] # Different length
result = viz_tools.create_scatter_plot(x_data=x_data, y_data=y_data)
result_dict = json.loads(result)
assert result_dict["status"] == "error"
assert "same length" in result_dict["error"]
@patch("matplotlib.pyplot.savefig")
@patch("matplotlib.pyplot.close")
@patch("matplotlib.pyplot.figure")
@patch("matplotlib.pyplot.scatter")
@patch("matplotlib.pyplot.title")
@patch("matplotlib.pyplot.xlabel")
@patch("matplotlib.pyplot.ylabel")
@patch("matplotlib.pyplot.grid")
@patch("matplotlib.pyplot.tight_layout")
def test_create_scatter_plot_with_alternative_params(
mock_tight_layout,
mock_grid,
mock_ylabel,
mock_xlabel,
mock_title,
mock_scatter,
mock_figure,
mock_close,
mock_savefig,
viz_tools,
):
"""Test scatter plot creation with alternative x,y parameters."""
x_vals = [1, 2, 3, 4, 5]
y_vals = [2, 4, 6, 8, 10]
result = viz_tools.create_scatter_plot(x=x_vals, y=y_vals, title="Alt Params Test")
result_dict = json.loads(result)
assert result_dict["status"] == "success"
assert result_dict["chart_type"] == "scatter_plot"
assert result_dict["data_points"] == 5
@patch("matplotlib.pyplot.savefig")
@patch("matplotlib.pyplot.close")
@patch("matplotlib.pyplot.figure")
@patch("matplotlib.pyplot.hist")
@patch("matplotlib.pyplot.title")
@patch("matplotlib.pyplot.xlabel")
@patch("matplotlib.pyplot.ylabel")
@patch("matplotlib.pyplot.grid")
@patch("matplotlib.pyplot.tight_layout")
def test_create_histogram_success(
mock_tight_layout,
mock_grid,
mock_ylabel,
mock_xlabel,
mock_title,
mock_hist,
mock_figure,
mock_close,
mock_savefig,
viz_tools,
):
"""Test successful histogram creation."""
test_data = [1, 2, 2, 3, 3, 3, 4, 4, 5]
result = viz_tools.create_histogram(
data=test_data, bins=5, title="Distribution Analysis", x_label="Values", y_label="Frequency"
)
result_dict = json.loads(result)
assert result_dict["status"] == "success"
assert result_dict["chart_type"] == "histogram"
assert result_dict["title"] == "Distribution Analysis"
assert result_dict["data_points"] == 9
assert result_dict["bins"] == 5
# Verify matplotlib functions were called
mock_figure.assert_called_once_with(figsize=(10, 6))
mock_hist.assert_called_once()
mock_grid.assert_called_once_with(True, alpha=0.3)
def test_histogram_with_mixed_data_types(viz_tools):
"""Test histogram creation with mixed data types."""
test_data = [1, 2.5, "3", 4, "invalid", 5.0]
result = viz_tools.create_histogram(test_data, title="Mixed Data Test")
result_dict = json.loads(result)
assert result_dict["status"] == "success"
assert result_dict["data_points"] == 5 # Should filter out invalid values
def test_histogram_with_empty_data(viz_tools):
"""Test histogram with empty data."""
result = viz_tools.create_histogram([])
result_dict = json.loads(result)
assert result_dict["status"] == "error"
assert "non-empty list" in result_dict["error"]
def test_histogram_with_no_valid_numeric_data(viz_tools):
"""Test histogram with no valid numeric data."""
result = viz_tools.create_histogram(["invalid", "data", "only"])
result_dict = json.loads(result)
assert result_dict["status"] == "error"
assert "No valid numeric data" in result_dict["error"]
def test_custom_filename(viz_tools):
    """Test chart creation with custom filename."""
    test_data = {"A": 10, "B": 20}
    custom_filename = "custom_chart.png"
    # savefig is patched so no file is actually written to disk.
    with patch("matplotlib.pyplot.savefig"):
        result = viz_tools.create_bar_chart(data=test_data, filename=custom_filename)
    result_dict = json.loads(result)
    assert result_dict["status"] == "success"
    assert custom_filename in result_dict["file_path"]
@patch("matplotlib.pyplot.savefig", side_effect=Exception("Save failed"))
def test_error_handling(mock_savefig, viz_tools):
    """Test error handling in chart creation."""
    # Any exception inside the tool should surface as an error JSON payload.
    test_data = {"A": 10, "B": 20}
    result = viz_tools.create_bar_chart(data=test_data)
    result_dict = json.loads(result)
    assert result_dict["status"] == "error"
    assert "Save failed" in result_dict["error"]
def test_matplotlib_import_error():
    """Test handling of matplotlib import error."""
    # Mapping the module to None makes `import matplotlib` raise ImportError.
    with patch.dict("sys.modules", {"matplotlib": None}):
        with pytest.raises(ImportError, match="matplotlib is not installed"):
            VisualizationTools()
def test_basic_initialization_has_correct_functions(basic_viz_tools):
    """Test that basic initialization includes default chart types."""
    function_names = [func.name for func in basic_viz_tools.functions.values()]
    assert "create_bar_chart" in function_names
    assert "create_line_chart" in function_names
    assert "create_pie_chart" in function_names
    assert "create_scatter_plot" in function_names
    assert "create_histogram" in function_names
def test_normalize_data_for_charts_dict(viz_tools):
    """Test data normalization with dictionary input."""
    # Non-numeric values are coerced to 0.0 rather than raising.
    data = {"A": 10, "B": 20.5, "C": "invalid"}
    normalized = viz_tools._normalize_data_for_charts(data)
    expected = {"A": 10.0, "B": 20.5, "C": 0.0}
    assert normalized == expected
def test_normalize_data_for_charts_list_of_dicts(viz_tools):
    """Test data normalization with list of dictionaries."""
    data = [{"month": "Jan", "sales": 1000}, {"month": "Feb", "sales": 1500}, {"month": "Mar", "sales": 1200}]
    normalized = viz_tools._normalize_data_for_charts(data)
    expected = {"Jan": 1000.0, "Feb": 1500.0, "Mar": 1200.0}
    assert normalized == expected
def test_normalize_data_for_charts_list_of_values(viz_tools):
    """Test data normalization with list of values."""
    data = [10, 20, 30]
    normalized = viz_tools._normalize_data_for_charts(data)
    expected = {"Item 1": 10.0, "Item 2": 20.0, "Item 3": 30.0}
    assert normalized == expected
def test_create_bar_chart_with_json_string(viz_tools):
    """Test bar chart creation with JSON string data."""
    data = '{"A": 10, "B": 20, "C": 15}'
    with patch("matplotlib.pyplot.savefig"):
        result = viz_tools.create_bar_chart(data, title="JSON Chart")
    result_dict = json.loads(result)
    assert result_dict["status"] == "success"
    assert result_dict["chart_type"] == "bar_chart"
    assert result_dict["data_points"] == 3
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_visualization.py",
"license": "Apache License 2.0",
"lines": 380,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_searxng.py | """Unit tests for Searxng class."""
import json
from unittest.mock import Mock, patch
import pytest
from agno.tools.searxng import Searxng
@pytest.fixture
def searxng_instance():
    """Create a Searxng instance."""
    return Searxng(host="http://localhost:53153")
@pytest.fixture
def searxng_with_engines():
    """Create a Searxng instance with engines."""
    return Searxng(host="http://localhost:53153", engines=["google", "bing"])
@pytest.fixture
def searxng_with_fixed_results():
    """Create a Searxng instance with fixed max results."""
    return Searxng(host="http://localhost:53153", fixed_max_results=3)
def test_searxng_search(searxng_instance):
    """Test the search method of Searxng."""
    mock_response_payload = {
        "results": [
            {"title": "Result 1", "url": "http://example.com/1"},
            {"title": "Result 2", "url": "http://example.com/2"},
            {"title": "Result 3", "url": "http://example.com/3"},
        ]
    }
    # Patch httpx.get so no real network request is made.
    with patch("httpx.get") as mock_get:
        mock_response = Mock()
        mock_response.json.return_value = mock_response_payload
        mock_get.return_value = mock_response
        result = searxng_instance.search_web("test query", max_results=2)
        # Parse the JSON result since the method returns JSON string
        result_data = json.loads(result)
        # Check that only 2 results are returned (respecting max_results)
        assert len(result_data["results"]) == 2
        assert result_data["results"][0]["title"] == "Result 1"
        assert result_data["results"][1]["title"] == "Result 2"
        mock_get.assert_called_once_with("http://localhost:53153/search?format=json&q=test%20query")
def test_searxng_search_with_engines(searxng_with_engines):
    """Test search with specific engines configured."""
    mock_response_payload = {"results": [{"title": "Test", "url": "http://test.com"}]}
    with patch("httpx.get") as mock_get:
        mock_response = Mock()
        mock_response.json.return_value = mock_response_payload
        mock_get.return_value = mock_response
        searxng_with_engines.search_web("test query")
        # Configured engines are appended as a comma-separated query parameter.
        expected_url = "http://localhost:53153/search?format=json&q=test%20query&engines=google,bing"
        mock_get.assert_called_once_with(expected_url)
def test_searxng_search_with_fixed_max_results(searxng_with_fixed_results):
    """Test search with fixed max results override."""
    mock_response_payload = {
        "results": [{"title": f"Result {i}", "url": f"http://example.com/{i}"} for i in range(1, 6)]
    }
    with patch("httpx.get") as mock_get:
        mock_response = Mock()
        mock_response.json.return_value = mock_response_payload
        mock_get.return_value = mock_response
        result = searxng_with_fixed_results.search_web("test query", max_results=10)
        result_data = json.loads(result)
        # Should respect fixed_max_results (3) instead of max_results (10)
        assert len(result_data["results"]) == 3
def test_searxng_image_search(searxng_instance):
    """Test the image_search method."""
    mock_response_payload = {"results": [{"title": "Image 1", "url": "http://example.com/img1"}]}
    with patch("httpx.get") as mock_get:
        mock_response = Mock()
        mock_response.json.return_value = mock_response_payload
        mock_get.return_value = mock_response
        # Use a fresh instance so the call count on mock_get stays at one.
        # NOTE(review): the searxng_instance fixture argument is unused here.
        searxng_images = Searxng(host="http://localhost:53153")
        result = searxng_images.image_search("test image")
        expected_url = "http://localhost:53153/search?format=json&q=test%20image&categories=images"
        mock_get.assert_called_once_with(expected_url)
        result_data = json.loads(result)
        assert result_data["results"][0]["title"] == "Image 1"
def test_searxng_news_search():
    """Test the news_search method."""
    mock_response_payload = {"results": [{"title": "News 1", "url": "http://example.com/news1"}]}
    with patch("httpx.get") as mock_get:
        mock_response = Mock()
        mock_response.json.return_value = mock_response_payload
        mock_get.return_value = mock_response
        searxng_news = Searxng(host="http://localhost:53153")
        searxng_news.news_search("breaking news")
        expected_url = "http://localhost:53153/search?format=json&q=breaking%20news&categories=news"
        mock_get.assert_called_once_with(expected_url)
def test_searxng_search_error_handling(searxng_instance):
    """Test error handling in search method."""
    with patch("httpx.get") as mock_get:
        mock_get.side_effect = Exception("Network error")
        # Errors are returned as a message string, not raised to the caller.
        result = searxng_instance.search_web("test query")
        assert "Error fetching results from searxng: Network error" in result
def test_searxng_query_encoding(searxng_instance):
    """Test that queries are properly URL encoded."""
    mock_response_payload = {"results": []}
    with patch("httpx.get") as mock_get:
        mock_response = Mock()
        mock_response.json.return_value = mock_response_payload
        mock_get.return_value = mock_response
        searxng_instance.search_web("test query with spaces & symbols")
        # Spaces become %20 and '&' becomes %26 in the query string.
        expected_url = "http://localhost:53153/search?format=json&q=test%20query%20with%20spaces%20%26%20symbols"
        mock_get.assert_called_once_with(expected_url)
def test_searxng_initialization():
    """Test Searxng initialization with various parameters."""
    searxng = Searxng(host="http://test.com", engines=["google"], fixed_max_results=10)
    assert searxng.host == "http://test.com"
    assert searxng.engines == ["google"]
    assert searxng.fixed_max_results == 10
    assert (
        len(searxng.tools) == 8
    )  # All 8 tools: search, image_search, it_search, map_search, music_search, news_search, science_search, video_search
@pytest.mark.parametrize(
    "category,method_name",
    [
        ("it", "it_search"),
        ("map", "map_search"),
        ("music", "music_search"),
        ("science", "science_search"),
        ("videos", "video_search"),
    ],
)
def test_category_searches(category, method_name):
    """Test all category-specific search methods."""
    mock_response_payload = {"results": [{"title": "Test", "url": "http://test.com"}]}
    with patch("httpx.get") as mock_get:
        mock_response = Mock()
        mock_response.json.return_value = mock_response_payload
        mock_get.return_value = mock_response
        # Create instance with only the specific method included
        searxng = Searxng(host="http://localhost:53153", include_tools=[method_name])
        # Call the method
        method = getattr(searxng, method_name)
        result = method("test query")
        # Each category method appends its own `categories=` query parameter.
        expected_url = f"http://localhost:53153/search?format=json&q=test%20query&categories={category}"
        mock_get.assert_called_once_with(expected_url)
        result_data = json.loads(result)
        assert result_data["results"][0]["title"] == "Test"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_searxng.py",
"license": "Apache License 2.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/utils/models/schema_utils.py | """
Utility functions for handling JSON schemas across different model providers.
This module provides model-agnostic schema transformations and validations.
"""
from typing import Any, Dict, Type
from pydantic import BaseModel
def is_dict_field(schema: Dict[str, Any]) -> bool:
    """
    Check if a schema represents a Dict[str, T] field.

    A Dict field is an "object" schema that carries a typed
    ``additionalProperties`` sub-schema and declares no explicit
    ``properties`` of its own.

    Args:
        schema: JSON schema dictionary

    Returns:
        bool: True if the schema represents a Dict field
    """
    # Explicit properties (or a non-object type) rule out a Dict field.
    if schema.get("type") != "object" or "properties" in schema:
        return False
    additional = schema.get("additionalProperties")
    # additionalProperties may also be a bool (true/false); only a typed
    # sub-schema marks a Dict[str, T] field.
    return isinstance(additional, dict) and "type" in additional
def get_dict_value_type(schema: Dict[str, Any]) -> str:
    """
    Extract the value type from a Dict field schema.

    Args:
        schema: JSON schema dictionary for a Dict field

    Returns:
        str: The type of values in the dictionary (e.g., "integer", "string");
        falls back to "string" when the schema is not a Dict field.
    """
    # Inlined Dict-field detection: object type, typed additionalProperties,
    # and no explicit properties.
    if (
        schema.get("type") == "object"
        and "properties" not in schema
        and isinstance(schema.get("additionalProperties"), dict)
        and "type" in schema["additionalProperties"]
    ):
        return schema["additionalProperties"]["type"]
    return "string"
def normalize_schema_for_provider(schema: Dict[str, Any], provider: str) -> Dict[str, Any]:
    """
    Normalize a Pydantic-generated schema for a specific model provider.

    Args:
        schema: Original Pydantic JSON schema
        provider: Model provider name ("openai", "gemini", "anthropic", etc.)

    Returns:
        Dict[str, Any]: Normalized schema for the provider
    """
    import copy

    # Operate on a deep copy so the caller's schema is never mutated.
    working = copy.deepcopy(schema)

    key = provider.lower()
    if key == "openai":
        return _normalize_for_openai(working)
    if key == "gemini":
        return _normalize_for_gemini(working)
    # Any other provider gets the generic, provider-agnostic treatment.
    return _normalize_generic(working)
def _normalize_for_openai(schema: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize schema for OpenAI structured outputs.

    Delegates to ``sanitize_response_schema``, which mutates ``schema``
    in place; the same mapping is returned for call-chaining.
    """
    # Imported lazily to avoid a module-level import cycle with the
    # provider-specific utils — TODO confirm that is the reason.
    from agno.utils.models.openai_responses import sanitize_response_schema
    sanitize_response_schema(schema)
    return schema
def _normalize_for_gemini(schema: Dict[str, Any]) -> Dict[str, Any]:
    """
    Normalize schema for Gemini.

    Gemini has specific requirements for object types and doesn't support
    additionalProperties in the same way as JSON Schema, so Dict fields get
    a descriptive hint here while ``additionalProperties`` is intentionally
    left intact for the downstream convert_schema step to translate.
    """

    def _walk(node: Dict[str, Any]) -> None:
        if not isinstance(node, dict):
            return
        if is_dict_field(node):
            # Flag the field as a mapping in its description; keep
            # additionalProperties so convert_schema can build its
            # placeholder properties from the value type.
            value_type = get_dict_value_type(node)
            hint = f"Dictionary with {value_type} values"
            existing = node.get("description", "")
            node["description"] = f"{existing} ({hint})" if existing else hint
        # Walk every nested dict, including dicts inside lists.
        for child in node.values():
            if isinstance(child, dict):
                _walk(child)
            elif isinstance(child, list):
                for entry in child:
                    if isinstance(entry, dict):
                        _walk(entry)

    _walk(schema)
    return schema
def _normalize_generic(schema: Dict[str, Any]) -> Dict[str, Any]:
"""Generic normalization for other providers."""
def _process_schema(s: Dict[str, Any]) -> None:
if isinstance(s, dict):
# Remove null defaults
if "default" in s and s["default"] is None:
s.pop("default")
# Recursively process nested schemas
for value in s.values():
if isinstance(value, dict):
_process_schema(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
_process_schema(item)
_process_schema(schema)
return schema
def get_response_schema_for_provider(output_schema: Type[BaseModel], provider: str) -> Dict[str, Any]:
    """
    Get a properly formatted response schema for a specific model provider.

    Args:
        output_schema: Pydantic BaseModel class
        provider: Model provider name

    Returns:
        Dict[str, Any]: Provider-specific schema
    """
    # Pydantic emits the draft JSON schema; the provider-specific normalizer
    # then returns a provider-ready (deep-copied) variant of it.
    json_schema = output_schema.model_json_schema()
    return normalize_schema_for_provider(json_schema, provider)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/models/schema_utils.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/models/google/test_structured_response.py | import enum
from typing import Dict, List, Union
from pydantic import BaseModel, Field
from rich.pretty import pprint # noqa
from agno.agent import Agent, RunOutput # noqa
from agno.models.google import Gemini
# NOTE: no class docstrings on these models on purpose — Pydantic copies a
# class docstring into the generated JSON schema's "description", which
# would change the schema sent to the provider.
# Flat script model: primitive fields plus a list of character names.
class SimpleMovieScript(BaseModel):
    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
# Exercises open-ended Dict[str, int] fields in structured output.
class MovieScriptWithDict(BaseModel):
    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    rating: Dict[str, int] = Field(
        ...,
        description="Your own rating of the movie. 1 to 5. Return a dictionary with the keys 'story' and 'acting'.",
    )
# Sub-model used by MovieScriptWithNested below.
class Rating(BaseModel):
    story: int = Field(..., description="Your own rating of the movie. 1 to 5.")
    acting: int = Field(..., description="Your own rating of the movie. 1 to 5.")
# Exercises nested Pydantic models in structured output.
class MovieScriptWithNested(BaseModel):
    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    rating: Rating = Field(
        ...,
        description="Your own rating of the movie. 1 to 5.",
    )
# Exercises enum-valued fields in structured output.
class Grade(enum.Enum):
    A_PLUS = "a+"
    A = "a"
    B = "b"
    C = "c"
    D = "d"
    F = "f"
class Recipe(BaseModel):
    recipe_name: str
    rating: Grade
# Exercises Union-typed fields in structured output.
class UnionFieldResponse(BaseModel):
    flexible_value: Union[str, int, bool] = Field(..., description="Value that can be string, number, or boolean")
    name: str = Field(..., description="Required name field")
def test_structured_response():
    """Flat structured output: every field of SimpleMovieScript is populated and typed."""
    agent = Agent(
        model=Gemini(id="gemini-2.0-flash"),
        description="You help people write movie scripts.",
        output_schema=SimpleMovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    for attr in ("setting", "ending", "genre", "name", "storyline"):
        assert isinstance(getattr(script, attr), str)
    assert isinstance(script.characters, List)
def test_structured_response_with_dict_fields():
    """Structured output where one field is an open-ended Dict[str, int]."""
    agent = Agent(
        model=Gemini(id="gemini-2.0-flash"),
        description="You help people write movie scripts.",
        output_schema=MovieScriptWithDict,
    )
    script = agent.run("New York").content
    assert script is not None
    assert isinstance(script.rating, Dict)
    for attr in ("setting", "ending", "genre"):
        assert isinstance(getattr(script, attr), str)
def test_structured_response_with_nested_fields():
    """Structured output with a nested Pydantic sub-model (Rating)."""
    agent = Agent(
        model=Gemini(id="gemini-2.0-flash"),
        description="You help people write movie scripts.",
        output_schema=MovieScriptWithNested,
    )
    script = agent.run("New York").content
    assert script is not None
    assert isinstance(script.rating, Rating)
    assert isinstance(script.rating.story, int)
    assert isinstance(script.rating.acting, int)
    for attr in ("setting", "ending", "genre"):
        assert isinstance(getattr(script, attr), str)
def test_structured_response_with_enum_fields():
    """Structured output whose rating field must deserialize to a Grade enum."""
    agent = Agent(
        model=Gemini(id="gemini-2.0-flash"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.rating, Grade)
    assert isinstance(recipe.recipe_name, str)
def test_structured_response_with_union_field_types():
    """Test structured output with Union types that exercise our union handling logic"""
    agent = Agent(
        model=Gemini(id="gemini-2.0-flash"),
        description="You return data with flexible union-typed fields.",
        output_schema=UnionFieldResponse,
    )
    result = agent.run(
        "Return a response with a flexible value that could be text, number, or true/false"
    ).content
    assert result is not None
    assert isinstance(result.name, str)
    # Any member of the declared union is an acceptable value.
    assert isinstance(result.flexible_value, (str, int, bool))
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/google/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/openai/chat/test_structured_response.py | import enum
from typing import Dict, List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.openai.chat import OpenAIChat
# Movie-script model combining scalar fields, a list, and a Dict[str, int]
# mapping; the Dict field is the interesting case for structured outputs.
# No class docstring on purpose: Pydantic would copy it into the schema's
# "description" and change what is sent to the provider.
class MovieScript(BaseModel):
    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
    rating: Dict[str, int] = Field(
        ...,
        description="Your own rating of the movie. 1-10. Return a dictionary with the keys 'story' and 'acting'.",
    )
def test_structured_response_with_dict_fields():
    """All MovieScript fields, including the Dict rating, come back typed."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    assert isinstance(script.rating, Dict)
    for attr in ("setting", "ending", "genre", "name", "storyline"):
        assert isinstance(getattr(script, attr), str)
    assert isinstance(script.characters, List)
def test_structured_response_with_enum_fields():
    """Enum-valued fields must deserialize back into the Grade enum."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=OpenAIChat(id="gpt-4o"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.rating, Grade)
    assert isinstance(recipe.recipe_name, str)
def test_structured_response_strict_output_false():
    """Test structured response with strict_output=False (guided mode)"""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    # Guided mode should still yield parseable structured content.
    assert agent.run("Create a short action movie").content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/openai/chat/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/openai/responses/test_structured_response.py | import enum
from typing import Dict, List, Literal
import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput # noqa
from agno.models.openai import OpenAIResponses
from agno.tools.websearch import WebSearchTools
# Movie-script model combining scalar fields, a list, and a Dict[str, int]
# mapping; the Dict field is the interesting case for the Responses API.
# No class docstring on purpose: Pydantic would copy it into the schema's
# "description" and change what is sent to the provider.
class MovieScript(BaseModel):
    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
    rating: Dict[str, int] = Field(
        ...,
        description="Your own rating of the movie. 1-10. Return a dictionary with the keys 'story' and 'acting'.",
    )
def test_structured_response_with_integer_field():
    # NOTE(review): despite the name, this actually exercises the
    # Dict[str, int] `rating` field of MovieScript — consider renaming.
    agent = Agent(
        model=OpenAIResponses(id="gpt-4o-mini"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    content = agent.run("New York").content
    assert content is not None
    assert isinstance(content.rating, Dict)
def test_structured_response_with_enum_fields():
    """Enum-valued fields must deserialize back into the Grade enum."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=OpenAIResponses(id="gpt-4o"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.rating, Grade)
    assert isinstance(recipe.recipe_name, str)
# Structured research result; confidence_level is constrained via Literal so
# the provider must emit one of exactly three values. No class docstring on
# purpose: Pydantic would copy it into the generated schema's "description".
class ResearchSummary(BaseModel):
    topic: str = Field(..., description="Main topic researched")
    key_findings: List[str] = Field(..., description="List of key findings from the research")
    summary: str = Field(..., description="Brief summary of the research")
    confidence_level: Literal["High", "Medium", "Low"] = Field(
        ..., description="High / Medium / Low confidence in the findings"
    )
def test_tool_use_with_structured_output():
    """Test basic tool use combined with structured output (non-streaming)."""
    agent = Agent(
        model=OpenAIResponses(id="gpt-5-mini"),
        tools=[WebSearchTools()],
        output_schema=ResearchSummary,
        telemetry=False,
    )
    response = agent.run("Research the latest trends in machine learning on the internet and provide a summary")

    # Structured output shape is the point of this regression test.
    summary = response.content
    assert summary is not None
    assert isinstance(summary, ResearchSummary)
    # Fields must be populated; exact wording does not matter.
    assert isinstance(summary.topic, str) and summary.topic.strip()
    assert isinstance(summary.key_findings, list) and summary.key_findings
    assert isinstance(summary.summary, str) and summary.summary.strip()
    assert summary.confidence_level in ["High", "Medium", "Low"]
    for finding in summary.key_findings:
        assert isinstance(finding, str) and finding.strip()
    # A tool must actually have been invoked on the way to the answer.
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
def test_tool_use_with_structured_output_stream():
    """Test streaming tool use combined with structured output - the main bug this PR fixes."""
    agent = Agent(
        model=OpenAIResponses(id="gpt-5-mini"),
        tools=[WebSearchTools()],
        output_schema=ResearchSummary,
        telemetry=False,
    )
    stream = agent.run(
        "Research the latest trends in machine learning on the internet and provide a summary",
        stream=True,
        stream_events=True,
    )

    events = []
    saw_tool_call = False
    structured = None
    for event in stream:
        events.append(event)
        # Track tool-call events with a populated tool name.
        if event.event in ("ToolCallStarted", "ToolCallCompleted") and getattr(event, "tool", None):
            if event.tool.tool_name:  # type: ignore
                saw_tool_call = True
        # Remember the last structured payload observed in the stream.
        candidate = getattr(event, "content", None)
        if isinstance(candidate, ResearchSummary):
            structured = candidate

    assert events
    assert saw_tool_call, "No tool calls observed in stream"
    # Final structured output is the core of this bug-fix test.
    assert structured is not None
    assert isinstance(structured, ResearchSummary)
    assert isinstance(structured.topic, str) and structured.topic.strip()
    assert isinstance(structured.key_findings, list) and structured.key_findings
    assert isinstance(structured.summary, str) and structured.summary.strip()
    assert structured.confidence_level in ["High", "Medium", "Low"]
    for finding in structured.key_findings:
        assert isinstance(finding, str) and finding.strip()
@pytest.mark.asyncio
async def test_async_tool_use_with_structured_output_stream():
    """Test async streaming tool use combined with structured output."""

    # The docstring and return value below are runtime data (tool description
    # and tool output) and must stay exactly as-is.
    async def get_research_data(topic: str) -> str:
        """Get research data for a given topic."""
        return f"Research findings on {topic}: This topic has multiple aspects including technical implementations, best practices, current trends, and future prospects in the field."

    agent = Agent(
        model=OpenAIResponses(id="gpt-5-mini"),
        tools=[get_research_data],
        output_schema=ResearchSummary,
        telemetry=False,
    )

    events = []
    saw_tool_call = False
    structured = None
    async for event in agent.arun(
        "Research web development trends using available data", stream=True, stream_events=True
    ):
        events.append(event)
        # Track tool-call events with a populated tool name.
        if event.event in ("ToolCallStarted", "ToolCallCompleted") and getattr(event, "tool", None):
            if event.tool.tool_name:  # type: ignore
                saw_tool_call = True
        # Remember the last structured payload observed in the stream.
        candidate = getattr(event, "content", None)
        if isinstance(candidate, ResearchSummary):
            structured = candidate

    assert events
    assert saw_tool_call, "No tool calls observed in async stream"
    # Async variant of the structured-output-with-tools bug-fix check.
    assert structured is not None
    assert isinstance(structured, ResearchSummary)
    assert isinstance(structured.topic, str) and structured.topic.strip()
    assert isinstance(structured.key_findings, list) and structured.key_findings
    assert isinstance(structured.summary, str) and structured.summary.strip()
    assert structured.confidence_level in ["High", "Medium", "Low"]
    for finding in structured.key_findings:
        assert isinstance(finding, str) and finding.strip()
def test_structured_response_strict_output_false():
    """Test structured response with strict_output=False (guided mode)"""
    agent = Agent(
        model=OpenAIResponses(id="gpt-4o", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    # Guided mode should still yield parseable structured content.
    assert agent.run("Create a short action movie").content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/openai/responses/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/utils/test_gemini_dict_support.py | """Tests for Gemini Dict field support in convert_schema"""
from agno.utils.gemini import convert_schema
def test_convert_schema_dict_field_integer():
    """Test converting Dict[str, int] field creates placeholder properties"""
    schema = {
        "type": "object",
        "additionalProperties": {"type": "integer"},
        "description": "Rating dictionary",
    }
    converted = convert_schema(schema)
    assert converted is not None
    assert converted.type == "OBJECT"
    assert converted.properties is not None
    # Dict fields surface as a single synthetic placeholder property.
    assert "example_key" in converted.properties
    placeholder = converted.properties["example_key"]
    assert placeholder.type == "INTEGER"
    assert "integer values" in placeholder.description.lower()
def test_convert_schema_dict_field_string():
    """Test converting Dict[str, str] field creates string placeholder"""
    schema = {
        "type": "object",
        "additionalProperties": {"type": "string"},
        "description": "Metadata dictionary",
    }
    converted = convert_schema(schema)
    assert converted is not None
    assert converted.type == "OBJECT"
    placeholder = converted.properties["example_key"]
    assert placeholder.type == "STRING"
    assert "string values" in placeholder.description.lower()
def test_convert_schema_dict_field_number():
    """Test converting Dict[str, float] field creates number placeholder"""
    schema = {
        "type": "object",
        "additionalProperties": {"type": "number"},
        "description": "Score dictionary",
    }
    converted = convert_schema(schema)
    assert converted is not None
    assert converted.type == "OBJECT"
    placeholder = converted.properties["example_key"]
    assert placeholder.type == "NUMBER"
    assert "number values" in placeholder.description.lower()
def test_convert_schema_dict_field_with_description():
    """Test Dict field preserves original description"""
    schema = {
        "type": "object",
        "additionalProperties": {"type": "integer"},
        "description": "User ratings for movies",
    }
    converted = convert_schema(schema)
    assert converted is not None
    # Original description survives; enrichment is schema_utils' job.
    assert "User ratings for movies" in converted.description
    # The placeholder still describes the value type.
    placeholder = converted.properties["example_key"]
    assert "integer values" in placeholder.description.lower()
def test_convert_schema_dict_field_without_description():
    """Test Dict field gets default description when none provided"""
    converted = convert_schema({"type": "object", "additionalProperties": {"type": "string"}})
    assert converted is not None
    # Without an author-provided description, a default one is synthesized.
    assert "Dictionary with string values" in converted.description
    assert "key-value pairs" in converted.description
def test_convert_schema_regular_object_still_works():
    """Test that regular objects with properties still work normally"""
    schema = {
        "type": "object",
        "description": "A regular object",
        "properties": {
            "name": {"type": "string", "description": "Name field"},
            "age": {"type": "integer", "description": "Age field"},
        },
        "required": ["name"],
    }
    converted = convert_schema(schema)
    assert converted is not None
    assert converted.type == "OBJECT"
    assert converted.description == "A regular object"
    # Declared properties are carried over verbatim — no synthetic placeholder.
    assert "name" in converted.properties
    assert "age" in converted.properties
    assert "example_key" not in converted.properties
    assert converted.properties["name"].type == "STRING"
    assert converted.properties["age"].type == "INTEGER"
    assert "name" in converted.required
def test_convert_schema_object_with_additional_properties_false():
    """Test object with additionalProperties: false works normally"""
    converted = convert_schema(
        {"type": "object", "properties": {"name": {"type": "string"}}, "additionalProperties": False}
    )
    assert converted is not None
    assert converted.type == "OBJECT"
    # additionalProperties=False is not a typed Dict — no placeholder.
    assert "name" in converted.properties
    assert "example_key" not in converted.properties
def test_convert_schema_object_with_additional_properties_true():
    """Test object with additionalProperties: true (not a typed Dict)"""
    converted = convert_schema(
        {"type": "object", "properties": {"name": {"type": "string"}}, "additionalProperties": True}
    )
    assert converted is not None
    assert converted.type == "OBJECT"
    # additionalProperties=True carries no value schema — no placeholder.
    assert "name" in converted.properties
    assert "example_key" not in converted.properties
def test_convert_schema_empty_object():
    """Test empty object without properties or additionalProperties"""
    converted = convert_schema({"type": "object", "description": "Empty object"})
    assert converted is not None
    assert converted.type == "OBJECT"
    assert converted.description == "Empty object"
def test_convert_schema_nested_dict_in_properties():
    """Test object with both regular properties and Dict fields"""
    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string", "description": "Name field"},
            "metadata": {
                "type": "object",
                "additionalProperties": {"type": "string"},
                "description": "Dynamic metadata",
            },
        },
        "required": ["name"],
    }
    converted = convert_schema(schema)
    assert converted is not None
    assert converted.type == "OBJECT"
    # The plain property converts normally.
    assert "name" in converted.properties
    assert converted.properties["name"].type == "STRING"
    # The Dict-valued property becomes an object with a placeholder entry.
    assert "metadata" in converted.properties
    metadata = converted.properties["metadata"]
    assert metadata.type == "OBJECT"
    assert metadata.properties is not None
    assert "example_key" in metadata.properties
    assert metadata.properties["example_key"].type == "STRING"
def test_convert_schema_dict_field_case_insensitive_type():
    """Test that type conversion handles different cases properly"""
    # NOTE(review): only a lowercase type is exercised here; a mixed-case
    # input would be needed to truly cover case-insensitivity.
    converted = convert_schema(
        {"type": "object", "additionalProperties": {"type": "integer"}, "description": "Test dict"}
    )
    assert converted is not None
    # Lowercase JSON-schema types are upper-cased for Gemini.
    assert converted.properties["example_key"].type == "INTEGER"
def test_convert_schema_dict_field_union_types():
    """Test Dict field with union types from Zod schemas"""
    converted = convert_schema(
        {
            "type": "object",
            "additionalProperties": {"type": ["string", "number", "boolean"]},
            "description": "Mixed value dictionary",
        }
    )
    assert converted is not None
    assert converted.type == "OBJECT"
    placeholder = converted.properties["example_key"]
    # The first member of the union wins; the others are documented.
    assert placeholder.type == "STRING"
    assert "supports union types: string, number, boolean" in placeholder.description
    # The author-supplied description is preserved.
    assert converted.description == "Mixed value dictionary"
def test_convert_schema_dict_field_union_types_with_null():
    """Test Dict field with nullable union types"""
    converted = convert_schema(
        {
            "type": "object",
            "additionalProperties": {"type": ["string", "null"]},
            "description": "Nullable string dictionary",
        }
    )
    assert converted is not None
    placeholder = converted.properties["example_key"]
    assert placeholder.type == "STRING"
    assert "supports union types: string, null" in placeholder.description
def test_convert_schema_dict_field_union_empty_list():
    """Test Dict field with empty union type list"""
    converted = convert_schema(
        {"type": "object", "additionalProperties": {"type": []}, "description": "Empty union dictionary"}
    )
    assert converted is not None
    placeholder = converted.properties["example_key"]
    # No union candidates at all falls back to STRING.
    assert placeholder.type == "STRING"
    assert "supports union types:" in placeholder.description
def test_convert_schema_array_with_empty_items():
    """Test array schema with empty items definition"""
    converted = convert_schema(
        {"type": "array", "items": {}, "description": "Array with any items"}
    )
    assert converted is not None
    assert converted.type == "ARRAY"
    assert converted.items is not None
    # An unconstrained items schema defaults to STRING elements.
    assert converted.items.type == "STRING"
    assert converted.description == "Array with any items"
def test_convert_schema_top_level_nullable_type():
    """Test top-level nullable types like ['string', 'null']"""
    converted = convert_schema({"type": ["string", "null"], "description": "Nullable string field"})
    assert converted is not None
    # The first non-null union member determines the Gemini type.
    assert converted.type == "STRING"
    assert converted.description == "Nullable string field"
def test_convert_schema_top_level_union_type():
    """Test top-level union types like ['string', 'number']"""
    converted = convert_schema({"type": ["string", "number", "boolean"], "description": "Multi-type field"})
    assert converted is not None
    # The first union member determines the Gemini type.
    assert converted.type == "STRING"
    assert converted.description == "Multi-type field"
def test_convert_schema_top_level_only_null():
    """Test top-level with only null types"""
    # A schema that can only ever be null has no Gemini representation.
    assert convert_schema({"type": ["null"], "description": "Only null field"}) is None
def test_convert_schema_dict_union_with_number_first():
    """Test Dict field where number comes first in union"""
    converted = convert_schema(
        {
            "type": "object",
            "additionalProperties": {"type": ["number", "string"]},
            "description": "Number-first union dictionary",
        }
    )
    assert converted is not None
    placeholder = converted.properties["example_key"]
    # Union ordering matters: the first member wins.
    assert placeholder.type == "NUMBER"
    assert "supports union types: number, string" in placeholder.description
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_gemini_dict_support.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/utils/test_openai_responses.py | """Tests for openai_responses module"""
import copy
from typing import Dict, List, Optional
from pydantic import BaseModel, Field
from agno.utils.models.openai_responses import sanitize_response_schema
# No class docstrings on these fixtures on purpose — Pydantic copies a class
# docstring into the schema's "description", which would alter the schemas
# under test.
# Baseline model: only scalar, required fields.
class SimpleModel(BaseModel):
    name: str = Field(..., description="Name field")
    age: int = Field(..., description="Age field")
# Mixes a plain required field with open-ended Dict[str, T] mappings.
class DictModel(BaseModel):
    name: str = Field(..., description="Name field")
    rating: Dict[str, int] = Field(..., description="Rating dictionary")
    scores: Dict[str, float] = Field(..., description="Score dictionary")
# Optional field defaults to None, which the sanitizer must strip.
class OptionalModel(BaseModel):
    name: str = Field(..., description="Name field")
    optional_field: Optional[str] = Field(None, description="Optional field")
def test_sanitize_response_schema_dict_fields_excluded_from_required():
    """Test that Dict fields are excluded from the required array"""
    schema = copy.deepcopy(DictModel.model_json_schema())
    sanitize_response_schema(schema)
    required = schema.get("required", [])
    # Only the scalar field stays required; open-ended mappings are dropped.
    assert "name" in required
    assert "rating" not in required
    assert "scores" not in required
def test_sanitize_response_schema_preserves_dict_additional_properties():
    """Test that Dict fields preserve their additionalProperties schema"""
    schema = copy.deepcopy(DictModel.model_json_schema())
    sanitize_response_schema(schema)
    # Each Dict field keeps the sub-schema that types its values.
    for field_name, value_type in (("rating", "integer"), ("scores", "number")):
        field_schema = schema["properties"][field_name]
        assert "additionalProperties" in field_schema
        assert field_schema["additionalProperties"]["type"] == value_type
def test_sanitize_response_schema_sets_root_additional_properties_false():
    """Sanitization pins additionalProperties to False at the schema root."""
    sanitized = copy.deepcopy(SimpleModel.model_json_schema())
    sanitize_response_schema(sanitized)
    assert sanitized.get("additionalProperties") is False


def test_sanitize_response_schema_regular_fields_required():
    """Scalar fields all land in the required array."""
    sanitized = copy.deepcopy(SimpleModel.model_json_schema())
    sanitize_response_schema(sanitized)
    required = sanitized.get("required", [])
    assert "name" in required
    assert "age" in required
def test_sanitize_response_schema_removes_null_defaults():
    """A `default: null` entry is stripped from Optional fields."""
    sanitized = copy.deepcopy(OptionalModel.model_json_schema())
    sanitize_response_schema(sanitized)
    optional_field = sanitized["properties"]["optional_field"]
    # Either the default key is gone, or whatever default remains is non-null.
    has_null_default = "default" in optional_field and optional_field.get("default") is None
    assert not has_null_default
def test_sanitize_response_schema_nested_objects():
    """Sanitization handles Dict-of-Dict fields."""

    class NestedModel(BaseModel):
        name: str = Field(..., description="Name")
        nested: Dict[str, Dict[str, int]] = Field(..., description="Nested dict")

    sanitized = copy.deepcopy(NestedModel.model_json_schema())
    sanitize_response_schema(sanitized)
    required = sanitized.get("required", [])
    # Scalar stays required; the top-level Dict field does not.
    assert "name" in required
    assert "nested" not in required
    # The Dict's value schema survives sanitization.
    assert "additionalProperties" in sanitized["properties"]["nested"]
def test_sanitize_response_schema_array_items():
    """Sanitization handles List[Dict[...]] fields."""

    class ArrayModel(BaseModel):
        name: str = Field(..., description="Name")
        items: List[Dict[str, int]] = Field(..., description="Array of dicts")

    sanitized = copy.deepcopy(ArrayModel.model_json_schema())
    sanitize_response_schema(sanitized)
    required = sanitized.get("required", [])
    assert "name" in required
    # The list itself remains required even though its items are Dicts.
    assert "items" in required
    items_field = sanitized["properties"]["items"]
    assert items_field["type"] == "array"
    # When present, the per-item Dict value schema is preserved.
    inner_items = items_field.get("items", {})
    if "additionalProperties" in inner_items:
        assert inner_items["additionalProperties"]["type"] == "integer"
def test_sanitize_response_schema_mixed_object_with_properties_and_additional():
    """An object with both fixed properties and additionalProperties is handled."""
    source_schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string", "description": "Fixed property"},
            "metadata": {
                "type": "object",
                "additionalProperties": {"type": "string"},
                "description": "Dynamic metadata",
            },
        },
        "required": ["name", "metadata"],
    }
    sanitized = copy.deepcopy(source_schema)
    sanitize_response_schema(sanitized)
    required = sanitized.get("required", [])
    # Fixed property remains required; the Dict-like field is dropped from required.
    assert "name" in required
    assert "metadata" not in required
    metadata = sanitized["properties"]["metadata"]
    assert "additionalProperties" in metadata
    assert metadata["additionalProperties"]["type"] == "string"
def test_sanitize_response_schema_object_without_additional_properties():
    """Plain objects gain additionalProperties: false and fully-required properties."""
    sanitized = copy.deepcopy(
        {"type": "object", "properties": {"name": {"type": "string"}, "age": {"type": "integer"}}}
    )
    sanitize_response_schema(sanitized)
    assert sanitized.get("additionalProperties") is False
    required = sanitized.get("required", [])
    assert "name" in required
    assert "age" in required


def test_sanitize_response_schema_object_with_additional_properties_true():
    """additionalProperties: true is tightened to false."""
    sanitized = copy.deepcopy(
        {"type": "object", "properties": {"name": {"type": "string"}}, "additionalProperties": True}
    )
    sanitize_response_schema(sanitized)
    assert sanitized.get("additionalProperties") is False
def test_sanitize_response_schema_preserves_non_object_types():
    """Non-object schemas pass through unchanged (aside from null-default removal)."""
    string_sanitized = copy.deepcopy({"type": "string", "description": "A string"})
    array_sanitized = copy.deepcopy({"type": "array", "items": {"type": "integer"}})
    sanitize_response_schema(string_sanitized)
    sanitize_response_schema(array_sanitized)
    assert string_sanitized["type"] == "string"
    assert array_sanitized["type"] == "array"
    assert array_sanitized["items"]["type"] == "integer"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_openai_responses.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/utils/test_schema_utils.py | """Tests for schema_utils module"""
from typing import Dict, List, Optional
from pydantic import BaseModel, Field
from agno.utils.models.schema_utils import (
_normalize_for_gemini,
_normalize_for_openai,
get_dict_value_type,
get_response_schema_for_provider,
is_dict_field,
)
class SimpleModel(BaseModel):
    """Model with only scalar fields."""

    name: str = Field(..., description="Name field")
    age: int = Field(..., description="Age field")


class DictModel(BaseModel):
    """Model with several Dict-valued fields of different value types."""

    name: str = Field(..., description="Name field")
    rating: Dict[str, int] = Field(..., description="Rating dictionary")
    scores: Dict[str, float] = Field(..., description="Score dictionary")
    metadata: Dict[str, str] = Field(..., description="Metadata dictionary")


class ComplexModel(BaseModel):
    """Model mixing scalar, Dict, list, and Optional fields."""

    name: str = Field(..., description="Name field")
    rating: Dict[str, int] = Field(..., description="Rating dictionary")
    tags: List[str] = Field(..., description="List of tags")
    optional_field: Optional[str] = Field(None, description="Optional field")
def test_is_dict_field_positive():
    """A typed additionalProperties object is recognized as a Dict field."""
    candidate = {"type": "object", "additionalProperties": {"type": "integer"}, "description": "Rating dictionary"}
    assert is_dict_field(candidate) is True


def test_is_dict_field_negative_regular_object():
    """An object with fixed properties is not a Dict field."""
    candidate = {"type": "object", "properties": {"name": {"type": "string"}}, "required": ["name"]}
    assert is_dict_field(candidate) is False


def test_is_dict_field_negative_additional_properties_false():
    """additionalProperties: false disqualifies an object from being a Dict field."""
    candidate = {"type": "object", "additionalProperties": False, "properties": {"name": {"type": "string"}}}
    assert is_dict_field(candidate) is False


def test_is_dict_field_negative_no_additional_properties():
    """Missing additionalProperties means the object is not a Dict field."""
    candidate = {"type": "object", "description": "Regular object"}
    assert is_dict_field(candidate) is False
def test_get_dict_value_type():
    """The value type is read straight out of additionalProperties."""
    for value_type in ("integer", "number", "string"):
        candidate = {"type": "object", "additionalProperties": {"type": value_type}}
        assert get_dict_value_type(candidate) == value_type


def test_get_dict_value_type_non_dict():
    """Non-Dict objects fall back to the default value type of 'string'."""
    candidate = {"type": "object", "properties": {"name": {"type": "string"}}}
    assert get_dict_value_type(candidate) == "string"
def test_normalize_for_openai():
    """OpenAI normalization drops Dict fields from required and locks the root."""
    normalized = _normalize_for_openai(DictModel.model_json_schema().copy())
    required = normalized.get("required", [])
    # Only the scalar field remains required.
    assert "name" in required
    assert "rating" not in required
    assert "scores" not in required
    # Dict value schemas survive normalization.
    rating = normalized["properties"]["rating"]
    assert "additionalProperties" in rating
    assert rating["additionalProperties"]["type"] == "integer"
    # The root object is closed.
    assert normalized.get("additionalProperties") is False
def test_normalize_for_gemini():
    """Gemini normalization keeps Dict info and enriches the description."""
    normalized = _normalize_for_gemini(DictModel.model_json_schema().copy())
    rating = normalized["properties"]["rating"]
    # additionalProperties is preserved for the later convert_schema step.
    assert "additionalProperties" in rating
    assert rating["additionalProperties"]["type"] == "integer"
    # The description is augmented with the value type.
    assert "Dictionary with integer values" in rating["description"]
def test_get_response_schema_for_provider_openai():
    """Provider lookup for 'openai' applies the OpenAI normalization rules."""
    result = get_response_schema_for_provider(DictModel, "openai")
    required = result.get("required", [])
    assert "name" in required
    assert "rating" not in required
    assert "scores" not in required
    rating = result["properties"]["rating"]
    assert "additionalProperties" in rating
    assert rating["additionalProperties"]["type"] == "integer"


def test_get_response_schema_for_provider_gemini():
    """Provider lookup for 'gemini' applies the Gemini normalization rules."""
    result = get_response_schema_for_provider(DictModel, "gemini")
    rating = result["properties"]["rating"]
    assert "additionalProperties" in rating
    assert rating["additionalProperties"]["type"] == "integer"
    assert "Dictionary with integer values" in rating["description"]
def test_get_response_schema_for_provider_unknown():
    """Unknown providers fall back to the generic normalization."""
    result = get_response_schema_for_provider(ComplexModel, "unknown_provider")
    assert "properties" in result
    assert "required" in result
    # Null defaults are stripped by the generic pass as well.
    optional_field = result["properties"]["optional_field"]
    assert "default" not in optional_field or optional_field.get("default") is not None
def test_complex_model_schema_handling():
    """Mixed scalar/list/Dict models normalize correctly for OpenAI."""
    result = get_response_schema_for_provider(ComplexModel, "openai")
    required = result.get("required", [])
    # Scalar and list fields are required; the Dict field is not.
    assert "name" in required
    assert "tags" in required
    assert "rating" not in required
    # Optional field handling depends on implementation
    # (could be required or not based on OpenAI's strict mode requirements)
    rating = result["properties"]["rating"]
    assert is_dict_field(rating)
    assert get_dict_value_type(rating) == "integer"


def test_simple_model_schema_handling():
    """Models without Dict fields keep every field required."""
    result = get_response_schema_for_provider(SimpleModel, "openai")
    required = result.get("required", [])
    assert "name" in required
    assert "age" in required
    assert result.get("additionalProperties") is False
def test_multiple_dict_types():
    """Each Dict field keeps its own value type, and none is required."""
    result = get_response_schema_for_provider(DictModel, "openai")
    required = result.get("required", [])
    expected_value_types = {"rating": "integer", "scores": "number", "metadata": "string"}
    for field_name, value_type in expected_value_types.items():
        dict_field = result["properties"][field_name]
        assert is_dict_field(dict_field)
        assert get_dict_value_type(dict_field) == value_type
        assert field_name not in required
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_schema_utils.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_python_tools.py | import tempfile
from pathlib import Path
from unittest.mock import patch
import pytest
from agno.tools.python import PythonTools
@pytest.fixture
def temp_dir():
    """Yield a throwaway directory that is removed after the test."""
    with tempfile.TemporaryDirectory() as tmp:
        yield Path(tmp)
@pytest.fixture
def python_tools(temp_dir):
    """PythonTools rooted at temp_dir with exactly the tools under test enabled."""
    enabled_tools = [
        "save_to_file_and_run",
        "run_python_code",
        "pip_install_package",
        "uv_pip_install_package",
        "run_python_file_return_variable",
        "read_file",
        "list_files",
    ]
    return PythonTools(base_dir=temp_dir, include_tools=enabled_tools)
def test_save_to_file_and_run_success(python_tools, temp_dir):
    """Valid code is written to disk, executed, and the variable value returned."""
    source = "x = 42"
    outcome = python_tools.save_to_file_and_run("test.py", source, "x")
    assert outcome == "42"
    saved = temp_dir / "test.py"
    assert saved.exists()
    assert saved.read_text() == source


def test_save_to_file_and_run_error(python_tools):
    """Syntactically invalid code surfaces an error string."""
    outcome = python_tools.save_to_file_and_run("test.py", "x = ")  # invalid syntax
    assert "Error saving and running code" in outcome


def test_save_to_file_and_run_no_overwrite(python_tools, temp_dir):
    """With overwrite=False an existing file is left untouched."""
    target = temp_dir / "test.py"
    target.write_text("original")
    outcome = python_tools.save_to_file_and_run("test.py", "new code", overwrite=False)
    assert "already exists" in outcome
    assert target.read_text() == "original"
def test_run_python_file_return_variable(python_tools, temp_dir):
    """An existing file is executed and the requested variable returned."""
    (temp_dir / "test.py").write_text("x = 42")
    assert python_tools.run_python_file_return_variable("test.py", "x") == "42"


def test_run_python_file_return_variable_not_found(python_tools, temp_dir):
    """Requesting a variable the script never defines reports it as missing."""
    (temp_dir / "test.py").write_text("x = 42")
    outcome = python_tools.run_python_file_return_variable("test.py", "y")
    assert "Variable y not found" in outcome
def test_read_file(python_tools, temp_dir):
    """File contents are returned verbatim."""
    expected = "Hello, World!"
    (temp_dir / "test.txt").write_text(expected)
    assert python_tools.read_file("test.txt") == expected


def test_read_file_not_found(python_tools):
    """Reading a missing file yields an error string rather than raising."""
    assert "Error reading file" in python_tools.read_file("nonexistent.txt")


def test_list_files(python_tools, temp_dir):
    """Every file in the base directory appears in the listing."""
    for filename in ("file1.txt", "file2.txt"):
        (temp_dir / filename).touch()
    listing = python_tools.list_files()
    assert "file1.txt" in listing
    assert "file2.txt" in listing
def test_run_python_code(python_tools):
    """Inline code executes and the named variable is returned as a string."""
    assert python_tools.run_python_code("x = 42", "x") == "42"
def test_run_python_code_advanced(python_tools):
    # Multi-line snippet with nested indentation; run_python_code must execute it
    # and return the value of `result` (fibonacci(10) == 55).
    # NOTE(review): the snippet lines sit inside a triple-quoted string — assumes
    # run_python_code tolerates/dedents the leading indentation; confirm against
    # the PythonTools implementation.
    code = """
    def fibonacci(n, print_steps: bool = False):
        a, b = 0, 1
        for _ in range(n):
            if print_steps:
                print(a)
            a, b = b, a + b
        return a
    result = fibonacci(10, print_steps=True)
    """
    result = python_tools.run_python_code(code, "result")
    assert result == "55"
def test_run_python_code_error(python_tools):
    """Invalid inline code yields an error string rather than raising."""
    assert "Error running python code" in python_tools.run_python_code("x = ")  # invalid syntax
@patch("subprocess.check_call")
def test_pip_install_package(mock_check_call, python_tools):
    """A successful pip install reports success and shells out exactly once."""
    outcome = python_tools.pip_install_package("requests")
    assert "successfully installed package requests" in outcome
    mock_check_call.assert_called_once()


@patch("subprocess.check_call")
def test_pip_install_package_error(mock_check_call, python_tools):
    """A failing pip install is reported as an error string."""
    mock_check_call.side_effect = Exception("Installation failed")
    assert "Error installing package requests" in python_tools.pip_install_package("requests")


@patch("subprocess.check_call")
def test_uv_pip_install_package(mock_check_call, python_tools):
    """A successful uv pip install reports success and shells out exactly once."""
    outcome = python_tools.uv_pip_install_package("requests")
    assert "successfully installed package requests" in outcome
    mock_check_call.assert_called_once()


@patch("subprocess.check_call")
def test_uv_pip_install_package_error(mock_check_call, python_tools):
    """A failing uv pip install is reported as an error string."""
    mock_check_call.side_effect = Exception("Installation failed")
    assert "Error installing package requests" in python_tools.uv_pip_install_package("requests")
# Path traversal prevention tests
def test_check_path_blocks_parent_traversal(temp_dir):
    """_check_path rejects every form of parent-directory escape."""
    tools = PythonTools(base_dir=temp_dir)
    escape_attempts = (
        "../escape.py",  # direct escape via ..
        "../../escape.py",  # multiple levels up
        "subdir/../../escape.py",  # sneaky escape through a subdirectory
    )
    for attempt in escape_attempts:
        safe, _ = tools._check_path(attempt, tools.base_dir, tools.restrict_to_base_dir)
        assert not safe
def test_save_to_file_blocks_path_traversal(temp_dir):
    """save_to_file_and_run refuses paths that escape the base directory."""
    tools = PythonTools(base_dir=temp_dir)
    assert "outside the allowed base directory" in tools.save_to_file_and_run("../malicious.py", "x = 1")


def test_read_file_blocks_path_traversal(temp_dir):
    """read_file refuses paths that escape the base directory."""
    tools = PythonTools(base_dir=temp_dir)
    assert "Error reading file" in tools.read_file("../../../etc/passwd")


def test_run_python_file_blocks_path_traversal(temp_dir):
    """run_python_file_return_variable refuses paths that escape the base directory."""
    tools = PythonTools(base_dir=temp_dir)
    assert "outside the allowed base directory" in tools.run_python_file_return_variable("../malicious.py")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_python_tools.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/utils/test_json_schema.py | from dataclasses import dataclass, field
from typing import Any, Dict, List, Literal, Optional, Union
from pydantic import BaseModel
from agno.utils.json_schema import (
get_json_schema,
get_json_schema_for_arg,
get_json_type_for_py_type,
is_origin_union_type,
)
# Test models and dataclasses
class MockPydanticModel(BaseModel):
    """Flat Pydantic model used to exercise schema generation."""

    name: str
    age: int
    is_active: bool = True


@dataclass
class MockDataclass:
    """Flat dataclass counterpart of MockPydanticModel, plus a list field."""

    name: str
    age: int
    is_active: bool = True
    tags: List[str] = field(default_factory=list)


# Nested Pydantic models
class AddressModel(BaseModel):
    """Innermost nested model: four string fields."""

    street: str
    city: str
    country: str
    postal_code: str


class ContactInfoModel(BaseModel):
    """Mid-level model nesting AddressModel, with an optional phone."""

    email: str
    phone: Optional[str] = None
    address: AddressModel
class UserProfileModel(BaseModel):
    """Top-level nested model: contact info plus a free-form preferences dict.

    Fixed: the original declared ``preferences`` with
    ``dataclasses.field(default_factory=dict)``. Pydantic's BaseModel does not
    interpret ``dataclasses.field``, so the ``dataclasses.Field`` object itself
    became the field's default value. A plain ``{}`` default is the idiomatic
    fix — Pydantic copies mutable defaults per instance, so it is not shared.
    """

    name: str
    age: int
    contact_info: ContactInfoModel
    preferences: Dict[str, Any] = {}
# Nested dataclasses
@dataclass
class AddressDataclass:
    """Innermost nested dataclass: four string fields."""

    street: str
    city: str
    country: str
    postal_code: str


@dataclass
class ContactInfoDataclass:
    """Mid-level dataclass nesting AddressDataclass; defaulted phone comes last."""

    email: str
    address: AddressDataclass
    phone: Optional[str] = None


@dataclass
class UserProfileDataclass:
    """Top-level nested dataclass with a free-form preferences dict."""

    name: str
    age: int
    contact_info: ContactInfoDataclass
    preferences: Dict[str, Any] = field(default_factory=dict)
# Test cases for get_json_type_for_py_type
def test_get_json_type_for_py_type():
    """Python type names map to their JSON schema counterparts."""
    expected = {
        "int": "integer",
        "float": "number",
        "str": "string",
        "bool": "boolean",
        "NoneType": "null",
        "list": "array",
        "dict": "object",
        "unknown": "object",  # unrecognized names fall back to object
    }
    for py_name, json_name in expected.items():
        assert get_json_type_for_py_type(py_name) == json_name


# Test cases for is_origin_union_type
def test_is_origin_union_type():
    """Only Union qualifies as a union origin."""
    assert is_origin_union_type(Union)
    assert not is_origin_union_type(list)
    assert not is_origin_union_type(dict)
# Test cases for get_json_schema_for_arg
def test_get_json_schema_for_arg_basic_types():
    """Scalar Python types map to single-type schemas."""
    expected = {int: "integer", str: "string", bool: "boolean", type(None): "null"}
    for py_type, json_type in expected.items():
        assert get_json_schema_for_arg(py_type) == {"type": json_type}


def test_get_json_schema_for_arg_collections():
    """Parametrized list/dict types carry their element schemas."""
    assert get_json_schema_for_arg(List[str]) == {"type": "array", "items": {"type": "string"}}
    assert get_json_schema_for_arg(Dict[str, int]) == {
        "type": "object",
        "propertyNames": {"type": "string"},
        "additionalProperties": {"type": "integer"},
    }


def test_get_json_schema_for_arg_union():
    """Optional and Union types become anyOf schemas."""
    assert get_json_schema_for_arg(Optional[str]) == {"anyOf": [{"type": "string"}, {"type": "null"}]}
    union_schema = get_json_schema_for_arg(Union[str, int])
    assert "anyOf" in union_schema
    assert len(union_schema["anyOf"]) == 2
def test_get_json_schema_for_arg_literal():
    """Literal types become enum schemas typed by their member values."""
    cases = [
        (Literal["create", "update", "delete"], {"type": "string", "enum": ["create", "update", "delete"]}),
        (Literal[1, 2, 3], {"type": "integer", "enum": [1, 2, 3]}),
        (Literal[True, False], {"type": "boolean", "enum": [True, False]}),
        (Literal[1.5, 2.5, 3.5], {"type": "number", "enum": [1.5, 2.5, 3.5]}),
        # Mixed int/float literals widen to "number" so every member fits.
        (Literal[1, 2.5, 3], {"type": "number", "enum": [1, 2.5, 3]}),
        # A single-value Literal still yields a one-element enum.
        (Literal["only_option"], {"type": "string", "enum": ["only_option"]}),
    ]
    for literal_type, expected in cases:
        assert get_json_schema_for_arg(literal_type) == expected
# Test cases for get_json_schema
def test_get_json_schema_basic():
    """Descriptions and JSON types are attached per property."""
    hints = {"name": str, "age": int, "is_active": bool}
    descriptions = {
        "name": "User's full name",
        "age": "User's age in years",
        "is_active": "Whether the user is active",
    }
    result = get_json_schema(hints, descriptions)
    assert result["type"] == "object"
    assert "properties" in result
    props = result["properties"]
    assert props["name"]["type"] == "string"
    assert props["name"]["description"] == "User's full name"
    assert props["age"]["type"] == "integer"
    assert props["is_active"]["type"] == "boolean"
def test_get_json_schema_with_pydantic_model():
    """A Pydantic model hint expands into a nested object schema.

    Fixed: removed a stray ``print(schema)`` debug statement left over from
    development — tests should not write to stdout.
    """
    result = get_json_schema({"user": MockPydanticModel})
    assert result["type"] == "object"
    assert "properties" in result
    assert "user" in result["properties"]
    user_schema = result["properties"]["user"]
    assert user_schema["type"] == "object"
    assert "properties" in user_schema
    assert user_schema["properties"]["name"]["type"] == "string"
    assert user_schema["properties"]["age"]["type"] == "integer"
    assert user_schema["properties"]["is_active"]["type"] == "boolean"
def test_get_json_schema_with_dataclass():
    """A dataclass hint expands into a nested object schema."""
    result = get_json_schema({"user": MockDataclass})
    assert result["type"] == "object"
    assert "properties" in result
    assert "user" in result["properties"]
    user_schema = result["properties"]["user"]
    assert user_schema["type"] == "object"
    assert "properties" in user_schema
    props = user_schema["properties"]
    assert props["name"]["type"] == "string"
    assert props["age"]["type"] == "integer"
    assert props["is_active"]["type"] == "boolean"
    assert props["tags"]["type"] == "array"


def test_get_json_schema_strict():
    """strict=True closes the object with additionalProperties: false."""
    result = get_json_schema({"name": str, "age": int}, strict=True)
    assert result["additionalProperties"] is False
def test_get_json_schema_with_complex_types():
    """Container and Optional hints produce the expected property schemas."""
    result = get_json_schema(
        {"names": List[str], "scores": Dict[str, float], "optional_field": Optional[int]}
    )
    props = result["properties"]
    assert props["names"]["type"] == "array"
    assert props["names"]["items"]["type"] == "string"
    assert props["scores"]["type"] == "object"
    # Optional is unwrapped to its inner type at the property level.
    assert props["optional_field"]["type"] == "integer"
def test_get_json_schema_with_literal_types():
    """Literal hints carry both their JSON type and enum values."""
    hints = {
        "operation": Literal["create", "update", "delete"],
        "priority": Literal[1, 2, 3],
        "enabled": Literal[True, False],
    }
    descriptions = {
        "operation": "The operation to perform",
        "priority": "Priority level",
        "enabled": "Whether feature is enabled",
    }
    props = get_json_schema(hints, descriptions)["properties"]
    # String literal keeps its description alongside the enum.
    assert props["operation"]["type"] == "string"
    assert props["operation"]["enum"] == ["create", "update", "delete"]
    assert props["operation"]["description"] == "The operation to perform"
    # Integer literal.
    assert props["priority"]["type"] == "integer"
    assert props["priority"]["enum"] == [1, 2, 3]
    # Boolean literal.
    assert props["enabled"]["type"] == "boolean"
    assert props["enabled"]["enum"] == [True, False]


def test_get_json_schema_optional_literal():
    """Optional[Literal[...]] is unwrapped before conversion."""
    result = get_json_schema({"op": Optional[Literal["a", "b"]]})
    assert result["properties"]["op"] == {"type": "string", "enum": ["a", "b"]}
# Test cases for nested structures
def test_get_json_schema_with_nested_pydantic_models():
    """Deeply nested Pydantic models expand recursively."""
    result = get_json_schema({"user_profile": UserProfileModel})
    assert result["type"] == "object"
    assert "properties" in result
    assert "user_profile" in result["properties"]
    profile = result["properties"]["user_profile"]
    assert profile["type"] == "object"
    assert "properties" in profile
    # contact_info is itself an object schema...
    assert "contact_info" in profile["properties"]
    contact = profile["properties"]["contact_info"]
    assert contact["type"] == "object"
    assert "properties" in contact
    # ...which nests the address object with its four string fields.
    assert "address" in contact["properties"]
    address = contact["properties"]["address"]
    assert address["type"] == "object"
    assert "properties" in address
    for address_key in ("street", "city", "country", "postal_code"):
        assert address["properties"][address_key]["type"] == "string"
    # Optional phone is present but not in the required list.
    assert "phone" in contact["properties"]
    assert contact["required"] == ["email", "address"]
    # The preferences dict keeps its additionalProperties marker.
    assert "preferences" in profile["properties"]
    preferences = profile["properties"]["preferences"]
    assert preferences["type"] == "object"
    assert "additionalProperties" in preferences
def test_get_json_schema_with_nested_dataclasses():
    """Deeply nested dataclasses expand recursively, mirroring the model case."""
    result = get_json_schema({"user_profile": UserProfileDataclass})
    assert result["type"] == "object"
    assert "properties" in result
    assert "user_profile" in result["properties"]
    profile = result["properties"]["user_profile"]
    assert profile["type"] == "object"
    assert "properties" in profile
    # contact_info is itself an object schema...
    assert "contact_info" in profile["properties"]
    contact = profile["properties"]["contact_info"]
    assert contact["type"] == "object"
    assert "properties" in contact
    # ...which nests the address object with its four string fields.
    assert "address" in contact["properties"]
    address = contact["properties"]["address"]
    assert address["type"] == "object"
    assert "properties" in address
    for address_key in ("street", "city", "country", "postal_code"):
        assert address["properties"][address_key]["type"] == "string"
    # Optional phone is present but not in the required list.
    assert "phone" in contact["properties"]
    assert contact["required"] == ["email", "address"]
    # The preferences dict keeps its additionalProperties marker.
    assert "preferences" in profile["properties"]
    preferences = profile["properties"]["preferences"]
    assert preferences["type"] == "object"
    assert "additionalProperties" in preferences
def test_get_json_schema_with_mixed_nested_structures():
    """A dataclass holding both model kinds expands both branches identically."""

    @dataclass
    class MixedStructure:
        pydantic_model: UserProfileModel
        dataclass_model: UserProfileDataclass

    result = get_json_schema({"mixed": MixedStructure})
    assert result["type"] == "object"
    assert "properties" in result
    assert "mixed" in result["properties"]
    mixed = result["properties"]["mixed"]
    assert mixed["type"] == "object"
    assert "properties" in mixed
    # Both branches are present and share the same nested shape.
    for branch_name in ("pydantic_model", "dataclass_model"):
        assert branch_name in mixed["properties"]
        branch = mixed["properties"][branch_name]
        assert branch["type"] == "object"
        assert "properties" in branch
        assert "contact_info" in branch["properties"]
        assert "address" in branch["properties"]["contact_info"]["properties"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_json_schema.py",
"license": "Apache License 2.0",
"lines": 294,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/utils/test_functions.py | import json
from typing import Dict
import pytest
from agno.tools.function import Function, FunctionCall
from agno.utils.functions import get_function_call
@pytest.fixture
def sample_functions() -> Dict[str, Function]:
    """Two registered functions: one with three typed params, one with a single code param."""
    three_param_schema = {
        "type": "object",
        "properties": {
            "param1": {"type": "string"},
            "param2": {"type": "integer"},
            "param3": {"type": "boolean"},
        },
    }
    code_schema = {
        "type": "object",
        "properties": {
            "code": {"type": "string"},
        },
    }
    return {
        "test_function": Function(
            name="test_function", description="A test function", parameters=three_param_schema
        ),
        "test_function_2": Function(
            name="test_function_2", description="A test function 2", parameters=code_schema
        ),
    }
def test_get_function_call_basic(sample_functions):
    """A valid name, JSON arguments and call id produce a fully populated FunctionCall."""
    payload = {"param1": "test", "param2": 42, "param3": True}
    fc = get_function_call(
        name="test_function",
        arguments=json.dumps(payload),
        call_id="test-call-123",
        functions=sample_functions,
    )
    assert fc is not None
    assert isinstance(fc, FunctionCall)
    assert fc.function == sample_functions["test_function"]
    assert fc.call_id == "test-call-123"
    assert fc.arguments == payload
    assert fc.error is None
def test_get_function_call_invalid_name(sample_functions):
    """An unknown function name yields None."""
    fc = get_function_call(
        name="non_existent_function",
        arguments='{"param1": "test"}',
        functions=sample_functions,
    )
    assert fc is None
def test_get_function_call_no_functions():
    """Without a function registry there is nothing to resolve, so None is returned."""
    fc = get_function_call(
        name="test_function",
        arguments='{"param1": "test"}',
        functions=None,
    )
    assert fc is None
def test_get_function_call_invalid_json(sample_functions):
    """Malformed JSON arguments are reported via the error field, not an exception."""
    fc = get_function_call(
        name="test_function",
        arguments="invalid json",
        functions=sample_functions,
    )
    assert fc is not None
    assert fc.error is not None
    assert "Error while decoding function arguments" in fc.error
def test_get_function_call_non_dict_arguments(sample_functions):
    """Valid JSON that is not an object is rejected via the error field."""
    fc = get_function_call(
        name="test_function",
        arguments='["not", "a", "dict"]',
        functions=sample_functions,
    )
    assert fc is not None
    assert fc.error is not None
    assert "Function arguments are not a valid JSON object" in fc.error
def test_get_function_call_argument(sample_functions):
    """String literals "None"/"True"/"False" are converted and strings are stripped."""
    raw = json.dumps(
        {
            "param1": "None",
            "param2": "True",
            "param3": "False",
            "param4": " test ",
        }
    )
    fc = get_function_call(
        name="test_function",
        arguments=raw,
        functions=sample_functions,
    )
    assert fc is not None
    expected = {
        "param1": None,
        "param2": True,
        "param3": False,
        "param4": "test",
    }
    assert fc.arguments == expected
def test_get_function_call_argument_advanced(sample_functions):
    """Python-literal arguments still parse, and code payloads are left untouched."""
    fc = get_function_call(
        name="test_function",
        arguments='{"param1": None, "param2": True, "param3": False, "param4": "test"}',
        functions=sample_functions,
    )
    assert fc is not None
    assert fc.arguments == {
        "param1": None,
        "param2": True,
        "param3": False,
        "param4": "test",
    }

    # A code-carrying string argument must not have its literals rewritten.
    fc = get_function_call(
        name="test_function_2",
        arguments='{"code": "x = True; y = False; z = None;"}',
        functions=sample_functions,
    )
    assert fc is not None
    assert fc.arguments == {"code": "x = True; y = False; z = None;"}
def test_get_function_call_empty_arguments(sample_functions):
    """An empty argument string produces a call with no arguments and no error."""
    fc = get_function_call(
        name="test_function",
        arguments="",
        functions=sample_functions,
    )
    assert fc is not None
    assert fc.arguments is None
    assert fc.error is None
def test_get_function_call_no_arguments(sample_functions):
    """Omitting arguments entirely also yields a call with no arguments and no error."""
    fc = get_function_call(
        name="test_function",
        arguments=None,
        functions=sample_functions,
    )
    assert fc is not None
    assert fc.arguments is None
    assert fc.error is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_functions.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/models/aimlapi/aimlapi.py | from dataclasses import dataclass, field
from os import getenv
from typing import Any, Dict, Optional
from agno.exceptions import ModelAuthenticationError
from agno.models.message import Message
from agno.models.openai.like import OpenAILike
@dataclass
class AIMLAPI(OpenAILike):
    """
    Model class for models hosted on AIMLAPI (OpenAI-compatible API).

    Attributes:
        id (str): The model id. Defaults to "gpt-4o-mini".
        name (str): The model name. Defaults to "AIMLAPI".
        provider (str): The provider name. Defaults to "AIMLAPI".
        api_key (Optional[str]): The API key, read from AIMLAPI_API_KEY when unset.
        base_url (str): The base URL. Defaults to "https://api.aimlapi.com/v1".
        max_tokens (int): The maximum number of tokens. Defaults to 4096.
    """

    id: str = "gpt-4o-mini"
    name: str = "AIMLAPI"
    provider: str = "AIMLAPI"
    api_key: Optional[str] = field(default_factory=lambda: getenv("AIMLAPI_API_KEY"))
    base_url: str = "https://api.aimlapi.com/v1"
    max_tokens: int = 4096

    def _get_client_params(self) -> Dict[str, Any]:
        """
        Return client parameters for API requests, resolving the API key if needed.

        Returns:
            Dict[str, Any]: A dictionary of client parameters for API requests.

        Raises:
            ModelAuthenticationError: If no API key is set and AIMLAPI_API_KEY is unset.
        """
        # Fall back to the environment once more before giving up.
        if not self.api_key:
            self.api_key = getenv("AIMLAPI_API_KEY")
        if not self.api_key:
            raise ModelAuthenticationError(
                message="AIMLAPI_API_KEY not set. Please set the AIMLAPI_API_KEY environment variable.",
                model_name=self.name,
            )
        return super()._get_client_params()

    def _format_message(self, message: Message) -> Dict[str, Any]:
        """
        Format a message for the API, replacing a None content with the empty string.

        Args:
            message (Message): The message to format.

        Returns:
            Dict[str, Any]: The formatted message with non-None content.
        """
        formatted: dict = super()._format_message(message)
        if formatted.get("content") is None:
            formatted["content"] = ""
        return formatted
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/aimlapi/aimlapi.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/integration/models/aimlapi/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput
from agno.db.sqlite import SqliteDb
from agno.models.aimlapi import AIMLAPI
def _assert_metrics(response: RunOutput):
    """Sanity-check token accounting and timing metrics on a completed run."""
    metrics = response.metrics
    assert metrics is not None
    # Token counts must be positive and internally consistent.
    assert metrics.input_tokens > 0
    assert metrics.output_tokens > 0
    assert metrics.total_tokens > 0
    assert metrics.total_tokens == metrics.input_tokens + metrics.output_tokens
    # Timing fields must be populated and positive.
    for timing in (metrics.time_to_first_token, metrics.duration):
        assert timing is not None
        assert timing > 0
def test_basic():
    """A plain run returns content plus the system/user/assistant message triple."""
    agent = Agent(model=AIMLAPI(id="gpt-4o-mini"), markdown=True, telemetry=False)
    response: RunOutput = agent.run("Tell me, why is the sky blue in 2 sentences")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    roles = [message.role for message in response.messages]
    assert roles == ["system", "user", "assistant"]
    _assert_metrics(response)
def test_basic_stream():
    """Streaming returns an iterator of chunks, each carrying content."""
    agent = Agent(model=AIMLAPI(id="gpt-4o-mini"), markdown=True, telemetry=False)
    stream = agent.run("Tell me, why is the sky blue in 2 sentences", stream=True)
    assert hasattr(stream, "__iter__")
    chunks = list(stream)
    assert len(chunks) > 0
    for chunk in chunks:
        assert chunk.content is not None
@pytest.mark.asyncio
async def test_async_basic():
    """Async run mirrors the sync basic behavior."""
    agent = Agent(model=AIMLAPI(id="gpt-4o-mini"), markdown=True, telemetry=False)
    response = await agent.arun("Tell me, why is the sky blue in 2 sentences")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    roles = [message.role for message in response.messages]
    assert roles == ["system", "user", "assistant"]
    _assert_metrics(response)
@pytest.mark.asyncio
async def test_async_basic_stream():
    """Async streaming yields chunks that all carry content."""
    agent = Agent(model=AIMLAPI(id="gpt-4o-mini"), markdown=True, telemetry=False)
    async for chunk in agent.arun("Tell me, why is the sky blue in 2 sentences", stream=True):
        assert chunk.content is not None
def test_with_memory():
    """A history-aware agent recalls facts from earlier turns in the same session."""
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=AIMLAPI(id="gpt-4o-mini"),
        add_history_to_context=True,
        markdown=True,
        telemetry=False,
    )
    first = agent.run("My name is John Smith")
    assert first.content is not None

    # The follow-up should be answered from stored history.
    second = agent.run("What's my name?")
    assert second.content is not None
    assert "John Smith" in second.content

    # The session transcript covers both turns.
    transcript = agent.get_session_messages()
    assert len(transcript) == 5
    assert [message.role for message in transcript] == ["system", "user", "assistant", "user", "assistant"]

    _assert_metrics(second)
def test_output_schema():
    """output_schema returns a parsed MovieScript instance with all fields set."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        markdown=True,
        # use_json_mode=True, if gemini-like / deepseek
        telemetry=False,
        output_schema=MovieScript,
    )
    response = agent.run("Create a movie about time travel")
    assert isinstance(response.content, MovieScript)
    for value in (response.content.title, response.content.genre, response.content.plot):
        assert value is not None
def test_json_response_mode():
    """use_json_mode combined with output_schema still yields a parsed MovieScript."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        use_json_mode=True,
        telemetry=False,
        output_schema=MovieScript,
    )
    response = agent.run("Create a movie about time travel")
    assert isinstance(response.content, MovieScript)
    for value in (response.content.title, response.content.genre, response.content.plot):
        assert value is not None
def test_structured_outputs_deprecated():
    """The deprecated structured_outputs=False path still parses into the schema."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        structured_outputs=False,  # They don't support native structured outputs
        # use_json_mode=True, if gemini-like / deepseek
        telemetry=False,
        output_schema=MovieScript,
    )
    response = agent.run("Create a movie about time travel")
    assert isinstance(response.content, MovieScript)
    for value in (response.content.title, response.content.genre, response.content.plot):
        assert value is not None
def test_history():
    """Each run appends its user/assistant pair to the stored history."""
    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        db=SqliteDb(db_file="tmp/aimlapi/test_basic.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )
    # Message count grows by two (user + assistant) on every turn.
    for turn, prompt in enumerate(["Hello", "Hello 2", "Hello 3", "Hello 4"], start=1):
        run_output = agent.run(prompt)
        assert run_output.messages is not None
        assert len(run_output.messages) == 2 * turn
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/aimlapi/test_basic.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/aimlapi/test_tool_use.py | from typing import Optional
import pytest
from agno.agent import Agent
from agno.models.aimlapi import AIMLAPI
from agno.tools.exa import ExaTools
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
def test_tool_use():
    """The agent should invoke YFinance and mention the ticker in its answer."""
    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA?")
    assert response.messages is not None
    assert any(message.tool_calls for message in response.messages)
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_use_stream():
    """Streaming with tools should yield at least one tool-call chunk and mention TSLA."""
    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    chunks = list(agent.run("What is the current price of TSLA?", stream=True))
    assert len(chunks) > 0
    saw_tool_call = any(
        tc.get("tool_name") for chunk in chunks if chunk.tools for tc in chunk.tools
    )
    assert saw_tool_call, "No tool calls observed in stream"
    combined = "".join(chunk.content for chunk in chunks if chunk.content)
    assert "TSLA" in combined
@pytest.mark.asyncio
async def test_async_tool_use():
    """Async run with tools records assistant tool calls and answers about TSLA."""
    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = await agent.arun("What is the current price of TSLA?")
    assert response.messages is not None
    assistant_messages = [m for m in response.messages if m.role == "assistant"]
    assert any(m.tool_calls for m in assistant_messages)
    assert response.content is not None
    assert "TSLA" in response.content
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """Async streaming with events should surface tool-call events and tool-derived content."""
    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    # Bug fix: these flags were previously only assigned inside the loop, so a
    # stream with no matching events raised NameError at the asserts instead of
    # failing with the intended assertion message.
    tool_call_seen = False
    keyword_seen_in_response = False
    async for response in agent.arun(
        "What is the current price of TSLA?",
        stream=True,
        stream_events=True,
    ):
        if response.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(response, "tool") and response.tool:  # type: ignore
            if response.tool.tool_name:  # type: ignore
                tool_call_seen = True
        if response.content is not None and "TSLA" in response.content:
            keyword_seen_in_response = True
    # Asserting we found tool responses in the response stream
    assert tool_call_seen, "No tool calls observed in stream"
    # Asserting we found the expected keyword in the response stream -> proving the correct tool was called
    assert keyword_seen_in_response, "Keyword not found in response"
def test_multiple_tool_calls():
    """Price + news in one prompt should trigger at least two function tool calls."""
    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True), WebSearchTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA and what is the latest news about it?")
    assert response.messages is not None
    function_calls = [
        call
        for message in response.messages
        if message.tool_calls
        for call in message.tool_calls
        if call.get("type", "") == "function"
    ]
    assert len(function_calls) >= 2
    assert response.content is not None
    assert "TSLA" in response.content and "latest news" in response.content.lower()
def test_tool_call_custom_tool_no_parameters():
    """A plain function with no parameters can be used directly as a tool."""

    def get_the_weather_in_tokyo():
        """
        Get the weather in Tokyo
        """
        return "It is currently 70 degrees and cloudy in Tokyo"

    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        tools=[get_the_weather_in_tokyo],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the weather in Tokyo?")
    assert response.messages is not None
    assert any(message.tool_calls for message in response.messages)
    assert response.content is not None
    assert "70" in response.content
def test_tool_call_custom_tool_optional_parameters():
    """A tool whose only parameter is optional still receives the city argument."""

    def get_the_weather(city: Optional[str] = None):
        """
        Get the weather in a city
        Args:
            city: The city to get the weather for
        """
        if city is None:
            return "It is currently 70 degrees and cloudy in Tokyo"
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        tools=[get_the_weather],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the weather in Paris?")
    assert response.messages is not None
    assert any(message.tool_calls for message in response.messages)
    assert response.content is not None
    assert "70" in response.content
def test_tool_call_list_parameters():
    """URL-list arguments are routed to Exa's content tools."""
    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        tools=[ExaTools()],
        instructions="Use a single tool call if possible",
        markdown=True,
        telemetry=False,
    )
    response = agent.run(
        "What are the papers at https://arxiv.org/pdf/2307.06435 and https://arxiv.org/pdf/2502.09601 about?"
    )
    assert response.messages is not None
    assert any(message.tool_calls for message in response.messages)
    # Every function-type call must target one of the Exa content tools.
    for message in response.messages:
        for call in message.tool_calls or []:
            if call.get("type", "") == "function":
                assert call["function"]["name"] in ["get_contents", "exa_answer"]
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/aimlapi/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/api/schemas/workflows.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, Field
from agno.api.schemas.utils import TelemetryRunEventType, get_sdk_version
class WorkflowRunCreate(BaseModel):
    """Data sent to API to create a Workflow Run"""

    # Identifier of the session this run belongs to (required).
    session_id: str
    # Identifier of the run itself, when already known.
    run_id: Optional[str] = None
    # Arbitrary telemetry payload attached to the run.
    data: Optional[Dict[Any, Any]] = None
    # SDK version, captured at model-instantiation time.
    sdk_version: str = Field(default_factory=get_sdk_version)
    # Marks this telemetry event as a workflow run.
    type: TelemetryRunEventType = TelemetryRunEventType.WORKFLOW
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/api/schemas/workflows.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/models/vercel/v0.py | from dataclasses import dataclass
from os import getenv
from typing import Any, Dict, Optional
from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike
@dataclass
class V0(OpenAILike):
    """
    Class for interacting with the v0 API.

    Attributes:
        id (str): The ID of the language model. Defaults to "v0-1.0-md".
        name (str): The name of the API. Defaults to "v0".
        provider (str): The provider of the API. Defaults to "Vercel".
        api_key (Optional[str]): The API key for the v0 API, read from the
            V0_API_KEY environment variable when not provided (keys are
            created at https://v0.dev/chat/settings/keys).
        base_url (str): The base URL for the v0 API. Defaults to "https://api.v0.dev/v1/".
    """

    id: str = "v0-1.0-md"
    name: str = "v0"
    provider: str = "Vercel"
    api_key: Optional[str] = None
    base_url: str = "https://api.v0.dev/v1/"

    def _get_client_params(self) -> Dict[str, Any]:
        """
        Returns client parameters for API requests, checking for V0_API_KEY.

        Returns:
            Dict[str, Any]: A dictionary of client parameters for API requests.

        Raises:
            ModelAuthenticationError: If no API key is set and V0_API_KEY is unset.
        """
        if not self.api_key:
            self.api_key = getenv("V0_API_KEY")
            if not self.api_key:
                raise ModelAuthenticationError(
                    message="V0_API_KEY not set. Please set the V0_API_KEY environment variable.",
                    model_name=self.name,
                )
        return super()._get_client_params()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/vercel/v0.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/integration/models/vercel/test_basic.py | import pytest
from agno.agent import Agent, RunOutput
from agno.db.sqlite import SqliteDb
from agno.models.vercel import V0
def _assert_metrics(response: RunOutput):
    """Verify token metrics are present, positive, and internally consistent."""
    metrics = response.metrics
    assert metrics is not None
    for count in (metrics.input_tokens, metrics.output_tokens, metrics.total_tokens):
        assert count > 0
    assert metrics.total_tokens == metrics.input_tokens + metrics.output_tokens
def test_basic():
    """A simple run yields content and the expected three-message transcript."""
    agent = Agent(model=V0(id="v0-1.0-md"), markdown=True, telemetry=False)
    response: RunOutput = agent.run("Share a 2 sentence horror story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [message.role for message in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
def test_basic_stream():
    """Every streamed chunk carries content."""
    agent = Agent(model=V0(id="v0-1.0-md"), markdown=True, telemetry=False)
    chunks = agent.run("Share a 2 sentence horror story", stream=True)
    for chunk in chunks:
        assert chunk.content is not None
@pytest.mark.asyncio
async def test_async_basic():
    """Async run mirrors the sync basic behavior."""
    agent = Agent(model=V0(id="v0-1.0-md"), markdown=True, telemetry=False)
    response = await agent.arun("Share a 2 sentence horror story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [message.role for message in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
@pytest.mark.asyncio
async def test_async_basic_stream():
    """Async streaming yields chunks that all carry content."""
    agent = Agent(model=V0(id="v0-1.0-md"), markdown=True, telemetry=False)
    async for chunk in agent.arun("Share a 2 sentence horror story", stream=True):
        assert chunk.content is not None
def test_with_memory():
    """A history-aware agent recalls facts from earlier turns in the same session."""
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=V0(id="v0-1.0-md"),
        add_history_to_context=True,
        markdown=True,
        telemetry=False,
    )
    first = agent.run("My name is John Smith")
    assert first.content is not None

    # The follow-up should be answered from stored history.
    second = agent.run("What's my name?")
    assert second.content is not None
    assert "John Smith" in second.content

    # The session transcript covers both turns.
    transcript = agent.get_session_messages()
    assert len(transcript) == 5
    assert [message.role for message in transcript] == ["system", "user", "assistant", "user", "assistant"]

    _assert_metrics(second)
def test_history():
    """Each run appends its user/assistant pair to the stored history."""
    agent = Agent(
        model=V0(id="v0-1.0-md"),
        db=SqliteDb(db_file="tmp/vercel/test_basic.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )
    # Message count grows by two (user + assistant) on every turn.
    for turn, prompt in enumerate(["Hello", "Hello 2", "Hello 3", "Hello 4"], start=1):
        run_output = agent.run(prompt)
        assert run_output.messages is not None
        assert len(run_output.messages) == 2 * turn
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/vercel/test_basic.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/vercel/test_multimodal.py | from agno.agent import Agent
from agno.media import Image
from agno.models.vercel import V0
from agno.tools.websearch import WebSearchTools
def test_image_input():
    """The model should recognize the Golden Gate Bridge image and fetch related news."""
    agent = Agent(
        model=V0(id="v0-1.0-md"),
        tools=[WebSearchTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    bridge = Image(url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg")
    response = agent.run(
        "Tell me about this image and give me the latest news about it.",
        images=[bridge],
    )
    assert response.content is not None
    assert "golden" in response.content.lower()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/vercel/test_multimodal.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/vercel/test_tool_use.py | from typing import Optional
import pytest
from agno.agent import Agent
from agno.models.vercel import V0
from agno.tools.exa import ExaTools
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
def test_tool_use():
    """The agent should invoke YFinance and mention the ticker in its answer."""
    agent = Agent(
        model=V0(id="v0-1.0-md"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA?")
    assert response.messages is not None
    assert any(message.tool_calls for message in response.messages)
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_use_stream():
    """Streaming with events should include tool-call events and TSLA content."""
    agent = Agent(
        model=V0(id="v0-1.0-md"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    events = []
    saw_tool_call = False
    for event in agent.run("What is the current price of TSLA?", stream=True, stream_events=True):
        events.append(event)
        # Tool usage is surfaced via ToolCallStarted / ToolCallCompleted events.
        if event.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(event, "tool") and event.tool:
            if event.tool.tool_name:  # type: ignore
                saw_tool_call = True
    assert len(events) > 0
    assert saw_tool_call, "No tool calls observed in stream"
    assert "TSLA" in "".join(e.content or "" for e in events)
@pytest.mark.asyncio
async def test_async_tool_use():
    """Async run with tools records assistant tool calls and answers about TSLA."""
    agent = Agent(
        model=V0(id="v0-1.0-md"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = await agent.arun("What is the current price of TSLA?")
    assert response.messages is not None
    assistant_messages = [m for m in response.messages if m.role == "assistant"]
    assert any(m.tool_calls for m in assistant_messages)
    assert response.content is not None
    assert "TSLA" in response.content
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """Async streaming with events should include tool-call events and TSLA content."""
    agent = Agent(
        model=V0(id="v0-1.0-md"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    events = []
    saw_tool_call = False
    async for event in agent.arun("What is the current price of TSLA?", stream=True, stream_events=True):
        events.append(event)
        # Tool usage is surfaced via ToolCallStarted / ToolCallCompleted events.
        if event.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(event, "tool") and event.tool:
            if event.tool.tool_name:  # type: ignore
                saw_tool_call = True
    assert len(events) > 0
    assert saw_tool_call, "No tool calls observed in stream"
    assert "TSLA" in "".join(e.content or "" for e in events)
def test_multiple_tool_calls():
    """Price + news in one prompt should trigger at least two function tool calls."""
    agent = Agent(
        model=V0(id="v0-1.0-md"),
        tools=[YFinanceTools(cache_results=True), WebSearchTools(cache_results=True)],
        instructions=[
            "Use YFinance for stock price queries",
            "Use DuckDuckGo for news and general information",
            "When both price and news are requested, use both tools",
        ],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA and search for the latest news about it?")
    assert response.messages is not None
    function_calls = [
        call
        for message in response.messages
        if message.tool_calls
        for call in message.tool_calls
        if call.get("type", "") == "function"
    ]
    assert len(function_calls) >= 2
    assert response.content is not None
    assert "TSLA" in response.content and "latest news" in response.content.lower()
def test_tool_call_custom_tool_no_parameters():
    """A plain function with no parameters can be used directly as a tool."""

    def get_the_weather_in_tokyo():
        """
        Get the weather in Tokyo
        """
        return "It is currently 70 degrees and cloudy in Tokyo"

    agent = Agent(
        model=V0(id="v0-1.0-md"),
        tools=[get_the_weather_in_tokyo],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the weather in Tokyo?")
    assert response.messages is not None
    assert any(message.tool_calls for message in response.messages)
    assert response.content is not None
    assert "70" in response.content
def test_tool_call_custom_tool_optional_parameters():
    """A tool whose only parameter is optional still receives the city argument."""

    def get_the_weather(city: Optional[str] = None):
        """
        Get the weather in a city
        Args:
            city: The city to get the weather for
        """
        if city is None:
            return "It is currently 70 degrees and cloudy in Tokyo"
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=V0(id="v0-1.0-md"),
        tools=[get_the_weather],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the weather in Paris?")
    assert response.messages is not None
    assert any(message.tool_calls for message in response.messages)
    assert response.content is not None
    assert "70" in response.content
def test_tool_call_list_parameters():
    """URL-list arguments are routed to Exa's tools."""
    agent = Agent(
        model=V0(id="v0-1.0-md"),
        tools=[ExaTools()],
        instructions="Use a single tool call if possible",
        markdown=True,
        telemetry=False,
    )
    response = agent.run(
        "What are the papers at https://arxiv.org/pdf/2307.06435 and https://arxiv.org/pdf/2502.09601 about?"
    )
    assert response.messages is not None
    assert any(message.tool_calls for message in response.messages)
    # Every function-type call must target one of the known Exa tools.
    for message in response.messages:
        for call in message.tool_calls or []:
            if call.get("type", "") == "function":
                assert call["function"]["name"] in ["find_similar", "search_exa", "get_contents", "exa_answer"]
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/vercel/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_functions.py | from typing import Any, Callable, Dict, Optional
import pytest
from pydantic import BaseModel, ValidationError
from agno.tools.decorator import tool
from agno.tools.function import Function, FunctionCall
def test_function_initialization():
    """Function fields default sensibly and accept a full configuration."""
    # Minimal construction: only the name is required.
    minimal = Function(name="test_function")
    assert minimal.name == "test_function"
    assert minimal.description is None
    assert minimal.parameters == {"type": "object", "properties": {}, "required": []}
    assert minimal.strict is None
    assert minimal.entrypoint is None

    # Full construction: every optional knob set explicitly.
    configured = Function(
        name="test_function",
        description="Test function description",
        parameters={"type": "object", "properties": {"param1": {"type": "string"}}, "required": ["param1"]},
        strict=True,
        instructions="Test instructions",
        add_instructions=True,
        requires_confirmation=True,
        requires_user_input=True,
        user_input_fields=["param1"],
        external_execution=True,
        cache_results=True,
        cache_dir="/tmp",
        cache_ttl=7200,
    )
    assert configured.name == "test_function"
    assert configured.description == "Test function description"
    assert configured.parameters["properties"]["param1"]["type"] == "string"
    assert configured.strict is True
    assert configured.instructions == "Test instructions"
    assert configured.add_instructions is True
    assert configured.requires_confirmation is True
    assert configured.requires_user_input is True
    assert configured.user_input_fields == ["param1"]
    assert configured.external_execution is True
    assert configured.cache_results is True
    assert configured.cache_dir == "/tmp"
    assert configured.cache_ttl == 7200
def test_decorator_instantiation():
    """@tool turns a plain function into a Function with a derived schema."""

    @tool
    def test_func(param1: str, param2: int = 42) -> str:
        """Test function with parameters."""
        return f"{param1}-{param2}"

    assert isinstance(test_func, Function)
    test_func.process_entrypoint()
    assert test_func.name == "test_func"
    assert test_func.description == "Test function with parameters."
    assert test_func.entrypoint is not None
    properties = test_func.parameters["properties"]
    assert properties["param1"]["type"] == "string"
    assert properties["param2"]["type"] == "integer"
    # Only the parameter without a default is required.
    assert "param1" in test_func.parameters["required"]
    assert "param2" not in test_func.parameters["required"]
def test_function_to_dict():
    """Test the to_dict method returns the correct dictionary representation."""
    fn = Function(
        name="test_function",
        description="Test description",
        parameters={"type": "object", "properties": {"param1": {"type": "string"}}, "required": ["param1"]},
        strict=True,
        requires_confirmation=True,
        external_execution=True,
    )

    serialized = fn.to_dict()
    assert isinstance(serialized, dict)

    # Explicitly-set fields round-trip into the dict.
    assert serialized["name"] == "test_function"
    assert serialized["description"] == "Test description"
    assert serialized["parameters"]["properties"]["param1"]["type"] == "string"
    assert serialized["strict"] is True
    assert serialized["requires_confirmation"] is True
    assert serialized["external_execution"] is True

    # Unset / non-serializable fields are omitted entirely.
    for omitted in ("instructions", "add_instructions", "entrypoint"):
        assert omitted not in serialized
def test_function_from_callable():
    """Test creating a Function from a callable."""

    def test_func(param1: str, param2: int = 42) -> str:
        """Test function with parameters.
        Args:
            param1: First parameter
            param2: Second parameter with default value
        """
        return f"{param1}-{param2}"

    fn = Function.from_callable(test_func)

    assert fn.name == "test_func"
    assert "Test function with parameters" in fn.description

    props = fn.parameters["properties"]
    assert "param1" in props
    assert "param2" in props
    assert props["param1"]["type"] == "string"
    assert props["param2"]["type"] == "integer"

    # param2 carries a default value, so it must not be required.
    assert "param1" in fn.parameters["required"]
    assert "param2" not in fn.parameters["required"]
def test_wrap_callable():
    """Test wrapping a callable."""

    @tool
    def test_func(param1: str, param2: int) -> str:
        """Test function with parameters."""
        return f"{param1}-{param2}"

    assert isinstance(test_func, Function)
    assert test_func.entrypoint is not None

    # process_entrypoint must be idempotent: running it twice applies the
    # validation wrapper once and keeps the wrapped entrypoint functional.
    for _ in range(2):
        test_func.process_entrypoint()
        assert isinstance(test_func, Function)
        assert test_func.entrypoint is not None
        assert test_func.entrypoint(param1="test", param2=42) == "test-42"
        # Missing required arguments are rejected by the validation wrapper.
        with pytest.raises(ValidationError):
            test_func.entrypoint(param1="test")
        assert test_func.entrypoint._wrapped_for_validation is True
def test_function_from_callable_strict():
    """Test creating a Function from a callable with strict mode."""

    def test_func(param1: str, param2: int = 42) -> str:
        """Test function with parameters."""
        return f"{param1}-{param2}"

    fn = Function.from_callable(test_func, strict=True)

    assert fn.name == "test_func"
    # Strict mode promotes every parameter to required, defaults included.
    for param in ("param1", "param2"):
        assert param in fn.parameters["required"]
def test_function_process_entrypoint():
    """Test processing the entrypoint of a Function."""

    def test_func(param1: str, param2: int = 42) -> str:
        """Test function with parameters."""
        return f"{param1}-{param2}"

    fn = Function(name="test_func", entrypoint=test_func, skip_entrypoint_processing=False)
    fn.process_entrypoint()

    # Schema is inferred from the signature: types from hints, required
    # from the absence of defaults.
    props = fn.parameters["properties"]
    assert props["param1"]["type"] == "string"
    assert props["param2"]["type"] == "integer"
    assert "param1" in fn.parameters["required"]
    assert "param2" not in fn.parameters["required"]
def test_function_process_entrypoint_with_user_input():
    """Test processing the entrypoint with user input fields."""

    def test_func(param1: str, param2: int = 42) -> str:
        """Test function with parameters."""
        return f"{param1}-{param2}"

    fn = Function(name="test_func", entrypoint=test_func, requires_user_input=True, user_input_fields=["param1"])
    fn.process_entrypoint()

    # A user-input schema entry is built for every signature parameter.
    assert fn.user_input_schema is not None
    expected_fields = [("param1", str), ("param2", int)]
    assert len(fn.user_input_schema) == len(expected_fields)
    for field, (name, field_type) in zip(fn.user_input_schema, expected_fields):
        assert field.name == name
        assert field.field_type is field_type
def test_function_process_entrypoint_skip_processing():
    """Test that entrypoint processing is skipped when skip_entrypoint_processing is True."""

    def test_func(param1: str, param2: int = 42) -> str:
        """Test function with parameters."""
        return f"{param1}-{param2}"

    hand_written_schema = {"type": "object", "properties": {"custom": {"type": "string"}}, "required": ["custom"]}
    fn = Function(
        name="test_func", entrypoint=test_func, parameters=hand_written_schema, skip_entrypoint_processing=True
    )
    fn.process_entrypoint()

    # With processing skipped, the hand-written schema survives untouched.
    assert fn.parameters == hand_written_schema
def test_function_process_schema_for_strict():
    """Test processing schema for strict mode."""
    fn = Function(
        name="test_func",
        parameters={
            "type": "object",
            "properties": {"param1": {"type": "string"}, "param2": {"type": "number"}},
            "required": ["param1"],
        },
    )

    fn.process_schema_for_strict()

    # Strict processing marks every property as required.
    for param in ("param1", "param2"):
        assert param in fn.parameters["required"]
def test_function_cache_key_generation():
    """Test generation of cache keys for function calls."""
    fn = Function(name="test_func", cache_results=True, cache_dir="/tmp")

    cache_key = fn._get_cache_key({"param1": "value1", "param2": 42}, {"extra": "data"})

    assert isinstance(cache_key, str)
    # Known digest: keys are hashed via json.dumps with sort_keys=True, so
    # this value is stable across runs and argument orderings.
    assert cache_key == "d76d42a06e815b6402e24486f1f61805"
def test_function_cache_key_dict_order_independence():
    """Test that cache keys are identical regardless of dictionary key order."""
    fn = Function(name="test_func", cache_results=True, cache_dir="/tmp")

    # Same payload, three different insertion orders.
    orderings = [
        {"param1": "value1", "param2": 42, "param3": "value3"},
        {"param3": "value3", "param1": "value1", "param2": 42},
        {"param2": 42, "param3": "value3", "param1": "value1"},
    ]

    keys = [fn._get_cache_key(args) for args in orderings]

    # Every ordering must hash to one identical cache key.
    assert len(set(keys)) == 1
def test_function_cache_file_path():
    """Test generation of cache file paths."""
    fn = Function(name="test_func", cache_results=True, cache_dir="/tmp")

    cache_file = fn._get_cache_file_path("test_key")

    # The path lives under the cache dir and embeds both the function name
    # and the cache key.
    assert cache_file.startswith("/tmp/")
    for fragment in ("test_func", "test_key"):
        assert fragment in cache_file
def test_function_cache_operations(tmp_path):
    """Test caching operations (save and retrieve)."""
    import json
    import os

    fn = Function(name="test_func", cache_results=True, cache_dir=str(tmp_path))
    payload = {"result": "test_data"}
    cache_file = os.path.join(str(tmp_path), "test_cache.json")

    # Save, then verify the on-disk JSON wraps the payload under "result".
    fn._save_to_cache(cache_file, payload)
    assert os.path.exists(cache_file)
    with open(cache_file, "r") as f:
        on_disk = json.load(f)
    assert on_disk["result"] == {"result": "test_data"}

    # A round-trip read returns the original payload.
    assert fn._get_cached_result(cache_file) == payload

    # A missing cache file yields None rather than raising.
    missing = os.path.join(str(tmp_path), "non_existent.json")
    assert fn._get_cached_result(missing) is None
def test_function_cache_ttl(tmp_path):
    """Test cache TTL functionality."""
    import os
    import time

    fn = Function(
        name="test_func",
        cache_results=True,
        cache_dir=str(tmp_path),
        cache_ttl=1,  # expire entries after one second
    )
    payload = {"result": "test_data"}
    cache_file = os.path.join(str(tmp_path), "test_cache.json")
    fn._save_to_cache(cache_file, payload)

    # A fresh entry is served back immediately...
    assert fn._get_cached_result(cache_file) == payload

    # ...but once the TTL elapses the entry is treated as missing.
    time.sleep(1.1)
    assert fn._get_cached_result(cache_file) is None
def test_function_call_initialization():
    """Test FunctionCall initialization."""
    fn = Function(name="test_func")

    # Bare construction: every optional field defaults to None.
    fc = FunctionCall(function=fn)
    assert fc.function == fn
    for attr in ("arguments", "result", "call_id", "error"):
        assert getattr(fc, attr) is None

    # Fully-populated construction: every field is stored verbatim.
    fc = FunctionCall(
        function=fn, arguments={"param1": "value1"}, result="test_result", call_id="test_id", error="test_error"
    )
    assert fc.function == fn
    assert fc.arguments == {"param1": "value1"}
    assert fc.result == "test_result"
    assert fc.call_id == "test_id"
    assert fc.error == "test_error"
def test_function_call_get_call_str():
    """Test the get_call_str method."""
    fn = Function(name="test_func", description="Test function")
    fc = FunctionCall(function=fn, arguments={"param1": "value1", "param2": 42})

    call_str = fc.get_call_str()

    # The rendering mentions the function name plus every argument name and value.
    for fragment in ("test_func", "param1", "value1", "param2", "42"):
        assert fragment in call_str
def test_function_call_execution():
    """Test function call execution."""

    def test_func(param1: str, param2: int = 42) -> str:
        return f"{param1}-{param2}"

    fn = Function(name="test_func", entrypoint=test_func)
    fc = FunctionCall(function=fn, arguments={"param1": "value1", "param2": 42})

    outcome = fc.execute()

    assert outcome.status == "success"
    assert outcome.result == "value1-42"
    assert outcome.error is None
def test_function_call_execution_with_error():
    """Test function call execution with error handling."""

    def test_func(param1: str) -> str:
        raise ValueError("Test error")

    fn = Function(name="test_func", entrypoint=test_func)
    fc = FunctionCall(function=fn, arguments={"param1": "value1"})

    outcome = fc.execute()

    # The exception is captured on the result object, not propagated.
    assert outcome.status == "failure"
    assert outcome.error is not None
    assert "Test error" in outcome.error
def test_function_call_with_hooks():
    """Test function call execution with pre and post hooks."""
    invoked = {"pre": False, "post": False}

    def pre_hook():
        invoked["pre"] = True

    def post_hook():
        invoked["post"] = True

    def test_func(param1: str) -> str:
        return f"processed-{param1}"

    fn = Function(name="test_func", entrypoint=test_func, pre_hook=pre_hook, post_hook=post_hook)
    fc = FunctionCall(function=fn, arguments={"param1": "value1"})

    outcome = fc.execute()

    assert outcome.status == "success"
    assert outcome.result == "processed-value1"
    # Both hooks must have fired around the call.
    assert invoked["pre"]
    assert invoked["post"]
def test_function_call_with_tool_hooks():
    """Test function call execution with tool hooks."""
    hook_calls = []

    def tool_hook(function_name: str, function_call: Callable, arguments: Dict[str, Any]):
        # Record entry, delegate to the wrapped callable, record the result.
        hook_calls.append(("before", function_name, arguments))
        result = function_call(**arguments)
        hook_calls.append(("after", function_name, result))
        return result

    @tool(tool_hooks=[tool_hook])
    def test_func(param1: str) -> str:
        return f"processed-{param1}"

    test_func.process_entrypoint()
    outcome = FunctionCall(function=test_func, arguments={"param1": "value1"}).execute()

    assert outcome.status == "success"
    assert outcome.result == "processed-value1"
    # Exactly one "before" and one "after" entry wrap the real invocation.
    assert len(hook_calls) == 2
    before, after = hook_calls
    assert before[0] == "before"
    assert before[1] == "test_func"
    assert after[0] == "after"
    assert after[2] == "processed-value1"
@pytest.mark.asyncio
async def test_function_call_async_execution():
    """Test async function call execution."""

    async def test_func(param1: str, param2: int = 42) -> str:
        return f"{param1}-{param2}"

    fn = Function(name="test_func", entrypoint=test_func)
    fc = FunctionCall(function=fn, arguments={"param1": "value1", "param2": 42})

    outcome = await fc.aexecute()

    assert outcome.status == "success"
    assert outcome.result == "value1-42"
    assert outcome.error is None
@pytest.mark.asyncio
async def test_function_call_async_execution_with_error():
    """Test async function call execution with error handling."""

    async def test_func(param1: str) -> str:
        raise ValueError("Test error")

    fn = Function(name="test_func", entrypoint=test_func)
    fc = FunctionCall(function=fn, arguments={"param1": "value1"})

    outcome = await fc.aexecute()

    # Failures surface on the result object instead of raising.
    assert outcome.status == "failure"
    assert outcome.error is not None
    assert "Test error" in outcome.error
@pytest.mark.asyncio
async def test_function_call_async_with_hooks():
    """Test async function call execution with pre and post hooks."""
    invoked = {"pre": False, "post": False}

    async def pre_hook():
        invoked["pre"] = True

    async def post_hook():
        invoked["post"] = True

    @tool(pre_hook=pre_hook, post_hook=post_hook)
    async def test_func(param1: str) -> str:
        return f"processed-{param1}"

    test_func.process_entrypoint()
    outcome = await FunctionCall(function=test_func, arguments={"param1": "value1"}).aexecute()

    assert outcome.status == "success"
    assert outcome.result == "processed-value1"
    # Both async hooks must have been awaited around the call.
    assert invoked["pre"]
    assert invoked["post"]
@pytest.mark.asyncio
async def test_function_call_async_with_tool_hooks():
    """Test async function call execution with tool hooks."""
    hook_calls = []

    async def tool_hook(function_name: str, function_call: Callable, arguments: Dict[str, Any]):
        # Record entry, await the wrapped coroutine, record the result.
        hook_calls.append(("before", function_name, arguments))
        result = await function_call(**arguments)
        hook_calls.append(("after", function_name, result))
        return result

    @tool(tool_hooks=[tool_hook])
    async def test_func(param1: str) -> str:
        return f"processed-{param1}"

    test_func.process_entrypoint()
    outcome = await FunctionCall(function=test_func, arguments={"param1": "value1"}).aexecute()

    assert outcome.status == "success"
    assert outcome.result == "processed-value1"
    # Exactly one "before" and one "after" entry wrap the real invocation.
    assert len(hook_calls) == 2
    before, after = hook_calls
    assert before[0] == "before"
    assert before[1] == "test_func"
    assert after[0] == "after"
    assert after[2] == "processed-value1"
def test_tool_decorator_basic():
    """Test basic @tool decorator usage."""

    @tool
    def basic_func() -> str:
        """Basic test function."""
        return "test"

    assert isinstance(basic_func, Function)
    assert basic_func.name == "basic_func"
    assert basic_func.description == "Basic test function."
    assert basic_func.entrypoint is not None

    # A zero-argument function yields an empty object schema.
    assert basic_func.parameters["type"] == "object"
    assert basic_func.parameters["properties"] == {}
    assert basic_func.parameters["required"] == []
def test_tool_decorator_with_config():
    """Test @tool decorator with configuration options."""

    @tool(
        name="custom_name",
        description="Custom description",
        strict=True,
        instructions="Custom instructions",
        add_instructions=False,
        show_result=True,
        stop_after_tool_call=True,
        requires_confirmation=True,
        cache_results=True,
        cache_dir="/tmp",
        cache_ttl=7200,
    )
    def configured_func() -> str:
        """Original docstring."""
        return "test"

    assert isinstance(configured_func, Function)
    # Every decorator option must land verbatim on the resulting Function
    # (name/description override the function's own name and docstring).
    expected = {
        "name": "custom_name",
        "description": "Custom description",
        "strict": True,
        "instructions": "Custom instructions",
        "add_instructions": False,
        "show_result": True,
        "stop_after_tool_call": True,
        "requires_confirmation": True,
        "cache_results": True,
        "cache_dir": "/tmp",
        "cache_ttl": 7200,
    }
    for attr, value in expected.items():
        assert getattr(configured_func, attr) == value
def test_tool_decorator_with_user_input():
    """Test @tool decorator with user input configuration."""

    @tool(requires_user_input=True, user_input_fields=["param1"])
    def user_input_func(param1: str, param2: int = 42) -> str:
        """Function requiring user input."""
        return f"{param1}-{param2}"

    assert isinstance(user_input_func, Function)
    assert user_input_func.requires_user_input is True
    assert user_input_func.user_input_fields == ["param1"]

    user_input_func.process_entrypoint()

    # The user-input schema covers every signature parameter, not just the
    # ones listed in user_input_fields.
    assert user_input_func.user_input_schema is not None
    expected_fields = [("param1", str), ("param2", int)]
    assert len(user_input_func.user_input_schema) == len(expected_fields)
    for field, (name, field_type) in zip(user_input_func.user_input_schema, expected_fields):
        assert field.name == name
        assert field.field_type is field_type
def test_tool_decorator_with_hooks():
    """Test @tool decorator with pre and post hooks.

    Only hook *registration* is verified here; hook execution is exercised
    by test_function_call_with_hooks.
    """

    # The hooks are never invoked in this test, so they carry no behavior.
    # (Previous version tracked nonlocal "called" flags that were never
    # asserted — dead code, removed.)
    def pre_hook():
        pass

    def post_hook():
        pass

    @tool(pre_hook=pre_hook, post_hook=post_hook)
    def hooked_func() -> str:
        return "test"

    assert isinstance(hooked_func, Function)
    # The decorator must store the exact hook callables it was given.
    assert hooked_func.pre_hook == pre_hook
    assert hooked_func.post_hook == post_hook
def test_tool_decorator_with_tool_hooks():
    """Test @tool decorator with tool hooks.

    Only hook *registration* is verified here; hook execution is exercised
    by test_function_call_with_tool_hooks.
    """

    # The hook is never invoked in this test; it only needs to be a valid
    # tool-hook callable. (Previous version kept a "hook_calls" log that
    # was never asserted — dead code, removed.)
    def tool_hook(function_name: str, function_call: Callable, arguments: Dict[str, Any]):
        return function_call(**arguments)

    @tool(tool_hooks=[tool_hook])
    def tool_hooked_func(param1: str) -> str:
        return f"processed-{param1}"

    assert isinstance(tool_hooked_func, Function)
    # The decorator must store the exact hook list it was given.
    assert tool_hooked_func.tool_hooks == [tool_hook]
def test_tool_decorator_async():
    """Test @tool decorator with async function."""

    @tool
    async def async_func() -> str:
        """Async test function."""
        return "test"

    # Wrapping a coroutine function still yields a normal Function object.
    assert isinstance(async_func, Function)
    assert async_func.name == "async_func"
    assert async_func.description == "Async test function."
    assert async_func.entrypoint is not None
def test_tool_decorator_async_generator():
    """Test @tool decorator with async generator function."""

    @tool
    async def async_gen_func():
        """Async generator test function."""
        yield "test"

    # An async generator is wrapped just like a plain coroutine function.
    assert isinstance(async_gen_func, Function)
    assert async_gen_func.name == "async_gen_func"
    assert async_gen_func.description == "Async generator test function."
    assert async_gen_func.entrypoint is not None
def test_tool_decorator_invalid_config():
    """Test @tool decorator with invalid configuration."""
    # Unknown keyword arguments must be rejected eagerly, at decoration
    # time, rather than silently ignored.
    with pytest.raises(ValueError, match="Invalid tool configuration arguments"):
        @tool(invalid_arg=True)
        def invalid_func():
            pass
def test_tool_decorator_exclusive_flags():
    """Test @tool decorator with mutually exclusive flags."""
    # The three interaction modes are mutually exclusive; combining any two
    # must raise at decoration time.
    with pytest.raises(
        ValueError,
        match="Only one of 'requires_user_input', 'requires_confirmation', or 'external_execution' can be set to True",
    ):
        @tool(requires_user_input=True, requires_confirmation=True)
        def exclusive_flags_func():
            pass
def test_tool_decorator_with_agent_team_params():
    """Test @tool decorator with agent and team parameters."""

    @tool
    def agent_team_func(agent: Any, team: Any, param1: str) -> str:
        """Function with agent and team parameters."""
        return f"{param1}"

    assert isinstance(agent_team_func, Function)
    agent_team_func.process_entrypoint()

    # Framework-injected parameters ("agent"/"team") are excluded from the
    # schema exposed to the model; ordinary parameters remain.
    props = agent_team_func.parameters["properties"]
    assert "agent" not in props
    assert "team" not in props
    assert "param1" in props
    assert props["param1"]["type"] == "string"
def test_tool_decorator_with_agent_team_type_annotations():
    """Test @tool decorator skips validation when parameter types are Agent/Team,
    even when parameter names differ from 'agent'/'team' (issue #6344)."""
    from agno.agent.agent import Agent
    from agno.team.team import Team

    @tool
    def func_with_agent_type(my_agent: Agent, query: str) -> str:
        """Function with Agent type but non-standard parameter name."""
        return query

    @tool
    def func_with_team_type(my_team: Team, query: str) -> str:
        """Function with Team type but non-standard parameter name."""
        return query

    # Same expectations for both Agent- and Team-typed parameters.
    for fn, injected_param in ((func_with_agent_type, "my_agent"), (func_with_team_type, "my_team")):
        assert isinstance(fn, Function)
        fn.process_entrypoint()
        # Validation wrapping is skipped because of the Agent/Team annotation...
        assert not getattr(fn.entrypoint, "_wrapped_for_validation", False)
        # ...and the injected parameter never reaches the exposed schema.
        assert "query" in fn.parameters["properties"]
        assert injected_param not in fn.parameters["properties"]
def test_tool_decorator_with_complex_types():
    """Test @tool decorator with complex parameter types."""
    from typing import Dict, List, Optional

    @tool
    def complex_types_func(param1: List[str], param2: Dict[str, int], param3: Optional[bool] = None) -> str:
        """Function with complex parameter types."""
        return "test"

    assert isinstance(complex_types_func, Function)
    complex_types_func.process_entrypoint()

    # Container and Optional annotations map onto JSON-schema types.
    props = complex_types_func.parameters["properties"]
    assert props["param1"]["type"] == "array"
    assert props["param1"]["items"]["type"] == "string"
    assert props["param2"]["type"] == "object"
    assert props["param3"]["type"] == "boolean"
    # Defaulted (optional) parameters must not be required.
    assert "param3" not in complex_types_func.parameters["required"]
def test_function_cache_pydantic_model(tmp_path):
    """Test caching operations with Pydantic BaseModel results."""
    import json
    import os

    class OrderResponse(BaseModel):
        success: bool
        data: Optional[dict] = None

    fn = Function(name="test_func", cache_results=True, cache_dir=str(tmp_path))
    model_result = OrderResponse(success=True, data={"id": 123, "status": "delivered"})
    cache_file = os.path.join(str(tmp_path), "test_pydantic_cache.json")

    fn._save_to_cache(cache_file, model_result)

    expected = {"success": True, "data": {"id": 123, "status": "delivered"}}

    # The model is serialized to its dict form on disk...
    assert os.path.exists(cache_file)
    with open(cache_file, "r") as f:
        on_disk = json.load(f)
    assert on_disk["result"] == expected

    # ...and reads back as a plain dict, not a model instance.
    assert fn._get_cached_result(cache_file) == expected
def test_function_cache_pydantic_model_nested(tmp_path):
    """Test caching operations with nested Pydantic BaseModel results."""
    import json
    import os

    class Address(BaseModel):
        street: str
        city: str

    class User(BaseModel):
        name: str
        address: Address

    fn = Function(name="test_func", cache_results=True, cache_dir=str(tmp_path))
    model_result = User(name="John", address=Address(street="123 Main St", city="Springfield"))
    cache_file = os.path.join(str(tmp_path), "test_nested_cache.json")

    fn._save_to_cache(cache_file, model_result)

    expected = {"name": "John", "address": {"street": "123 Main St", "city": "Springfield"}}

    # Nested models serialize recursively to plain dicts on disk...
    assert os.path.exists(cache_file)
    with open(cache_file, "r") as f:
        on_disk = json.load(f)
    assert on_disk["result"] == expected

    # ...and the cached read returns the same dict structure.
    assert fn._get_cached_result(cache_file) == expected
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_functions.py",
"license": "Apache License 2.0",
"lines": 611,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/embedder/test_huggingface_embedder.py | import pytest
from agno.knowledge.embedder.huggingface import HuggingfaceCustomEmbedder
@pytest.fixture
def embedder():
    # Default-configured embedder; reads HUGGINGFACE_API_KEY from the env.
    return HuggingfaceCustomEmbedder()
def test_embedder_initialization(embedder):
    """Test that the embedder initializes correctly"""
    assert embedder is not None
    # Default model id for the Huggingface custom embedder.
    assert embedder.id == "intfloat/multilingual-e5-large"
    assert embedder.api_key is not None, "HUGGINGFACE_API_KEY env variable is not set"
def test_get_embedding(embedder):
    """Test that we can get embeddings for a simple text"""
    vector = embedder.get_embedding("The quick brown fox jumps over the lazy dog.")

    # Expect a non-empty list of floats.
    assert isinstance(vector, list)
    assert len(vector) > 0
    assert all(isinstance(component, float) for component in vector)
def test_special_characters(embedder):
    """Test that special characters are handled correctly"""
    # Mixed punctuation, non-Latin script, digits, and symbols.
    vector = embedder.get_embedding("Hello, world! こんにちは 123 @#$%")

    assert isinstance(vector, list)
    assert len(vector) > 0
def test_long_text(embedder):
    """Test that long text is handled correctly"""
    long_text = " ".join(["word"] * 1000)  # 1000 repeated tokens

    vector = embedder.get_embedding(long_text)

    assert isinstance(vector, list)
    assert len(vector) > 0
def test_embedding_consistency(embedder):
    """Test that embeddings for the same text are consistent"""
    first = embedder.get_embedding("Consistency test")
    second = embedder.get_embedding("Consistency test")

    # Same length and component-wise match within floating-point tolerance.
    assert len(first) == len(second)
    assert all(abs(a - b) < 1e-6 for a, b in zip(first, second))
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/embedder/test_huggingface_embedder.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/user_control_flow.py | from textwrap import dedent
from typing import Optional
from agno.tools import Toolkit
class UserControlFlowTools(Toolkit):
    """Toolkit that lets an agent pause its run and collect input from the user.

    The single `get_user_input` tool is a deliberate no-op: registering it is
    enough for the agent runtime to interrupt the run and wait for the user.
    """

    def __init__(
        self,
        instructions: Optional[str] = None,
        add_instructions: bool = True,
        enable_get_user_input: bool = True,
        all: bool = False,
        **kwargs,
    ):
        """A toolkit that provides the ability for the agent to interrupt the agent run and interact with the user.

        Args:
            instructions: Custom toolkit instructions; falls back to
                DEFAULT_INSTRUCTIONS when None.
            add_instructions: Whether the instructions are surfaced to the agent.
            enable_get_user_input: Register the `get_user_input` tool.
            all: Register every tool in the toolkit regardless of the
                individual enable_* flags. (Name shadows the builtin, but it
                is part of the public keyword interface.)
            **kwargs: Forwarded to the Toolkit base class.
        """
        if instructions is None:
            self.instructions = self.DEFAULT_INSTRUCTIONS
        else:
            self.instructions = instructions
        tools = []
        if all or enable_get_user_input:
            tools.append(self.get_user_input)
        super().__init__(
            name="user_control_flow_tools",
            instructions=self.instructions,
            add_instructions=add_instructions,
            tools=tools,
            **kwargs,
        )

    # NOTE: this docstring is the tool description sent to the model at
    # runtime — do not edit it for style without considering prompt impact.
    def get_user_input(self, user_input_fields: list[dict]) -> str:
        """Use this tool to get user input for the given fields. Provide all the fields that you require the user to fill in, as if they were filling in a form.
        Args:
            user_input_fields (list[dict[str, str]]): A list of dictionaries, each containing the following keys:
                - field_name: The name of the field to get input for.
                - field_type: The type of the field to get input for. Only valid python types are supported (e.g. str, int, float, bool, list, dict, etc.).
                - field_description: A description of the field to get input for.
        """
        # Nothing needs to be executed here, the agent logic will interrupt the run and wait for the user input
        return "User input received"

    # --------------------------------------------------------------------------------
    # Default instructions
    # --------------------------------------------------------------------------------
    # Runtime prompt text injected into the agent's system message; treat as
    # behavior, not documentation.
    DEFAULT_INSTRUCTIONS = dedent(
        """\
        You have access to the `get_user_input` tool to get user input for the given fields.
        1. **Get User Input**:
        - Purpose: When you have call a tool/function where you don't have enough information, don't say you can't do it, just use the `get_user_input` tool to get the information you need from the user.
        - Usage: Call `get_user_input` with the fields you require the user to fill in for you to continue your task.
        ## IMPORTANT GUIDELINES
        - **Don't respond and ask the user for information.** Just use the `get_user_input` tool to get the information you need from the user.
        - **Don't make up information you don't have.** If you don't have the information, use the `get_user_input` tool to get the information you need from the user.
        - **Include only the required fields.** Include only the required fields in the `user_input_fields` parameter of the `get_user_input` tool. Don't include fields you already have the information for.
        - **Provide a clear and concise description of the field.** Clearly describe the field in the `field_description` parameter of the `user_input_fields` parameter of the `get_user_input` tool.
        - **Provide a type for the field.** Fill the `field_type` parameter of the `user_input_fields` parameter of the `get_user_input` tool with the type of the field.
        ## INPUT VALIDATION AND CONVERSION
        - **Boolean fields**: Only explicit positive responses are considered True:
          * True values: 'true', 'yes', 'y', '1', 'on', 't', 'True', 'YES', 'Y', 'T'
          * False values: Everything else including 'false', 'no', 'n', '0', 'off', 'f', empty strings, unanswered fields, or any other input
          * **CRITICAL**: Empty/unanswered fields should be treated as False (not selected)
        - **Users can leave fields unanswered.** Empty responses are valid and should be treated as False for boolean fields.
        - **NEVER ask for the same field twice.** Once you receive ANY user input for a field (including empty strings), accept it and move on.
        - **DO NOT validate or re-request input.** Accept whatever the user provides and convert it appropriately.
        - **Proceed with only the fields that were explicitly answered as True.** Skip or ignore fields that are False/unanswered.
        - **Complete the task immediately after receiving all user inputs, do not ask for confirmation or re-validation.**
        """
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/user_control_flow.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/unit/tools/test_firecrawl.py | import json
import os
from unittest.mock import Mock, patch
import pytest
from firecrawl import FirecrawlApp # noqa
from agno.tools.firecrawl import FirecrawlTools
# Real key from the environment when available, otherwise a mock-friendly placeholder.
TEST_API_KEY = os.environ.get("FIRECRAWL_API_KEY", "test_api_key")
# Firecrawl endpoint used when exercising explicit api_url handling.
TEST_API_URL = "https://api.firecrawl.dev"
@pytest.fixture
def mock_firecrawl():
    """Create a mock FirecrawlApp instance, keeping the patch active for the test."""
    with patch("agno.tools.firecrawl.FirecrawlApp") as mock_firecrawl_cls:
        mock_app = Mock()
        mock_firecrawl_cls.return_value = mock_app
        # BUG FIX: must `yield` (not `return`) from inside the `with` block —
        # returning exits the context manager immediately, undoing the patch
        # before the test body ever runs.
        yield mock_app
@pytest.fixture
def firecrawl_tools(mock_firecrawl):
    """Create a FirecrawlTools instance with mocked dependencies."""
    with patch.dict("os.environ", {"FIRECRAWL_API_KEY": TEST_API_KEY}):
        tools = FirecrawlTools()
        # Directly set the app to our mock to avoid initialization issues
        tools.app = mock_firecrawl
        # BUG FIX: `yield` keeps the env patch active for the whole test;
        # `return` would exit the patch.dict context immediately, so any code
        # reading the env var during the test would see the real environment.
        yield tools
def test_init_with_env_vars():
    """Test initialization with environment variables."""
    with patch("agno.tools.firecrawl.FirecrawlApp"):
        # clear=True guarantees the key can only come from the patched env.
        with patch.dict("os.environ", {"FIRECRAWL_API_KEY": TEST_API_KEY}, clear=True):
            tools = FirecrawlTools()

            assert tools.api_key == TEST_API_KEY
            assert tools.formats is None
            assert tools.limit == 10  # default crawl limit
            assert tools.app is not None
def test_init_with_params():
    """Test initialization with parameters."""
    with patch("agno.tools.firecrawl.FirecrawlApp"):
        tools = FirecrawlTools(api_key="param_api_key", formats=["html", "text"], limit=5, api_url=TEST_API_URL)

        # Explicit constructor arguments override environment defaults.
        assert tools.api_key == "param_api_key"
        assert tools.formats == ["html", "text"]
        assert tools.limit == 5
        assert tools.app is not None
def test_scrape_website(firecrawl_tools, mock_firecrawl):
    """Test scrape_website method."""
    scrape_payload = {
        "url": "https://example.com",
        "content": "Test content",
        "status": "success",
    }
    mock_response = Mock()
    mock_response.model_dump.return_value = scrape_payload
    mock_firecrawl.scrape.return_value = mock_response

    parsed = json.loads(firecrawl_tools.scrape_website("https://example.com"))

    # The tool returns the scraped payload serialized as JSON.
    assert parsed == scrape_payload
    # No formats configured, so the URL is the only argument.
    mock_firecrawl.scrape.assert_called_once_with("https://example.com")
def test_scrape_website_with_formats(firecrawl_tools, mock_firecrawl):
    """scrape_website should forward the configured formats to the client."""
    payload = {
        "url": "https://example.com",
        "content": "Test content",
        "status": "success",
    }
    fake_response = Mock()
    fake_response.model_dump.return_value = payload
    mock_firecrawl.scrape.return_value = fake_response
    firecrawl_tools.formats = ["html", "text"]

    parsed = json.loads(firecrawl_tools.scrape_website("https://example.com"))

    for key, value in payload.items():
        assert parsed[key] == value
    mock_firecrawl.scrape.assert_called_once_with("https://example.com", formats=["html", "text"])
def test_crawl_website(firecrawl_tools, mock_firecrawl):
    """crawl_website should serialize the crawl response and use the default limit."""
    payload = {
        "url": "https://example.com",
        "pages": ["page1", "page2"],
        "status": "success",
    }
    fake_response = Mock()
    fake_response.model_dump.return_value = payload
    mock_firecrawl.crawl.return_value = fake_response

    parsed = json.loads(firecrawl_tools.crawl_website("https://example.com"))

    for key, value in payload.items():
        assert parsed[key] == value
    mock_firecrawl.crawl.assert_called_once_with("https://example.com", limit=10, poll_interval=30)
def test_crawl_website_with_custom_limit(firecrawl_tools, mock_firecrawl):
    """An explicit limit argument should be used even when the toolkit default is unset."""
    firecrawl_tools.limit = None  # drop the toolkit-level default
    payload = {
        "url": "https://example.com",
        "pages": ["page1", "page2"],
        "status": "success",
    }
    fake_response = Mock()
    fake_response.model_dump.return_value = payload
    mock_firecrawl.crawl.return_value = fake_response

    parsed = json.loads(firecrawl_tools.crawl_website("https://example.com", limit=5))

    for key, value in payload.items():
        assert parsed[key] == value
    mock_firecrawl.crawl.assert_called_once_with("https://example.com", limit=5, poll_interval=30)
def test_map_website(firecrawl_tools, mock_firecrawl):
    """map_website should serialize the site map returned by the client."""
    payload = {
        "url": "https://example.com",
        "sitemap": {"page1": ["link1", "link2"]},
        "status": "success",
    }
    fake_response = Mock()
    fake_response.model_dump.return_value = payload
    mock_firecrawl.map.return_value = fake_response

    parsed = json.loads(firecrawl_tools.map_website("https://example.com"))

    for key, value in payload.items():
        assert parsed[key] == value
    mock_firecrawl.map.assert_called_once_with("https://example.com")
def test_search(firecrawl_tools, mock_firecrawl):
    """search_web should return the search data serialized as JSON."""
    data = {"query": "test query", "results": ["result1", "result2"], "status": "success"}
    fake_response = Mock()
    fake_response.success = True
    fake_response.data = data
    mock_firecrawl.search.return_value = fake_response

    parsed = json.loads(firecrawl_tools.search_web("test query"))

    for key, value in data.items():
        assert parsed[key] == value
    mock_firecrawl.search.assert_called_once_with("test query", limit=10)
def test_search_with_error(firecrawl_tools, mock_firecrawl):
    """search_web should surface a failed response as a plain error string."""
    fake_response = Mock()
    fake_response.success = False
    fake_response.error = "Search failed"
    mock_firecrawl.search.return_value = fake_response

    outcome = firecrawl_tools.search_web("test query")

    assert outcome == "Error searching with the Firecrawl tool: Search failed"
    mock_firecrawl.search.assert_called_once_with("test query", limit=10)
def test_search_with_custom_params(firecrawl_tools, mock_firecrawl):
    """Custom search_params on the toolkit should be forwarded as keyword arguments."""
    data = {"query": "test query", "results": ["result1", "result2"], "status": "success"}
    fake_response = Mock()
    fake_response.success = True
    fake_response.data = data
    mock_firecrawl.search.return_value = fake_response
    firecrawl_tools.search_params = {"language": "en", "region": "us"}

    parsed = json.loads(firecrawl_tools.search_web("test query"))

    for key, value in data.items():
        assert parsed[key] == value
    mock_firecrawl.search.assert_called_once_with("test query", limit=10, language="en", region="us")
def test_search_tool_response(firecrawl_tools, mock_firecrawl):
    """Responses exposing only model_dump (no success flag) should still serialize."""
    fake_response = Mock(spec=["model_dump"])
    fake_response.model_dump.return_value = {
        "query": "test query",
        "results": ["result1", "result2"],
    }
    mock_firecrawl.search.return_value = fake_response

    parsed = json.loads(firecrawl_tools.search_web("test query"))

    assert parsed["query"] == "test query"
    assert parsed["results"] == ["result1", "result2"]
    mock_firecrawl.search.assert_called_once_with("test query", limit=10)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_firecrawl.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/mem0.py | import json
from os import getenv
from typing import Any, Dict, List, Optional, Union
from agno.run import RunContext
from agno.tools import Toolkit
from agno.utils.log import log_debug, log_error, log_warning
# Fail fast with an actionable message when the optional mem0 dependency is absent.
try:
    from mem0.client.main import MemoryClient
    from mem0.memory.main import Memory
except ImportError:
    raise ImportError("`mem0ai` package not found. Please install it with `pip install mem0ai`")
class Mem0Tools(Toolkit):
    """Toolkit exposing Mem0 memory operations (add, search, get-all, delete-all) as agent tools.

    Backend selection: when an API key is available the hosted Mem0 Platform
    ``MemoryClient`` is used; otherwise a local ``Memory`` instance is built
    (from ``config`` when given, else with default settings).
    """

    def __init__(
        self,
        config: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        user_id: Optional[str] = None,
        org_id: Optional[str] = None,
        project_id: Optional[str] = None,
        infer: bool = True,
        enable_add_memory: bool = True,
        enable_search_memory: bool = True,
        enable_get_all_memories: bool = True,
        enable_delete_all_memories: bool = True,
        all: bool = False,
        **kwargs,
    ):
        """Initialize the toolkit and the underlying Mem0 client.

        Args:
            config: Mem0 configuration dict for a local ``Memory`` backend.
            api_key: Mem0 Platform API key; falls back to the MEM0_API_KEY env var.
            user_id: Default user ID for every operation; when None the ID is
                resolved from the run context on each call.
            org_id: Mem0 organization ID; falls back to MEM0_ORG_ID.
            project_id: Mem0 project ID; falls back to MEM0_PROJECT_ID.
            infer: Forwarded to ``client.add`` on every add_memory call.
            enable_add_memory: Register the add_memory tool.
            enable_search_memory: Register the search_memory tool.
            enable_get_all_memories: Register the get_all_memories tool.
            enable_delete_all_memories: Register the delete_all_memories tool.
            all: Register every tool regardless of the individual flags
                (note: shadows the builtin ``all`` inside this method).
            **kwargs: Passed through to ``Toolkit``.

        Raises:
            ConnectionError: If the Mem0 client cannot be initialized.
        """
        # Collect only the enabled tool callables before handing them to Toolkit.
        tools: List[Any] = []
        if enable_add_memory or all:
            tools.append(self.add_memory)
        if enable_search_memory or all:
            tools.append(self.search_memory)
        if enable_get_all_memories or all:
            tools.append(self.get_all_memories)
        if enable_delete_all_memories or all:
            tools.append(self.delete_all_memories)
        super().__init__(name="mem0_tools", tools=tools, **kwargs)
        self.api_key = api_key or getenv("MEM0_API_KEY")
        self.user_id = user_id
        self.org_id = org_id or getenv("MEM0_ORG_ID")
        self.project_id = project_id or getenv("MEM0_PROJECT_ID")
        self.client: Union[Memory, MemoryClient]  # assigned in the try-block below
        self.infer = infer
        try:
            if self.api_key:
                # Hosted Mem0 Platform; org/project are only passed when set.
                log_debug("Using Mem0 Platform API key.")
                client_kwargs = {"api_key": self.api_key}
                if self.org_id:
                    client_kwargs["org_id"] = self.org_id
                if self.project_id:
                    client_kwargs["project_id"] = self.project_id
                self.client = MemoryClient(**client_kwargs)
            elif config is not None:
                log_debug("Using Mem0 with config.")
                self.client = Memory.from_config(config)
            else:
                log_debug("Initializing Mem0 with default settings.")
                self.client = Memory()
        except Exception as e:
            log_error(f"Failed to initialize Mem0 client: {e}")
            raise ConnectionError("Failed to initialize Mem0 client. Ensure API keys/config are set.") from e

    def _get_user_id(
        self,
        method_name: str,
        run_context: RunContext,
    ) -> str:
        """Resolve the effective user ID for an operation.

        Prefers the toolkit-level ``self.user_id`` and falls back to
        ``run_context.user_id``. NOTE: on failure this returns an
        "Error in <method>: ..." message string rather than raising —
        callers detect that prefix and short-circuit.
        """
        resolved_user_id = self.user_id
        if not resolved_user_id:
            try:
                resolved_user_id = run_context.user_id
            except Exception:
                # run_context may not expose user_id; treat as "not provided".
                pass
        if not resolved_user_id:
            error_msg = f"Error in {method_name}: A user_id must be provided in the method call."
            log_error(error_msg)
            return error_msg
        return resolved_user_id

    def add_memory(
        self,
        run_context: RunContext,
        content: Union[str, Dict[str, str]],
    ) -> str:
        """Add facts to the user's memory.
        Args:
            content(Union[str, Dict[str, str]]): The facts that should be stored.
        Example:
            content = "I live in NYC"
            content = {"Name": "John", "Age": 30, "Location": "New York"}
        Returns:
            str: JSON-encoded Mem0 response or an error message.
        """
        resolved_user_id = self._get_user_id("add_memory", run_context=run_context)
        # _get_user_id signals failure via an error-string prefix, not an exception.
        if isinstance(resolved_user_id, str) and resolved_user_id.startswith("Error in add_memory:"):
            return resolved_user_id
        try:
            # Normalize the payload to a single string before wrapping it in a message.
            if isinstance(content, dict):
                log_debug("Wrapping dict message into content string")
                content = json.dumps(content)
            elif not isinstance(content, str):
                content = str(content)
            messages_list = [{"role": "user", "content": content}]
            result = self.client.add(
                messages_list,
                user_id=resolved_user_id,
                infer=self.infer,
            )
            return json.dumps(result)
        except Exception as e:
            log_error(f"Error adding memory: {e}")
            return f"Error adding memory: {e}"

    def search_memory(
        self,
        run_context: RunContext,
        query: str,
    ) -> str:
        """Semantic search for *query* across the user's stored memories."""
        resolved_user_id = self._get_user_id("search_memory", run_context=run_context)
        if isinstance(resolved_user_id, str) and resolved_user_id.startswith("Error in search_memory:"):
            return resolved_user_id
        try:
            results = self.client.search(
                query=query,
                user_id=resolved_user_id,
            )
            # The client may return {"results": [...]} or a bare list; normalize to a list.
            if isinstance(results, dict) and "results" in results:
                search_results_list = results.get("results", [])
            elif isinstance(results, list):
                search_results_list = results
            else:
                log_warning(f"Unexpected return type from mem0.search: {type(results)}. Returning empty list.")
                search_results_list = []
            return json.dumps(search_results_list)
        except ValueError as ve:
            log_error(str(ve))
            return str(ve)
        except Exception as e:
            log_error(f"Error searching memory: {e}")
            return f"Error searching memory: {e}"

    def get_all_memories(self, run_context: RunContext) -> str:
        """Return **all** memories for the current user as a JSON string."""
        resolved_user_id = self._get_user_id("get_all_memories", run_context=run_context)
        if isinstance(resolved_user_id, str) and resolved_user_id.startswith("Error in get_all_memories:"):
            return resolved_user_id
        try:
            results = self.client.get_all(
                user_id=resolved_user_id,
            )
            # Same normalization as search_memory: accept dict-with-results or bare list.
            if isinstance(results, dict) and "results" in results:
                memories_list = results.get("results", [])
            elif isinstance(results, list):
                memories_list = results
            else:
                log_warning(f"Unexpected return type from mem0.get_all: {type(results)}. Returning empty list.")
                memories_list = []
            return json.dumps(memories_list)
        except ValueError as ve:
            log_error(str(ve))
            return str(ve)
        except Exception as e:
            log_error(f"Error getting all memories: {e}")
            return f"Error getting all memories: {e}"

    def delete_all_memories(self, run_context: RunContext) -> str:
        """Delete *all* memories associated with the current user"""
        resolved_user_id = self._get_user_id("delete_all_memories", run_context=run_context)
        if isinstance(resolved_user_id, str) and resolved_user_id.startswith("Error in delete_all_memories:"):
            # Wrap the user-id error so the caller sees a delete-specific message.
            error_msg = resolved_user_id
            log_error(error_msg)
            return f"Error deleting all memories: {error_msg}"
        try:
            self.client.delete_all(user_id=resolved_user_id)
            return f"Successfully deleted all memories for user_id: {resolved_user_id}."
        except Exception as e:
            log_error(f"Error deleting all memories: {e}")
            return f"Error deleting all memories: {e}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/mem0.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_mem0.py | import json
from unittest.mock import MagicMock
import pytest
from agno.run import RunContext
from agno.tools.mem0 import Mem0Tools
# Module-level mock classes; the autouse patch_mem0_library fixture installs
# these in place of mem0's Memory and MemoryClient for every test.
MockMemory = MagicMock()
MockMemoryClient = MagicMock()
@pytest.fixture(scope="function")
def mock_memory_instance():
    """Fresh local-Memory mock with canned return values for every operation."""
    instance = MockMemory()
    instance.reset_mock()
    canned = {
        "add": {"results": [{"id": "mem-add-123", "memory": "added memory", "event": "ADD"}]},
        "search": {"results": [{"id": "mem-search-456", "memory": "found memory", "score": 0.9}]},
        "get": {"id": "mem-get-789", "memory": "specific memory"},
        "update": {"message": "Memory updated successfully!"},
        "delete": None,
        "get_all": {"results": [{"id": "mem-all-1", "memory": "all mem 1"}]},
        "delete_all": None,
        "history": [{"event": "ADD", "memory_id": "hist-1"}],
    }
    for method, value in canned.items():
        getattr(instance, method).return_value = value
    return instance
@pytest.fixture(scope="function")
def mock_memory_client_instance():
    """Fresh MemoryClient mock with canned return values for every operation."""
    instance = MockMemoryClient()
    instance.reset_mock()
    canned = {
        "add": [{"id": "mem-client-add-123", "memory": "added client memory", "event": "ADD"}],
        "search": [{"id": "mem-client-search-456", "memory": "found client memory", "score": 0.8}],
        "get": {"id": "mem-client-get-789", "memory": "specific client memory"},
        "update": {"message": "Client memory updated successfully!"},
        "delete": None,
        "get_all": [{"id": "mem-client-all-1", "memory": "all client mem 1"}],
        "delete_all": None,
        "history": [{"event": "ADD", "memory_id": "client-hist-1"}],
    }
    for method, value in canned.items():
        getattr(instance, method).return_value = value
    return instance
@pytest.fixture(autouse=True)
def patch_mem0_library(monkeypatch, mock_memory_instance, mock_memory_client_instance):
    """Route agno's mem0 imports to the module-level mocks for every test (autouse)."""
    monkeypatch.setattr("agno.tools.mem0.Memory", MockMemory)
    monkeypatch.setattr("agno.tools.mem0.MemoryClient", MockMemoryClient)
    MockMemory.from_config.return_value = mock_memory_instance
    MockMemoryClient.return_value = mock_memory_client_instance
@pytest.fixture
def toolkit_config(monkeypatch):
    """Mem0Tools built from a config dict (local Memory path), with mocks reset."""
    # Clear call counts on both class-level mocks before constructing the toolkit.
    MockMemoryClient.reset_mock()
    MockMemory.from_config.reset_mock()
    # Make sure no API key leaks in from the environment (raising=False tolerates absence).
    monkeypatch.delenv("MEM0_API_KEY", raising=False)
    return Mem0Tools(config={}, user_id=None)
@pytest.fixture
def toolkit_api_key():
    """Mem0Tools built with an explicit API key (MemoryClient path), with mocks reset."""
    MockMemory.from_config.reset_mock()
    MockMemoryClient.reset_mock()
    return Mem0Tools(api_key="fake-api-key")
@pytest.fixture
def dummy_run_context():
    """Provide a bare-bones RunContext with no user_id attached."""
    context = RunContext(run_id="test-run-id", session_id="test-session-id", user_id=None)
    return context
class TestMem0Toolkit:
    """Unit tests for Mem0Tools covering both the local-Memory and API-client backends."""

    def test_init_with_config(self, toolkit_config, mock_memory_instance):
        """Config-based construction should go through Memory.from_config only."""
        assert toolkit_config is not None
        # Check the *instance* of client is the mock returned by Memory.from_config
        assert isinstance(toolkit_config.client, MagicMock)
        assert toolkit_config.client == mock_memory_instance  # Check it's the correct mock
        # Check the CLASS method MockMemory.from_config was called once
        MockMemory.from_config.assert_called_once_with({})
        MockMemoryClient.assert_not_called()

    def test_init_with_api_key(self, toolkit_api_key, mock_memory_client_instance):
        """API-key construction should instantiate MemoryClient only."""
        assert toolkit_api_key is not None
        assert isinstance(toolkit_api_key.client, MagicMock)
        assert toolkit_api_key.client == mock_memory_client_instance
        MockMemoryClient.assert_called_once_with(api_key="fake-api-key")
        MockMemory.from_config.assert_not_called()

    def test_get_user_id_from_arg(self, toolkit_config, dummy_run_context):
        """A user_id set on the toolkit should be returned directly."""
        toolkit_config.user_id = "arg_user"
        user_id = toolkit_config._get_user_id("test_method", dummy_run_context)
        assert user_id == "arg_user"

    def test_get_user_id_no_id_provided(self, toolkit_config, dummy_run_context):
        """With no user_id anywhere, an error string is returned (not raised)."""
        result = toolkit_config._get_user_id("test_method", dummy_run_context)
        assert result == "Error in test_method: A user_id must be provided in the method call."

    def test_get_user_id_from_run_context(self, toolkit_config):
        """Test that user_id is retrieved from run_context when not provided in constructor"""
        run_context = RunContext(run_id="test-run", session_id="test-session", user_id="context_user_123")
        user_id = toolkit_config._get_user_id("test_method", run_context)
        assert user_id == "context_user_123"

    def test_get_user_id_constructor_takes_priority(self, toolkit_config):
        """Test that constructor user_id takes priority over run_context"""
        toolkit_config.user_id = "constructor_user"
        run_context = RunContext(run_id="test-run", session_id="test-session", user_id="context_user_123")
        user_id = toolkit_config._get_user_id("test_method", run_context)
        assert user_id == "constructor_user"

    def test_add_memory_with_run_context_user_id(self, toolkit_config, mock_memory_instance):
        """Test that add_memory works with user_id from run_context"""
        run_context = RunContext(run_id="test-run", session_id="test-session", user_id="context_user_add")
        result_str = toolkit_config.add_memory(run_context, content="Context user test")
        mock_memory_instance.add.assert_called_once_with(
            [{"role": "user", "content": "Context user test"}],
            user_id="context_user_add",
            infer=True,
        )
        expected_result = {"results": [{"id": "mem-add-123", "memory": "added memory", "event": "ADD"}]}
        assert json.loads(result_str) == expected_result

    def test_search_memory_with_run_context_user_id(self, toolkit_config, mock_memory_instance):
        """Test that search_memory works with user_id from run_context"""
        run_context = RunContext(run_id="test-run", session_id="test-session", user_id="context_user_search")
        result_str = toolkit_config.search_memory(run_context, query="Context search test")
        mock_memory_instance.search.assert_called_once_with(query="Context search test", user_id="context_user_search")
        expected_result = [{"id": "mem-search-456", "memory": "found memory", "score": 0.9}]
        assert json.loads(result_str) == expected_result

    def test_add_memory_success_arg_id(self, toolkit_config, mock_memory_instance, dummy_run_context):
        """add_memory should wrap a plain string into a single user message."""
        toolkit_config.user_id = "test_user_add"
        result_str = toolkit_config.add_memory(dummy_run_context, content="Test message")
        mock_memory_instance.add.assert_called_once_with(
            [{"role": "user", "content": "Test message"}],
            user_id="test_user_add",
            infer=True,
        )
        expected_result = {"results": [{"id": "mem-add-123", "memory": "added memory", "event": "ADD"}]}
        assert json.loads(result_str) == expected_result

    def test_add_memory_dict_message(self, toolkit_config, mock_memory_instance, dummy_run_context):
        """add_memory should JSON-encode a dict payload before sending it."""
        toolkit_config.user_id = "user1"
        dict_content = {"role": "user", "content": "Dict message"}
        result_str = toolkit_config.add_memory(dummy_run_context, content=dict_content)
        mock_memory_instance.add.assert_called_once_with(
            [{"role": "user", "content": json.dumps(dict_content)}],
            user_id="user1",
            infer=True,
        )
        expected_result = {"results": [{"id": "mem-add-123", "memory": "added memory", "event": "ADD"}]}
        assert json.loads(result_str) == expected_result

    def test_add_memory_invalid_message_type(self, toolkit_config, mock_memory_instance, dummy_run_context):
        """Non-string, non-dict content should be stringified rather than rejected."""
        toolkit_config.user_id = "user1"
        result_str = toolkit_config.add_memory(dummy_run_context, content=123)
        mock_memory_instance.add.assert_called_once_with(
            [{"role": "user", "content": "123"}],
            user_id="user1",
            infer=True,
        )
        expected_result = {"results": [{"id": "mem-add-123", "memory": "added memory", "event": "ADD"}]}
        assert json.loads(result_str) == expected_result

    def test_add_memory_no_user_id(self, toolkit_config, dummy_run_context):
        """add_memory should return the user-id error string when no ID is available."""
        result = toolkit_config.add_memory(dummy_run_context, content="No user ID test")
        expected_error_msg = "Error in add_memory: A user_id must be provided in the method call."
        assert expected_error_msg in result

    def test_search_memory_success_arg_id(self, toolkit_config, mock_memory_instance, dummy_run_context):
        """search_memory should unwrap the {'results': [...]} dict into a list."""
        toolkit_config.user_id = "test_user_search"
        result_str = toolkit_config.search_memory(dummy_run_context, query="find stuff")
        mock_memory_instance.search.assert_called_once_with(query="find stuff", user_id="test_user_search")
        expected_result = [{"id": "mem-search-456", "memory": "found memory", "score": 0.9}]
        assert json.loads(result_str) == expected_result

    def test_search_memory_success_default_call(self, toolkit_config, mock_memory_instance, dummy_run_context):
        """search_memory should call the client with query and user_id only."""
        toolkit_config.user_id = "user_default"
        toolkit_config.search_memory(dummy_run_context, query="default search")
        mock_memory_instance.search.assert_called_once_with(query="default search", user_id="user_default")

    def test_search_memory_no_user_id(self, toolkit_config, dummy_run_context):
        """search_memory should return the user-id error string when no ID is available."""
        result = toolkit_config.search_memory(dummy_run_context, query="No user ID search")
        expected_error_msg = "Error in search_memory: A user_id must be provided in the method call."
        assert result == expected_error_msg

    def test_search_memory_api_key_list_return(self, toolkit_api_key, mock_memory_client_instance, dummy_run_context):
        """The API client returns a bare list; it should pass through unchanged."""
        toolkit_api_key.user_id = "default_user_api"
        result_str = toolkit_api_key.search_memory(dummy_run_context, query="client search")
        mock_memory_client_instance.search.assert_called_once_with(query="client search", user_id="default_user_api")
        expected_result = [{"id": "mem-client-search-456", "memory": "found client memory", "score": 0.8}]
        assert json.loads(result_str) == expected_result

    def test_get_all_memories_success(self, toolkit_api_key, mock_memory_client_instance, dummy_run_context):
        """get_all_memories (API client, bare-list return) should serialize the list."""
        toolkit_api_key.user_id = "user-all-1"
        result_str = toolkit_api_key.get_all_memories(dummy_run_context)
        mock_memory_client_instance.get_all.assert_called_once_with(user_id="user-all-1")
        expected = [{"id": "mem-client-all-1", "memory": "all client mem 1"}]
        assert json.loads(result_str) == expected

    def test_get_all_memories_success_dict_return(self, toolkit_config, mock_memory_instance, dummy_run_context):
        """get_all_memories (local Memory, dict return) should unwrap 'results'."""
        toolkit_config.user_id = "user-all-dict"
        result_str = toolkit_config.get_all_memories(dummy_run_context)
        mock_memory_instance.get_all.assert_called_once_with(user_id="user-all-dict")
        expected = [{"id": "mem-all-1", "memory": "all mem 1"}]
        assert json.loads(result_str) == expected

    def test_get_all_memories_no_user_id(self, toolkit_api_key, dummy_run_context):
        """get_all_memories should return the user-id error string when no ID is available."""
        result_str = toolkit_api_key.get_all_memories(dummy_run_context)
        expected_error_msg = "Error in get_all_memories: A user_id must be provided in the method call."
        assert result_str == expected_error_msg

    def test_get_all_memories_error(self, toolkit_api_key, mock_memory_client_instance, dummy_run_context):
        """Client exceptions should be converted into an error string."""
        toolkit_api_key.user_id = "error-user"
        mock_memory_client_instance.get_all.side_effect = Exception("Test get_all error")
        result_str = toolkit_api_key.get_all_memories(dummy_run_context)
        assert "Error getting all memories: Test get_all error" in result_str

    def test_delete_all_memories_success(self, toolkit_api_key, mock_memory_client_instance, dummy_run_context):
        """delete_all_memories should report success with the resolved user_id."""
        toolkit_api_key.user_id = "user-delete-all-1"
        result_str = toolkit_api_key.delete_all_memories(dummy_run_context)
        mock_memory_client_instance.delete_all.assert_called_once_with(user_id="user-delete-all-1")
        expected_str = "Successfully deleted all memories for user_id: user-delete-all-1."
        assert result_str == expected_str

    def test_delete_all_memories_no_user_id(self, toolkit_api_key, dummy_run_context):
        """The user-id error should be wrapped in a delete-specific message."""
        result_str = toolkit_api_key.delete_all_memories(dummy_run_context)
        expected_error_msg = "Error in delete_all_memories: A user_id must be provided in the method call."
        assert "Error deleting all memories:" in result_str and expected_error_msg in result_str

    def test_delete_all_memories_error(self, toolkit_api_key, mock_memory_client_instance, dummy_run_context):
        """Client exceptions during delete_all should be converted into an error string."""
        toolkit_api_key.user_id = "error-user"
        mock_memory_client_instance.delete_all.side_effect = Exception("Test delete_all error")
        result_str = toolkit_api_key.delete_all_memories(dummy_run_context)
        assert "Error deleting all memories: Test delete_all error" in result_str

    def test_add_memory_with_infer_false(self, monkeypatch, dummy_run_context):
        """Test that infer parameter can be configured to False"""
        # Set up mocks for this specific test
        mock_memory = MagicMock()
        mock_memory.add.return_value = {"results": [{"id": "mem-add-123", "memory": "added memory", "event": "ADD"}]}
        MockMemory.from_config.return_value = mock_memory
        # Test with config-based toolkit set to infer=False
        monkeypatch.delenv("MEM0_API_KEY", raising=False)
        toolkit_config = Mem0Tools(config={}, user_id="test_user", infer=False)
        result_str = toolkit_config.add_memory(dummy_run_context, content="Test message")
        mock_memory.add.assert_called_once_with(
            [{"role": "user", "content": "Test message"}],
            user_id="test_user",
            infer=False,
        )
        expected_result = {"results": [{"id": "mem-add-123", "memory": "added memory", "event": "ADD"}]}
        assert json.loads(result_str) == expected_result

    def test_init_with_org_id_and_project_id(self, mock_memory_client_instance):
        """Test initialization with org_id and project_id parameters"""
        MockMemoryClient.reset_mock()
        toolkit = Mem0Tools(api_key="fake-api-key", org_id="test_org_123", project_id="test_project_456")
        assert toolkit.org_id == "test_org_123"
        assert toolkit.project_id == "test_project_456"
        MockMemoryClient.assert_called_once_with(
            api_key="fake-api-key", org_id="test_org_123", project_id="test_project_456"
        )

    def test_init_with_env_org_id(self, monkeypatch, mock_memory_client_instance):
        """Test initialization with org_id and project_id from environment variables"""
        MockMemoryClient.reset_mock()
        monkeypatch.setenv("MEM0_ORG_ID", "env_org_789")
        monkeypatch.setenv("MEM0_PROJECT_ID", "env_project_012")
        toolkit = Mem0Tools(api_key="fake-api-key")
        assert toolkit.org_id == "env_org_789"
        assert toolkit.project_id == "env_project_012"
        MockMemoryClient.assert_called_once_with(
            api_key="fake-api-key", org_id="env_org_789", project_id="env_project_012"
        )

    def test_init_with_partial_org_id(self, mock_memory_client_instance):
        """Test initialization with only org_id (no project_id)"""
        MockMemoryClient.reset_mock()
        toolkit = Mem0Tools(api_key="fake-api-key", org_id="test_org_only")
        assert toolkit.org_id == "test_org_only"
        assert toolkit.project_id is None
        # Should only pass org_id, not project_id
        MockMemoryClient.assert_called_once_with(api_key="fake-api-key", org_id="test_org_only")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_mem0.py",
"license": "Apache License 2.0",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/api/evals.py | from agno.api.api import api
from agno.api.routes import ApiRoutes
from agno.api.schemas.evals import EvalRunCreate
from agno.utils.log import log_debug
def create_eval_run_telemetry(eval_run: EvalRunCreate) -> None:
    """Best-effort telemetry: record an eval run, swallowing API failures."""
    with api.Client() as api_client:
        try:
            api_client.post(
                ApiRoutes.EVAL_RUN_CREATE,
                json=eval_run.model_dump(exclude_none=True),
            )
        except Exception as e:
            # Telemetry must never break the caller; log at debug level and move on.
            log_debug(f"Could not create evaluation run: {e}")
async def async_create_eval_run_telemetry(eval_run: EvalRunCreate) -> None:
"""Telemetry recording for async Eval runs"""
async with api.AsyncClient() as api_client:
try:
await api_client.post(ApiRoutes.EVAL_RUN_CREATE, json=eval_run.model_dump(exclude_none=True))
except Exception as e:
log_debug(f"Could not create evaluation run: {e}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/api/evals.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/api/schemas/evals.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, Field
from agno.api.schemas.utils import get_sdk_version
from agno.db.schemas.evals import EvalType
class EvalRunCreate(BaseModel):
    """Data sent to the telemetry API to create an Eval run event"""

    run_id: str  # Unique identifier of the eval run
    eval_type: EvalType  # Kind of eval (see agno.db.schemas.evals.EvalType)
    data: Optional[Dict[Any, Any]] = None  # Arbitrary eval payload; None is dropped by exclude_none upstream
    sdk_version: str = Field(default_factory=get_sdk_version)  # SDK version captured at record time
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/api/schemas/evals.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/tools/google_bigquery.py | """Backward-compatibility stub. Use agno.tools.google.bigquery instead."""
import warnings
warnings.warn(
"Importing from 'agno.tools.google_bigquery' is deprecated. "
"Use 'from agno.tools.google.bigquery import GoogleBigQueryTools' instead.",
DeprecationWarning,
stacklevel=2,
)
from agno.tools.google.bigquery import * # noqa: F401, F403, E402
from agno.tools.google.bigquery import GoogleBigQueryTools, _clean_sql # noqa: F811, E402
__all__ = ["GoogleBigQueryTools", "_clean_sql"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/google_bigquery.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/tools/test_google_bigquery.py | # agno/tests/unit/tools/test_bigquery.py
import json
from unittest.mock import MagicMock, patch
import pytest
from agno.tools.google.bigquery import GoogleBigQueryTools, _clean_sql
@pytest.fixture
def mock_bq_client():
    """Yield the mocked bigquery.Client instance that GoogleBigQueryTools will use."""
    with patch("agno.tools.google.bigquery.bigquery.Client", autospec=True) as client_cls:
        yield client_cls.return_value
@pytest.fixture
def bq_tools_instance(mock_bq_client):
    """GoogleBigQueryTools wired to the mocked client (credentials default to None)."""
    return GoogleBigQueryTools(
        project="test-project",
        location="us-central1",
        dataset="test-dataset",
    )
# --- Test Cases ---
def test_run_sql_query_success(bq_tools_instance, mock_bq_client):
    """run_sql_query should return query rows serialized as a JSON string."""
    rows = [{"product_name": "Laptop", "quantity": 5}, {"product_name": "Mouse", "quantity": 20}]
    job = MagicMock()
    job.result.return_value = rows
    mock_bq_client.query.return_value = job

    query = "SELECT product_name, quantity FROM sales"
    result_json_str = bq_tools_instance.run_sql_query(query)

    # The tool stringifies the row list and then JSON-encodes that string.
    expected_inner_string = "[{'product_name': 'Laptop', 'quantity': 5}, {'product_name': 'Mouse', 'quantity': 20}]"
    assert result_json_str == json.dumps(expected_inner_string)

    mock_bq_client.query.assert_called_once()
    positional = mock_bq_client.query.call_args[0]
    assert positional[0] == _clean_sql(query)  # cleaned query is the first argument
    assert len(positional) == 2  # second positional argument is the job config
def test_list_tables_error(bq_tools_instance, mock_bq_client):
    """list_tables should turn a client failure into a readable error string."""
    mock_bq_client.list_tables.side_effect = Exception("Network Error")
    assert bq_tools_instance.list_tables() == "Error getting tables: Network Error"
def test_describe_table_success(bq_tools_instance, mock_bq_client):
    """describe_table should return the description and column names as JSON."""
    api_repr = {
        "description": "Table of customer data",
        "schema": {
            "fields": [
                {"name": "customer_id", "type": "STRING"},
                {"name": "email", "type": "STRING"},
            ]
        },
    }
    table = MagicMock()
    table.to_api_repr.return_value = api_repr
    mock_bq_client.get_table.return_value = table

    result = bq_tools_instance.describe_table(table_id="customers")

    expected = {"table_description": "Table of customer data", "columns": "['customer_id', 'email']"}
    assert result == json.dumps(expected)
    mock_bq_client.get_table.assert_called_once_with("test-project.test-dataset.customers")
def test_describe_table_error(bq_tools_instance, mock_bq_client):
    """describe_table should surface table-lookup failures as an error string, not raise."""
    mock_bq_client.get_table.side_effect = Exception("Table Not Found")

    outcome = bq_tools_instance.describe_table(table_id="non_existent_table")

    assert outcome == "Error getting table schema: Table Not Found"
def test_run_sql_query_empty_result(bq_tools_instance, mock_bq_client):
    """run_sql_query should return a JSON-encoded "[]" when the query yields no rows."""
    job = MagicMock()
    job.result.return_value = []  # no rows at all
    mock_bq_client.query.return_value = job

    outcome = bq_tools_instance.run_sql_query("SELECT * FROM empty_table")

    assert outcome == json.dumps("[]")
def test_run_sql_query_error_in_client_query(bq_tools_instance, mock_bq_client):
    """When client.query() raises, run_sql_query should degrade to a JSON-encoded empty string."""
    mock_bq_client.query.side_effect = Exception("Query Execution Failed")

    outcome = bq_tools_instance.run_sql_query("SELECT * FROM some_table")

    assert outcome == json.dumps("")
def test_clean_sql_preserves_token_boundaries_with_line_comments():
    """Regression test: newlines must become spaces, not vanish entirely.

    When newlines were stripped, a trailing `--` comment swallowed the rest of
    the query; replacing them with spaces keeps every token on one line.
    """
    raw = "SELECT * FROM table -- this is a comment\nWHERE id = 1"
    assert _clean_sql(raw) == "SELECT * FROM table -- this is a comment WHERE id = 1"
def test_clean_sql_handles_escaped_newlines():
    """Literal backslash-n sequences in the SQL text should collapse to single spaces."""
    assert _clean_sql("SELECT *\\nFROM table") == "SELECT * FROM table"
def test_clean_sql_preserves_backslashes_in_string_literals():
    """Backslashes inside string literals (e.g. regex patterns) must survive cleaning unchanged."""
    pattern_query = r"SELECT * FROM table WHERE regex = 'word\s+'"
    assert _clean_sql(pattern_query) == pattern_query
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_google_bigquery.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/vectordb/couchbase/couchbase.py | import asyncio
import time
from datetime import timedelta
from typing import Any, Dict, List, Optional, Union
from agno.filters import FilterExpr
from agno.knowledge.document import Document
from agno.knowledge.embedder import Embedder
from agno.utils.log import log_debug, log_info, log_warning, logger
from agno.vectordb.base import VectorDb
# `hashlib` is part of the Python standard library and is always available, so
# no ImportError guard is needed. (The old fallback message suggested
# `pip install hashlib`, which would install an unrelated, long-broken
# third-party package.)
from hashlib import md5
try:
from acouchbase.bucket import AsyncBucket
from acouchbase.cluster import AsyncCluster
from acouchbase.collection import AsyncCollection
from acouchbase.management.search import (
ScopeSearchIndexManager as AsyncScopeSearchIndexManager,
)
from acouchbase.management.search import (
SearchIndex as AsyncSearchIndex,
)
from acouchbase.management.search import (
SearchIndexManager as AsyncSearchIndexManager,
)
from acouchbase.scope import AsyncScope
from couchbase.bucket import Bucket
from couchbase.cluster import Cluster
from couchbase.collection import Collection
from couchbase.exceptions import (
CollectionAlreadyExistsException,
CollectionNotFoundException,
ScopeAlreadyExistsException,
SearchIndexNotFoundException,
)
from couchbase.management.search import ScopeSearchIndexManager, SearchIndex, SearchIndexManager
from couchbase.n1ql import QueryScanConsistency
from couchbase.options import ClusterOptions, QueryOptions, SearchOptions
from couchbase.result import SearchResult
from couchbase.scope import Scope
from couchbase.search import SearchRequest
from couchbase.vector_search import VectorQuery, VectorSearch
except ImportError:
raise ImportError("`couchbase` not installed. Please install using `pip install couchbase`")
class CouchbaseSearch(VectorDb):
"""
Couchbase Vector Database implementation with FTS (Full Text Search) index support.
"""
def __init__(
    self,
    bucket_name: str,
    scope_name: str,
    collection_name: str,
    couchbase_connection_string: str,
    cluster_options: ClusterOptions,
    search_index: Union[str, SearchIndex],
    embedder: Optional[Embedder] = None,
    overwrite: bool = False,
    is_global_level_index: bool = False,
    wait_until_index_ready: float = 0,
    batch_limit: int = 500,
    name: Optional[str] = None,
    description: Optional[str] = None,
    **kwargs,
):
    """
    Initialize the CouchbaseSearch with Couchbase connection details.

    Args:
        bucket_name (str): Name of the Couchbase bucket.
        scope_name (str): Name of the scope within the bucket.
        collection_name (str): Name of the collection within the scope.
        couchbase_connection_string (str): Couchbase connection string.
        cluster_options (ClusterOptions): Options for configuring the Couchbase cluster connection.
        search_index (Union[str, SearchIndex]): Search index configuration, either as index name or SearchIndex definition. If only a name is given, the index must already exist server-side.
        embedder (Optional[Embedder]): Embedder instance for generating embeddings. Defaults to OpenAIEmbedder.
        overwrite (bool): Whether to overwrite existing collection. Defaults to False.
        is_global_level_index (bool): If True, the FTS index is managed/queried at cluster (global) level; otherwise at scope level. Defaults to False.
        wait_until_index_ready (float): Time in seconds to wait until the index is ready. 0 disables waiting. Defaults to 0.
        batch_limit (int): Maximum number of documents to process in a single batch (applies to both sync and async operations). Defaults to 500.
        name (Optional[str]): Name of the vector database.
        description (Optional[str]): Description of the vector database.
        **kwargs: Additional arguments for Couchbase connection.

    Raises:
        ValueError: If ``bucket_name`` is empty.
    """
    if not bucket_name:
        raise ValueError("Bucket name must not be empty.")
    self.bucket_name = bucket_name
    self.scope_name = scope_name
    self.collection_name = collection_name
    self.connection_string = couchbase_connection_string
    self.cluster_options = cluster_options
    if embedder is None:
        # Imported lazily so the OpenAI dependency is only needed when used as the default.
        from agno.knowledge.embedder.openai import OpenAIEmbedder

        embedder = OpenAIEmbedder()
        log_debug("Embedder not provided, using OpenAIEmbedder as default.")
    self.embedder = embedder
    self.overwrite = overwrite
    self.is_global_level_index = is_global_level_index
    self.wait_until_index_ready = wait_until_index_ready
    # Initialize base class with name and description
    super().__init__(name=name, description=description)
    self.kwargs = kwargs
    self.batch_limit = batch_limit
    if isinstance(search_index, str):
        # Name-only configuration: no definition available for (re)creating the index.
        self.search_index_name = search_index
        self.search_index_definition = None
    else:
        self.search_index_name = search_index.name
        self.search_index_definition = search_index
    # Lazily-created synchronous connection handles (see the corresponding properties).
    self._cluster: Optional[Cluster] = None
    self._bucket: Optional[Bucket] = None
    self._scope: Optional[Scope] = None
    self._collection: Optional[Collection] = None
    # Lazily-created async (acouchbase) handles (see the get_async_* helpers).
    self._async_cluster: Optional[AsyncCluster] = None
    self._async_bucket: Optional[AsyncBucket] = None
    self._async_scope: Optional[AsyncScope] = None
    self._async_collection: Optional[AsyncCollection] = None
@property
def cluster(self) -> Cluster:
    """Lazily connect to Couchbase and return the cached Cluster handle.

    Raises:
        ConnectionError: If the connection cannot be established.
    """
    if self._cluster is not None:
        return self._cluster
    try:
        logger.debug("Creating Couchbase Cluster connection")
        connection = Cluster(self.connection_string, self.cluster_options)
        # Block until the cluster reports ready so later calls don't race the handshake.
        connection.wait_until_ready(timeout=timedelta(seconds=60))
        logger.info("Connected to Couchbase successfully.")
    except Exception as exc:
        logger.error(f"Failed to connect to Couchbase: {exc}")
        raise ConnectionError(f"Failed to connect to Couchbase: {exc}")
    self._cluster = connection
    return self._cluster
@property
def bucket(self) -> Bucket:
    """Return the Couchbase bucket, opening it on first access."""
    if self._bucket is not None:
        return self._bucket
    self._bucket = self.cluster.bucket(self.bucket_name)
    return self._bucket
@property
def scope(self) -> Scope:
    """Return the Couchbase scope, resolving it on first access."""
    if self._scope is not None:
        return self._scope
    self._scope = self.bucket.scope(self.scope_name)
    return self._scope
@property
def collection(self) -> Collection:
    """Return the Couchbase collection, resolving it on first access."""
    if self._collection is not None:
        return self._collection
    self._collection = self.scope.collection(self.collection_name)
    return self._collection
def _create_collection_and_scope(self) -> None:
    """
    Get or create the scope and collection within the bucket.

    Uses EAFP principle: attempts to create scope/collection and handles
    specific exceptions if they already exist or (for collections with overwrite=True)
    if they are not found for dropping.

    Raises:
        Exception: If scope or collection creation/manipulation fails unexpectedly.
    """
    # 1. Ensure Scope Exists
    try:
        self.bucket.collections().create_scope(scope_name=self.scope_name)
        logger.info(f"Created new scope '{self.scope_name}'")
    except ScopeAlreadyExistsException:
        # Expected in the common case; reuse the existing scope.
        logger.info(f"Scope '{self.scope_name}' already exists. Using existing scope.")
    except Exception as e:
        logger.error(f"Failed to create or ensure scope '{self.scope_name}' exists: {e}")
        raise
    collection_manager = self.bucket.collections()
    # 2. Handle Collection
    if self.overwrite:
        # Attempt to drop the collection first since overwrite is True
        try:
            logger.info(
                f"Overwrite is True. Attempting to drop collection '{self.collection_name}' in scope '{self.scope_name}'."
            )
            collection_manager.drop_collection(collection_name=self.collection_name, scope_name=self.scope_name)
            logger.info(f"Successfully dropped collection '{self.collection_name}'.")
            time.sleep(1)  # Brief wait after drop, as in original code
        except CollectionNotFoundException:
            # Nothing to drop; proceed straight to creation.
            logger.info(
                f"Collection '{self.collection_name}' not found in scope '{self.scope_name}'. No need to drop."
            )
        except Exception as e:
            logger.error(f"Error dropping collection '{self.collection_name}' during overwrite: {e}")
            raise
        # Proceed to create the collection
        try:
            logger.info(f"Creating collection '{self.collection_name}' in scope '{self.scope_name}'.")
            collection_manager.create_collection(scope_name=self.scope_name, collection_name=self.collection_name)
            logger.info(
                f"Successfully created collection '{self.collection_name}' after drop attempt (overwrite=True)."
            )
        except CollectionAlreadyExistsException:
            # This is an unexpected state if overwrite=True and drop was supposed to clear the way.
            logger.error(
                f"Failed to create collection '{self.collection_name}' as it already exists, "
                f"even after drop attempt for overwrite. Overwrite operation may not have completed as intended."
            )
            raise  # Re-raise as the overwrite intent failed
        except Exception as e:
            logger.error(
                f"Error creating collection '{self.collection_name}' after drop attempt (overwrite=True): {e}"
            )
            raise
    else:  # self.overwrite is False
        try:
            logger.info(
                f"Overwrite is False. Attempting to create collection '{self.collection_name}' in scope '{self.scope_name}'."
            )
            collection_manager.create_collection(scope_name=self.scope_name, collection_name=self.collection_name)
            logger.info(f"Successfully created new collection '{self.collection_name}'.")
        except CollectionAlreadyExistsException:
            # Expected in the common case; reuse the existing collection.
            logger.info(
                f"Collection '{self.collection_name}' already exists in scope '{self.scope_name}'. Using existing collection."
            )
        except Exception as e:
            logger.error(f"Error creating collection '{self.collection_name}': {e}")
            raise
def _search_indexes_mng(self) -> Union[SearchIndexManager, ScopeSearchIndexManager]:
    """Return the FTS index manager at the configured level (cluster vs. scope)."""
    owner = self.cluster if self.is_global_level_index else self.scope
    return owner.search_indexes()
def _create_fts_index(self):
    """Create a FTS index on the collection if it doesn't exist.

    Flow: if the index already exists and overwrite is False, this is a no-op.
    If it is missing and only an index *name* was configured, creation is
    impossible and a ValueError is raised. Otherwise the index is (re)created
    from ``self.search_index_definition``.

    Raises:
        ValueError: If the index does not exist and no definition was provided.
    """
    try:
        # Check if index exists and handle string index name
        self._search_indexes_mng().get_index(self.search_index_name)
        if not self.overwrite:
            return
    except Exception:
        # Lookup failed -> index is missing; without a definition we cannot create it.
        if self.search_index_definition is None:
            raise ValueError(f"Index '{self.search_index_name}' does not exist")
    # Create or update index
    # NOTE(review): if overwrite=True and only an index name (no definition) was
    # configured, upsert_index(None) below would fail — confirm intended behavior.
    try:
        if self.overwrite:
            try:
                logger.info(f"Dropping existing FTS index '{self.search_index_name}'")
                self._search_indexes_mng().drop_index(self.search_index_name)
            except SearchIndexNotFoundException:
                logger.warning(f"Index '{self.search_index_name}' does not exist")
            except Exception as e:
                logger.warning(f"Error dropping index (may not exist): {e}")
        self._search_indexes_mng().upsert_index(self.search_index_definition)
        logger.info(f"Created FTS index '{self.search_index_name}'")
        if self.wait_until_index_ready:
            self._wait_for_index_ready()
    except Exception as e:
        logger.error(f"Error creating FTS index '{self.search_index_name}': {e}")
        raise
def _wait_for_index_ready(self):
    """Poll until the FTS index responds, or time out.

    The indexed-documents count is used as a liveness probe: any successful
    call (a non-negative count) is treated as "ready". The timeout window
    ``self.wait_until_index_ready`` is only enforced while the probe keeps
    raising; polling happens once per second.

    Raises:
        TimeoutError: If the probe keeps failing past the configured window.
    """
    start_time = time.time()
    while True:
        try:
            count = self._search_indexes_mng().get_indexed_documents_count(self.search_index_name)
            if count > -1:
                logger.info(f"FTS index '{self.search_index_name}' is ready")
                break
            # logger.info(f"FTS index '{self.search_index_name}' is not ready yet status: {index['status']}")
        except Exception as e:
            if time.time() - start_time > self.wait_until_index_ready:
                logger.error(f"Error checking index status: {e}")
                raise TimeoutError("Timeout waiting for FTS index to become ready")
        time.sleep(1)
def create(self) -> None:
    """Provision the backing scope/collection, then the FTS index on top of it."""
    self._create_collection_and_scope()
    self._create_fts_index()
def insert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
    """
    Insert documents into the Couchbase bucket. Fails if any document already exists.

    Args:
        content_hash: Hash of the source content, stored on every prepared document.
        documents: List of documents to insert
        filters: Optional filters to apply to the documents

    Raises:
        ValueError: If embedding generation fails for a document.

    Note:
        Per-document preparation errors and batch-level insert errors are logged,
        not raised; check logs for partial failures.
    """
    log_debug(f"Inserting {len(documents)} documents")
    docs_to_insert: Dict[str, Any] = {}
    for document in documents:
        # Embed lazily: only documents without a precomputed embedding are embedded here.
        if document.embedding is None:
            document.embed(embedder=self.embedder)
            if document.embedding is None:
                raise ValueError(f"Failed to generate embedding for document: {document.name}")
        try:
            doc_data = self.prepare_doc(content_hash, document)
            if filters:
                doc_data["filters"] = filters
            # For insert_multi, the key of the dict is the document ID,
            # and the value is the document content itself.
            doc_id = doc_data.pop("_id")
            docs_to_insert[doc_id] = doc_data
        except Exception as e:
            # Best-effort: a document that fails preparation is skipped, not fatal.
            logger.error(f"Error preparing document '{document.name}': {e}")
    if not docs_to_insert:
        logger.info("No documents prepared for insertion.")
        return
    doc_ids = list(docs_to_insert.keys())
    total_inserted_count = 0
    total_processed_count = len(doc_ids)
    errors_occurred = False
    # Insert in slices of self.batch_limit to bound per-request size.
    for i in range(0, len(doc_ids), self.batch_limit):
        batch_doc_ids = doc_ids[i : i + self.batch_limit]
        batch_docs_to_insert = {doc_id: docs_to_insert[doc_id] for doc_id in batch_doc_ids}
        if not batch_docs_to_insert:
            continue
        log_debug(f"Inserting batch of {len(batch_docs_to_insert)} documents.")
        try:
            result = self.collection.insert_multi(batch_docs_to_insert)
            # Check for errors in the batch result
            # The actual way to count successes/failures might depend on the SDK version
            # For Couchbase SDK 3.x/4.x, result.all_ok is a good indicator for the whole batch.
            # If not all_ok, result.exceptions (dict) contains errors for specific keys.
            # Simplistic success counting for this example, assuming partial success is possible
            # and we want to count how many actually made it.
            if result.all_ok:
                batch_inserted_count = len(batch_docs_to_insert)
                logger.info(f"Batch of {batch_inserted_count} documents inserted successfully.")
            else:
                # If not all_ok, count successes by checking which keys are NOT in exceptions
                # This is a more robust way than just len(batch) - len(exceptions)
                # as some items might succeed even if others fail.
                succeeded_ids = set(batch_docs_to_insert.keys()) - set(
                    result.exceptions.keys() if result.exceptions else []
                )
                batch_inserted_count = len(succeeded_ids)
                if batch_inserted_count > 0:
                    logger.info(f"Partially inserted {batch_inserted_count} documents in batch.")
                logger.warning(f"Bulk write error during batch insert: {result.exceptions}")
                errors_occurred = True
            total_inserted_count += batch_inserted_count
        except Exception as e:
            logger.error(f"Error during batch bulk insert for {len(batch_docs_to_insert)} documents: {e}")
            errors_occurred = True  # Mark that an error occurred in this batch
    logger.info(f"Finished processing {total_processed_count} documents for insertion.")
    logger.info(f"Total successfully inserted: {total_inserted_count}.")
    if errors_occurred:
        logger.warning("Some errors occurred during the insert operation. Please check logs for details.")
def upsert_available(self) -> bool:
    """Couchbase supports upserts natively, so always report availability."""
    return True
def _upsert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
    """Replace any documents stored under ``content_hash`` with the given batch."""
    already_stored = self.content_hash_exists(content_hash)
    if already_stored:
        # Drop the stale copies first so the insert below is the only version.
        self._delete_by_content_hash(content_hash)
    self.insert(content_hash=content_hash, documents=documents, filters=filters)
def upsert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
    """
    Update existing documents or insert new ones into the Couchbase bucket.

    Args:
        content_hash: Hash of the source content, stored on every prepared document.
        documents: List of documents to upsert
        filters: Optional filters to apply to the documents

    Note:
        Per-document preparation errors and batch-level upsert errors are logged,
        not raised; check logs for partial failures.
    """
    logger.info(f"Upserting {len(documents)} documents")
    docs_to_upsert: Dict[str, Any] = {}
    for document in documents:
        try:
            # Embed lazily: only documents without a precomputed embedding are embedded here.
            if document.embedding is None:
                document.embed(embedder=self.embedder)
                if document.embedding is None:
                    raise ValueError(f"Failed to generate embedding for document: {document.name}")
            doc_data = self.prepare_doc(content_hash, document)
            if filters:
                doc_data["filters"] = filters
            # For upsert_multi, the key of the dict is the document ID,
            # and the value is the document content itself.
            doc_id = doc_data.pop("_id")
            docs_to_upsert[doc_id] = doc_data
        except Exception as e:
            # Best-effort: a document that fails preparation is skipped, not fatal.
            logger.error(f"Error preparing document '{document.name}': {e}")
    if not docs_to_upsert:
        logger.info("No documents prepared for upsert.")
        return
    doc_ids = list(docs_to_upsert.keys())
    total_upserted_count = 0
    total_processed_count = len(doc_ids)
    errors_occurred = False
    # Upsert in slices of self.batch_limit to bound per-request size.
    for i in range(0, len(doc_ids), self.batch_limit):
        batch_doc_ids = doc_ids[i : i + self.batch_limit]
        batch_docs_to_upsert = {doc_id: docs_to_upsert[doc_id] for doc_id in batch_doc_ids}
        if not batch_docs_to_upsert:
            continue
        logger.info(f"Upserting batch of {len(batch_docs_to_upsert)} documents.")
        try:
            result = self.collection.upsert_multi(batch_docs_to_upsert)
            # Similar to insert_multi, check for errors in the batch result.
            if result.all_ok:
                batch_upserted_count = len(batch_docs_to_upsert)
                logger.info(f"Batch of {batch_upserted_count} documents upserted successfully.")
            else:
                # Count successes as the keys absent from result.exceptions.
                succeeded_ids = set(batch_docs_to_upsert.keys()) - set(
                    result.exceptions.keys() if result.exceptions else []
                )
                batch_upserted_count = len(succeeded_ids)
                if batch_upserted_count > 0:
                    logger.info(f"Partially upserted {batch_upserted_count} documents in batch.")
                logger.warning(f"Bulk write error during batch upsert: {result.exceptions}")
                errors_occurred = True
            total_upserted_count += batch_upserted_count
        except Exception as e:
            logger.error(f"Error during batch bulk upsert for {len(batch_docs_to_upsert)} documents: {e}")
            errors_occurred = True
    logger.info(f"Finished processing {total_processed_count} documents for upsert.")
    logger.info(f"Total successfully upserted: {total_upserted_count}.")
    if errors_occurred:
        logger.warning("Some errors occurred during the upsert operation. Please check logs for details.")
def search(
    self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> List[Document]:
    """Search the Couchbase bucket for documents relevant to the query.

    Args:
        query: Text query; it is embedded and matched against stored embeddings.
        limit: Maximum number of documents to return. Defaults to 5.
        filters: Optional raw FTS filter dict. Lists of ``FilterExpr`` are not
            supported yet and are ignored with a warning.

    Returns:
        List of matching Documents; empty if the query could not be embedded.
    """
    # Bug fix: this docstring used to sit *after* the filters check as a stray
    # string expression, so the function had no real docstring.
    if isinstance(filters, List):
        log_warning("Filter Expressions are not yet supported in Couchbase. No filters will be applied.")
        filters = None
    query_embedding = self.embedder.get_embedding(query)
    if query_embedding is None:
        logger.error(f"Failed to generate embedding for query: {query}")
        return []
    try:
        # Implement vector search using Couchbase FTS
        vector_search = VectorSearch.from_vector_query(
            VectorQuery(field_name="embedding", vector=query_embedding, num_candidates=limit)
        )
        request = SearchRequest.create(vector_search)
        # Prepare the options dictionary; "raw" passes the filter dict straight to FTS.
        options_dict = {"limit": limit, "fields": ["*"]}
        if filters:
            options_dict["raw"] = filters
        search_args = {
            "index": self.search_index_name,
            "request": request,
            "options": SearchOptions(**options_dict),  # Construct SearchOptions with the dictionary
        }
        if self.is_global_level_index:
            results = self.cluster.search(**search_args)
        else:
            results = self.scope.search(**search_args)
        return self.__get_doc_from_kv(results)
    except Exception as e:
        logger.error(f"Error during search: {e}")
        raise
def __get_doc_from_kv(self, response: SearchResult) -> List[Document]:
    """
    Convert search results to Document objects by fetching full documents from KV store.

    Args:
        response: SearchResult from Couchbase search query

    Returns:
        List of Document objects
    """
    hits = [(row.id, row.score) for row in response.rows()]
    if not hits:
        return []
    # Batch-fetch the full documents for every hit in a single KV round trip.
    kv_response = self.collection.get_multi(keys=[doc_id for doc_id, _ in hits])
    if not kv_response.all_ok:
        raise Exception(f"Failed to get documents from KV store: {kv_response.exceptions}")
    documents: List[Document] = []
    for doc_id, _score in hits:
        fetched = kv_response.results.get(doc_id)
        if fetched is None or not fetched.success:
            logger.warning(f"Document {doc_id} not found in KV store")
            continue
        payload = fetched.value
        documents.append(
            Document(
                id=doc_id,
                name=payload["name"],
                content=payload["content"],
                meta_data=payload["meta_data"],
                embedding=payload["embedding"],
                content_id=payload.get("content_id"),
            )
        )
    return documents
def drop(self) -> None:
    """Drop the collection from its scope; a no-op when it does not exist."""
    if not self.exists():
        return
    try:
        self.bucket.collections().drop_collection(
            collection_name=self.collection_name, scope_name=self.scope_name
        )
        logger.info(f"Collection '{self.collection_name}' dropped successfully.")
    except Exception as e:
        logger.error(f"Error dropping collection '{self.collection_name}': {e}")
        raise
def delete(self) -> bool:
    """Drop the collection; return True if it existed and was dropped."""
    if not self.exists():
        return False
    self.drop()
    return True
def exists(self) -> bool:
    """Return True if the configured scope contains the configured collection."""
    try:
        all_scopes = self.bucket.collections().get_all_scopes()
        for scope_spec in all_scopes:
            if scope_spec.name != self.scope_name:
                continue
            for col_spec in scope_spec.collections:
                if col_spec.name == self.collection_name:
                    return True
        return False
    except Exception:
        # Any listing/connection failure is treated as "does not exist".
        return False
def prepare_doc(self, content_hash: str, document: Document) -> Dict[str, Any]:
    """
    Prepare a document for insertion into Couchbase.

    Args:
        content_hash: Hash of the source content this document belongs to.
        document: Document to prepare

    Returns:
        Dictionary containing document data ready for insertion. The "_id" key
        is the md5 of the cleaned content, so identical content maps to one key.

    Raises:
        ValueError: If the document has no content (the old docstring wrongly
            said "if embedding generation fails" — embedding happens upstream).
    """
    if not document.content:
        raise ValueError(f"Document {document.name} has no content")
    logger.debug(f"Preparing document: {document.name}")
    # Replace NUL characters (invalid in stored text) before hashing and storing.
    cleaned_content = document.content.replace("\x00", "\ufffd")
    doc_id = md5(cleaned_content.encode("utf-8")).hexdigest()
    return {
        "_id": doc_id,
        "name": document.name,
        "content": cleaned_content,
        # Fix: actually guarantee meta_data is never None, as the old comment claimed.
        "meta_data": document.meta_data if document.meta_data is not None else {},
        "embedding": document.embedding,
        "content_id": document.content_id,
        "content_hash": content_hash,
    }
def get_count(self) -> int:
    """Return the number of documents the FTS index reports as indexed (0 on error)."""
    try:
        if self.is_global_level_index:
            manager = self.cluster.search_indexes()
        else:
            manager = self.scope.search_indexes()
        return manager.get_indexed_documents_count(self.search_index_name)
    except Exception as e:
        logger.error(f"Error getting document count: {e}")
        return 0
def name_exists(self, name: str) -> bool:
    """Return True if any stored document carries the given name (False on error)."""
    statement = f"SELECT name FROM {self.bucket_name}.{self.scope_name}.{self.collection_name} WHERE name = $name LIMIT 1"
    try:
        result = self.scope.query(
            statement,
            QueryOptions(named_parameters={"name": name}, scan_consistency=QueryScanConsistency.REQUEST_PLUS),
        )
        # LIMIT 1 means a single row is enough to prove existence.
        return next(iter(result.rows()), None) is not None
    except Exception as e:
        logger.error(f"Error checking document name existence: {e}")
        return False
def id_exists(self, id: str) -> bool:
    """Return True if a document with this key exists in the collection (False on error)."""
    try:
        exists_result = self.collection.exists(id)
        if not exists_result.exists:
            logger.debug(f"Document 'does not exist': {id}")
        return exists_result.exists
    except Exception as e:
        logger.error(f"Error checking document existence: {e}")
        return False
def content_hash_exists(self, content_hash: str) -> bool:
    """Return True if any stored document carries the given content hash (False on error)."""
    statement = f"SELECT content_hash FROM {self.bucket_name}.{self.scope_name}.{self.collection_name} WHERE content_hash = $content_hash LIMIT 1"
    try:
        result = self.scope.query(
            statement,
            QueryOptions(
                named_parameters={"content_hash": content_hash}, scan_consistency=QueryScanConsistency.REQUEST_PLUS
            ),
        )
        # LIMIT 1 means a single row is enough to prove existence.
        return next(iter(result.rows()), None) is not None
    except Exception as e:
        logger.error(f"Error checking document content_hash existence: {e}")
        return False
# === ASYNC SUPPORT USING acouchbase ===
async def _create_async_cluster_instance(self) -> AsyncCluster:
    """Connect and return a fresh AsyncCluster instance."""
    logger.debug("Creating and connecting new AsyncCluster instance.")
    # AsyncCluster.connect resolves only once the cluster is usable, so no
    # explicit wait_until_ready call is needed afterwards.
    connected = await AsyncCluster.connect(self.connection_string, self.cluster_options)
    logger.info("AsyncCluster connected successfully.")
    return connected
async def get_async_cluster(self) -> AsyncCluster:
    """Return the cached AsyncCluster, connecting it on first use."""
    if self._async_cluster is not None:
        return self._async_cluster
    logger.debug("AsyncCluster instance not cached, creating new one.")
    self._async_cluster = await self._create_async_cluster_instance()
    return self._async_cluster
async def get_async_bucket(self) -> AsyncBucket:
    """Return the cached AsyncBucket, opening it on first use."""
    if self._async_bucket is not None:
        return self._async_bucket
    logger.debug("AsyncBucket instance not cached, creating new one.")
    owning_cluster = await self.get_async_cluster()
    self._async_bucket = owning_cluster.bucket(self.bucket_name)
    return self._async_bucket
async def get_async_scope(self) -> AsyncScope:
    """Return the cached AsyncScope, resolving it on first use."""
    if self._async_scope is not None:
        return self._async_scope
    logger.debug("AsyncScope instance not cached, creating new one.")
    owning_bucket = await self.get_async_bucket()
    self._async_scope = owning_bucket.scope(self.scope_name)
    return self._async_scope
async def get_async_collection(self) -> AsyncCollection:
    """Return the cached AsyncCollection, resolving it on first use."""
    if self._async_collection is not None:
        return self._async_collection
    logger.debug("AsyncCollection instance not cached, creating new one.")
    owning_scope = await self.get_async_scope()
    self._async_collection = owning_scope.collection(self.collection_name)
    return self._async_collection
async def async_create(self) -> None:
    """Asynchronously provision the scope/collection and then the FTS index."""
    await self._async_create_collection_and_scope()
    await self._async_create_fts_index()
async def _async_create_collection_and_scope(self) -> None:
    """
    Get or create the scope and collection within the bucket (async variant).

    Uses EAFP principle: attempts to create scope/collection and handles
    specific exceptions if they already exist or (for collections with overwrite=True)
    if they are not found for dropping.

    Raises:
        Exception: If scope or collection creation/manipulation fails unexpectedly.
    """
    # 1. Ensure Scope Exists
    async_bucket_instance = await self.get_async_bucket()
    try:
        await async_bucket_instance.collections().create_scope(self.scope_name)
        logger.info(f"Created new scope '{self.scope_name}'")
    except ScopeAlreadyExistsException:
        logger.info(f"Scope '{self.scope_name}' already exists. Using existing scope.")
    except Exception as e:
        logger.error(f"Failed to create or ensure scope '{self.scope_name}' exists: {e}")
        raise
    collection_manager = async_bucket_instance.collections()
    # 2. Handle Collection
    if self.overwrite:
        # Attempt to drop the collection first since overwrite is True
        try:
            logger.info(
                f"Overwrite is True. Attempting to drop collection '{self.collection_name}' in scope '{self.scope_name}'."
            )
            await collection_manager.drop_collection(
                collection_name=self.collection_name, scope_name=self.scope_name
            )
            logger.info(f"Successfully dropped collection '{self.collection_name}'.")
            # Bug fix: use asyncio.sleep instead of time.sleep — the blocking call
            # stalled the entire event loop for the brief post-drop wait.
            await asyncio.sleep(1)
        except CollectionNotFoundException:
            logger.info(
                f"Collection '{self.collection_name}' not found in scope '{self.scope_name}'. No need to drop."
            )
        except Exception as e:
            logger.error(f"Error dropping collection '{self.collection_name}' during overwrite: {e}")
            raise
        # Proceed to create the collection
        try:
            logger.info(f"Creating collection '{self.collection_name}' in scope '{self.scope_name}'.")
            await collection_manager.create_collection(
                scope_name=self.scope_name, collection_name=self.collection_name
            )
            logger.info(
                f"Successfully created collection '{self.collection_name}' after drop attempt (overwrite=True)."
            )
        except CollectionAlreadyExistsException:
            # This is an unexpected state if overwrite=True and drop was supposed to clear the way.
            logger.error(
                f"Failed to create collection '{self.collection_name}' as it already exists, "
                f"even after drop attempt for overwrite. Overwrite operation may not have completed as intended."
            )
            raise  # Re-raise as the overwrite intent failed
        except Exception as e:
            logger.error(
                f"Error creating collection '{self.collection_name}' after drop attempt (overwrite=True): {e}"
            )
            raise
    else:  # self.overwrite is False
        try:
            logger.info(
                f"Overwrite is False. Attempting to create collection '{self.collection_name}' in scope '{self.scope_name}'."
            )
            await collection_manager.create_collection(
                scope_name=self.scope_name, collection_name=self.collection_name
            )
            logger.info(f"Successfully created new collection '{self.collection_name}'.")
        except CollectionAlreadyExistsException:
            logger.info(
                f"Collection '{self.collection_name}' already exists in scope '{self.scope_name}'. Using existing collection."
            )
        except Exception as e:
            logger.error(f"Error creating collection '{self.collection_name}': {e}")
            raise
async def _get_async_search_indexes_mng(self) -> Union[AsyncSearchIndexManager, AsyncScopeSearchIndexManager]:
    """Return the async FTS index manager at the configured level (cluster vs. scope)."""
    if self.is_global_level_index:
        return (await self.get_async_cluster()).search_indexes()
    return (await self.get_async_scope()).search_indexes()
async def _async_create_fts_index(self):
    """Create a FTS index on the collection if it doesn't exist (async mirror of ``_create_fts_index``).

    Flow: if the index exists and overwrite is False, this is a no-op. If it is
    missing and only an index *name* was configured, a ValueError is raised.
    Otherwise the index is (re)created from ``self.search_index_definition``.

    Raises:
        ValueError: If the index does not exist and no definition was provided.
    """
    async_search_mng = await self._get_async_search_indexes_mng()
    try:
        # Check if index exists and handle string index name
        await async_search_mng.get_index(self.search_index_name)
        if not self.overwrite:
            return
    except Exception:
        # Lookup failed -> index is missing; without a definition we cannot create it.
        if self.search_index_definition is None:
            raise ValueError(f"Index '{self.search_index_name}' does not exist")
    # Create or update index
    try:
        if self.overwrite:
            try:
                logger.info(f"Dropping existing FTS index '{self.search_index_name}'")
                await async_search_mng.drop_index(self.search_index_name)
            except SearchIndexNotFoundException:
                logger.warning(f"Index '{self.search_index_name}' does not exist")
            except Exception as e:
                logger.warning(f"Error dropping index (may not exist): {e}")
        await async_search_mng.upsert_index(self.search_index_definition)
        logger.info(f"Created FTS index '{self.search_index_name}'")
        if self.wait_until_index_ready:
            await self._async_wait_for_index_ready()
    except Exception as e:
        logger.error(f"Error creating FTS index '{self.search_index_name}': {e}")
        raise
async def _async_wait_for_index_ready(self):
    """Wait until the FTS index is ready.

    Polls the indexed-document count once per second. A raising call is
    treated as "not ready yet"; any non-negative count means the index
    responded and is considered ready.

    NOTE(review): ``self.wait_until_index_ready`` is used both as a boolean
    flag by the caller and as the timeout in seconds here -- confirm the
    configured value is a number of seconds.

    Raises:
        TimeoutError: If the index is not ready within the timeout.
    """
    start_time = time.time()
    async_search_mng = await self._get_async_search_indexes_mng()
    while True:
        try:
            count = await async_search_mng.get_indexed_documents_count(self.search_index_name)
            if count > -1:
                # The count call succeeded, so the index is responsive.
                logger.info(f"FTS index '{self.search_index_name}' is ready")
                break
            # logger.info(f"FTS index '{self.search_index_name}' is not ready yet status: {index['status']}")
        except Exception as e:
            # Not ready yet; only give up once the timeout budget is spent.
            if time.time() - start_time > self.wait_until_index_ready:
                logger.error(f"Error checking index status: {e}")
                raise TimeoutError("Timeout waiting for FTS index to become ready")
        await asyncio.sleep(1)
async def async_id_exists(self, id: str) -> bool:
    """Return True if a document with the given id exists (async).

    Any error while checking is logged and reported as "does not exist".
    """
    try:
        collection = await self.get_async_collection()
        exists_result = await collection.exists(id)
        found = exists_result.exists
        if not found:
            logger.debug(f"[async] Document does not exist: {id}")
        return found
    except Exception as e:
        logger.error(f"[async] Error checking document existence: {e}")
        return False
async def async_name_exists(self, name: str) -> bool:
    """Return True if any document with the given name exists (async).

    Any error while querying is logged and reported as "does not exist".
    """
    try:
        query = f"SELECT name FROM {self.bucket_name}.{self.scope_name}.{self.collection_name} WHERE name = $name LIMIT 1"
        scope_handle = await self.get_async_scope()
        result = scope_handle.query(
            query, QueryOptions(named_parameters={"name": name}, scan_consistency=QueryScanConsistency.REQUEST_PLUS)
        )
        # LIMIT 1 means a single yielded row is enough to answer.
        async for _ in result.rows():
            return True
        return False
    except Exception as e:
        logger.error(f"[async] Error checking document name existence: {e}")
        return False
async def async_insert(
    self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None
) -> None:
    """Insert documents into the collection asynchronously.

    Steps:
      1. Embed every document -- via one batch call when the embedder
         supports it, otherwise one concurrent task per document.
      2. Prepare each document with ``self.prepare_doc`` (keyed by its
         ``_id``), attaching ``filters`` when provided.
      3. Insert in concurrent batches of ``self.batch_limit``; individual
         document failures are logged and counted, never raised.

    Args:
        content_hash: Hash of the source content, stored on each document.
        documents: Documents to embed and insert.
        filters: Optional filter payload stored under the "filters" key.

    Raises:
        Exception: Only re-raised for rate-limit errors during batch embedding.
    """
    logger.info(f"[async] Inserting {len(documents)} documents")
    async_collection_instance = await self.get_async_collection()
    all_docs_to_insert: Dict[str, Any] = {}

    if self.embedder.enable_batch and hasattr(self.embedder, "async_get_embeddings_batch_and_usage"):
        # Use batch embedding when enabled and supported
        try:
            # Extract content from all documents
            doc_contents = [doc.content for doc in documents]

            # Get batch embeddings and usage
            embeddings, usages = await self.embedder.async_get_embeddings_batch_and_usage(doc_contents)

            # Process documents with pre-computed embeddings
            for j, doc in enumerate(documents):
                try:
                    if j < len(embeddings):
                        doc.embedding = embeddings[j]
                        doc.usage = usages[j] if j < len(usages) else None
                except Exception as e:
                    logger.error(f"Error assigning batch embedding to document '{doc.name}': {e}")

        except Exception as e:
            # Check if this is a rate limit error - don't fall back as it would make things worse
            error_str = str(e).lower()
            is_rate_limit = any(
                phrase in error_str
                for phrase in ["rate limit", "too many requests", "429", "trial key", "api calls / minute"]
            )

            if is_rate_limit:
                logger.error(f"Rate limit detected during batch embedding. {e}")
                raise e
            else:
                logger.warning(f"Async batch embedding failed, falling back to individual embeddings: {e}")
                # Fall back to individual embedding
                embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
                await asyncio.gather(*embed_tasks, return_exceptions=True)
    else:
        # Use individual embedding
        embed_tasks = [document.async_embed(embedder=self.embedder) for document in documents]
        await asyncio.gather(*embed_tasks, return_exceptions=True)

    for document in documents:
        try:
            # User edit: self.prepare_doc is no longer awaited with to_thread
            doc_data = self.prepare_doc(content_hash, document)
            if filters:
                doc_data["filters"] = filters
            doc_id = doc_data.pop("_id")  # Remove _id as it's used as key
            all_docs_to_insert[doc_id] = doc_data
        except Exception as e:
            logger.error(f"[async] Error preparing document '{document.name}': {e}")

    if not all_docs_to_insert:
        logger.info("[async] No documents prepared for insertion.")
        return

    doc_ids = list(all_docs_to_insert.keys())
    total_inserted_count = 0
    total_failed_count = 0
    processed_doc_count = len(all_docs_to_insert)

    # Insert in batches; within one batch all inserts run concurrently.
    for i in range(0, len(doc_ids), self.batch_limit):
        batch_doc_ids = doc_ids[i : i + self.batch_limit]
        logger.info(f"[async] Processing batch of {len(batch_doc_ids)} documents for concurrent insertion.")

        insert_tasks = []
        for doc_id in batch_doc_ids:
            doc_content = all_docs_to_insert[doc_id]
            insert_tasks.append(async_collection_instance.insert(doc_id, doc_content))

        if insert_tasks:
            # return_exceptions=True so one failed insert does not cancel the batch.
            results = await asyncio.gather(*insert_tasks, return_exceptions=True)
            for idx, result in enumerate(results):
                # Get the original doc_id for logging, corresponding to the task order
                current_doc_id = batch_doc_ids[idx]
                if isinstance(result, Exception):
                    total_failed_count += 1
                    logger.error(f"[async] Error inserting document '{current_doc_id}': {result}")
                else:
                    # Assuming successful insert doesn't return a specific value we need to check further,
                    # or if it does, the absence of an exception means success.
                    total_inserted_count += 1
                    logger.debug(f"[async] Successfully inserted document '{current_doc_id}'.")

    logger.info(f"[async] Finished processing {processed_doc_count} documents.")
    logger.info(f"[async] Total successfully inserted: {total_inserted_count}, Total failed: {total_failed_count}.")
async def async_upsert(
    self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None
) -> None:
    """Upsert documents asynchronously.

    Deletes any documents already carrying ``content_hash`` and then
    re-inserts the new versions via ``_async_upsert``.

    NOTE(review): ``content_hash_exists`` and ``_delete_by_content_hash``
    are synchronous and will block the event loop while they run --
    confirm this is acceptable for the expected document volumes.

    Args:
        content_hash: Hash identifying the source content.
        documents: Documents to embed and upsert.
        filters: Optional filter payload stored under the "filters" key.
    """
    if self.content_hash_exists(content_hash):
        self._delete_by_content_hash(content_hash)
    await self._async_upsert(content_hash=content_hash, documents=documents, filters=filters)
async def _async_upsert(
    self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None
) -> None:
    """Embed, prepare, and upsert documents in concurrent batches.

    Mirrors ``async_insert`` but uses ``upsert`` so existing keys are
    overwritten. Individual document failures are logged and counted,
    never raised.

    Args:
        content_hash: Hash of the source content, stored on each document.
        documents: Documents to embed and upsert.
        filters: Optional filter payload stored under the "filters" key.

    Raises:
        Exception: Only re-raised for rate-limit errors during batch embedding.
    """
    logger.info(f"[async] Upserting {len(documents)} documents")
    async_collection_instance = await self.get_async_collection()
    all_docs_to_upsert: Dict[str, Any] = {}

    if self.embedder.enable_batch and hasattr(self.embedder, "async_get_embeddings_batch_and_usage"):
        # Use batch embedding when enabled and supported
        try:
            # Extract content from all documents
            doc_contents = [doc.content for doc in documents]

            # Get batch embeddings and usage
            embeddings, usages = await self.embedder.async_get_embeddings_batch_and_usage(doc_contents)

            # Process documents with pre-computed embeddings
            for j, doc in enumerate(documents):
                try:
                    if j < len(embeddings):
                        doc.embedding = embeddings[j]
                        doc.usage = usages[j] if j < len(usages) else None
                except Exception as e:
                    logger.error(f"Error assigning batch embedding to document '{doc.name}': {e}")

        except Exception as e:
            # Check if this is a rate limit error - don't fall back as it would make things worse
            error_str = str(e).lower()
            is_rate_limit = any(
                phrase in error_str
                for phrase in ["rate limit", "too many requests", "429", "trial key", "api calls / minute"]
            )

            if is_rate_limit:
                logger.error(f"Rate limit detected during batch embedding. {e}")
                raise e
            else:
                logger.warning(f"Async batch embedding failed, falling back to individual embeddings: {e}")
                # Fall back to individual embedding
                embed_tasks = [doc.async_embed(embedder=self.embedder) for doc in documents]
                await asyncio.gather(*embed_tasks, return_exceptions=True)
    else:
        # Use individual embedding
        embed_tasks = [document.async_embed(embedder=self.embedder) for document in documents]
        await asyncio.gather(*embed_tasks, return_exceptions=True)

    for document in documents:
        try:
            # Consistent with async_insert, prepare_doc is not awaited with to_thread based on prior user edits
            doc_data = self.prepare_doc(content_hash, document)
            if filters:
                doc_data["filters"] = filters
            doc_id = doc_data.pop("_id")  # _id is used as key for upsert
            all_docs_to_upsert[doc_id] = doc_data
        except Exception as e:
            logger.error(f"[async] Error preparing document '{document.name}' for upsert: {e}")

    if not all_docs_to_upsert:
        logger.info("[async] No documents prepared for upsert.")
        return

    doc_ids = list(all_docs_to_upsert.keys())
    total_upserted_count = 0
    total_failed_count = 0
    processed_doc_count = len(all_docs_to_upsert)
    logger.info(f"[async] Prepared {processed_doc_count} documents for upsert.")

    # Upsert in batches; within one batch all upserts run concurrently.
    for i in range(0, len(doc_ids), self.batch_limit):
        batch_doc_ids = doc_ids[i : i + self.batch_limit]
        logger.info(f"[async] Processing batch of {len(batch_doc_ids)} documents for concurrent upsert.")

        upsert_tasks = []
        for doc_id in batch_doc_ids:
            doc_content = all_docs_to_upsert[doc_id]
            upsert_tasks.append(async_collection_instance.upsert(doc_id, doc_content))

        if upsert_tasks:
            # return_exceptions=True so one failed upsert does not cancel the batch.
            results = await asyncio.gather(*upsert_tasks, return_exceptions=True)
            for idx, result in enumerate(results):
                current_doc_id = batch_doc_ids[idx]
                if isinstance(result, Exception):
                    total_failed_count += 1
                    logger.error(f"[async] Error upserting document '{current_doc_id}': {result}")
                else:
                    # Assuming successful upsert doesn't return a specific value we need to check further,
                    # or if it does, the absence of an exception means success.
                    total_upserted_count += 1
                    logger.debug(f"[async] Successfully upserted document '{current_doc_id}'.")

    logger.info(f"[async] Finished processing {processed_doc_count} documents for upsert.")
    logger.info(f"[async] Total successfully upserted: {total_upserted_count}, Total failed: {total_failed_count}.")
async def async_search(
    self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> List[Document]:
    """Run a vector similarity search asynchronously.

    Embeds the query, issues a Couchbase FTS vector search against the
    "embedding" field, then hydrates the hits from the KV store.

    NOTE(review): the query embedding uses the synchronous
    ``self.embedder.get_embedding`` and will block the event loop --
    confirm this is acceptable, or switch to an async embedder call.

    Args:
        query: Natural-language query text to embed and search with.
        limit: Maximum number of candidates/results.
        filters: Dict of raw FTS options; ``FilterExpr`` lists are not
            supported and are ignored with a warning.

    Returns:
        Matching documents (empty list if the query could not be embedded).

    Raises:
        Exception: Re-raised after logging if the search itself fails.
    """
    if isinstance(filters, List):
        log_warning("Filter Expressions are not yet supported in Couchbase. No filters will be applied.")
        filters = None
    query_embedding = self.embedder.get_embedding(query)
    if query_embedding is None:
        logger.error(f"[async] Failed to generate embedding for query: {query}")
        return []

    try:
        # Implement vector search using Couchbase FTS
        vector_search = VectorSearch.from_vector_query(
            VectorQuery(field_name="embedding", vector=query_embedding, num_candidates=limit)
        )
        request = SearchRequest.create(vector_search)

        # Prepare the options dictionary
        options_dict = {"limit": limit, "fields": ["*"]}
        if filters:
            # Filters are passed through verbatim as "raw" FTS options.
            options_dict["raw"] = filters

        search_args = {
            "index": self.search_index_name,
            "request": request,
            "options": SearchOptions(**options_dict),  # Construct SearchOptions with the dictionary
        }

        # Global indexes are searched via the cluster, scoped ones via the scope.
        if self.is_global_level_index:
            async_cluster_instance = await self.get_async_cluster()
            results = async_cluster_instance.search(**search_args)
        else:
            async_scope_instance = await self.get_async_scope()
            results = async_scope_instance.search(**search_args)
        return await self.__async_get_doc_from_kv(results)
    except Exception as e:
        logger.error(f"[async] Error during search: {e}")
        raise
async def async_drop(self) -> None:
    """Drop the collection if it exists (async).

    Errors during the drop are logged and re-raised; a missing collection
    is a no-op.
    """
    if not await self.async_exists():
        return
    try:
        bucket_handle = await self.get_async_bucket()
        await bucket_handle.collections().drop_collection(
            collection_name=self.collection_name, scope_name=self.scope_name
        )
        logger.info(f"[async] Collection '{self.collection_name}' dropped successfully.")
    except Exception as e:
        logger.error(f"[async] Error dropping collection '{self.collection_name}': {e}")
        raise
async def async_exists(self) -> bool:
    """Return True if the target collection exists in its scope (async).

    Any failure while listing scopes is reported as "does not exist".
    """
    try:
        bucket_handle = await self.get_async_bucket()
        all_scopes = await bucket_handle.collections().get_all_scopes()
        return any(
            scope_spec.name == self.scope_name
            and any(col.name == self.collection_name for col in scope_spec.collections)
            for scope_spec in all_scopes
        )
    except Exception:
        return False
async def __async_get_doc_from_kv(self, response: AsyncSearchIndex) -> List[Document]:
    """
    Convert search results to Document objects by fetching full documents from KV store concurrently.

    Args:
        response: Search result object from a Couchbase search query, iterated
            via ``.rows()`` (NOTE(review): the ``AsyncSearchIndex`` annotation
            looks inaccurate for an object consumed this way -- confirm).

    Returns:
        List of Document objects; hits whose KV fetch or parsing failed are
        logged and skipped.
    """
    documents: List[Document] = []
    # Map hit id -> score; insertion order preserves the search ranking.
    search_hits_map = {doc.id: doc.score async for doc in response.rows()}
    doc_ids_to_fetch = list(search_hits_map.keys())
    if not doc_ids_to_fetch:
        return documents
    async_collection_instance = await self.get_async_collection()

    # Fetch the hit documents from KV in batches of self.batch_limit.
    for i in range(0, len(doc_ids_to_fetch), self.batch_limit):
        batch_doc_ids = doc_ids_to_fetch[i : i + self.batch_limit]
        if not batch_doc_ids:
            continue
        logger.debug(f"[async] Fetching batch of {len(batch_doc_ids)} documents from KV.")
        get_tasks = [async_collection_instance.get(doc_id) for doc_id in batch_doc_ids]

        # Fetch documents from KV store concurrently for the current batch
        results_from_kv_batch = await asyncio.gather(*get_tasks, return_exceptions=True)

        for batch_idx, get_result in enumerate(results_from_kv_batch):
            # Original doc_id corresponding to this result within the batch
            doc_id = batch_doc_ids[batch_idx]
            # score = search_hits_map[doc_id]  # Retrieve the original score
            # BaseException already covers Exception, so a single check
            # suffices (the previous code tested both, redundantly).
            if isinstance(get_result, BaseException) or get_result is None:
                logger.warning(f"[async] Document {doc_id} not found or error fetching from KV store: {get_result}")
                continue
            try:
                value = get_result.content_as[dict]
                if not isinstance(value, dict):
                    logger.warning(
                        f"[async] Document {doc_id} content from KV is not a dict: {type(value)}. Skipping."
                    )
                    continue
                documents.append(
                    Document(
                        id=doc_id,
                        name=value.get("name"),
                        content=value.get("content", ""),
                        meta_data=value.get("meta_data", {}),
                        embedding=value.get("embedding", []),
                    )
                )
            except Exception as e:
                logger.warning(
                    f"[async] Error processing document {doc_id} from KV store: {e}. Value: {getattr(get_result, 'content_as', 'N/A')}"
                )
                continue
    return documents
def delete_by_id(self, id: str) -> bool:
    """Delete a single document by its ID.

    Args:
        id: The document ID to delete.

    Returns:
        True if the document was deleted, False otherwise (including on error).
    """
    try:
        log_debug(f"Couchbase VectorDB : Deleting document with ID {id}")
        if not self.id_exists(id):
            return False
        # Remove the document directly by key.
        self.collection.remove(id)
        log_info(f"Successfully deleted document with ID {id}")
    except Exception as e:
        log_info(f"Error deleting document with ID {id}: {e}")
        return False
    return True
def delete_by_name(self, name: str) -> bool:
    """Delete every document whose ``name`` field matches.

    Args:
        name: The document name to delete.

    Returns:
        True if the query and deletions succeeded, False on any error.
    """
    try:
        log_debug(f"Couchbase VectorDB : Deleting documents with name {name}")
        query = f"SELECT META().id as doc_id, * FROM {self.bucket_name}.{self.scope_name}.{self.collection_name} WHERE name = $name"
        matches = list(
            self.scope.query(
                query,
                QueryOptions(named_parameters={"name": name}, scan_consistency=QueryScanConsistency.REQUEST_PLUS),
            ).rows()
        )
        for match in matches:
            self.collection.remove(match.get("doc_id"))
        log_info(f"Deleted {len(matches)} documents with name {name}")
        return True
    except Exception as e:
        log_info(f"Error deleting documents with name {name}: {e}")
        return False
def delete_by_metadata(self, metadata: Dict[str, Any]) -> bool:
    """
    Delete documents whose stored filters match the given metadata.

    Each key must match either ``filters.<key>`` or ``recipes.filters.<key>``
    in the stored document; all keys must match (conditions are AND-ed).

    Args:
        metadata (Dict[str, Any]): The metadata to match for deletion

    Returns:
        bool: True if documents were deleted, False otherwise
    """
    try:
        log_debug(f"Couchbase VectorDB : Deleting documents with metadata {metadata}")
        if not metadata:
            log_info("No metadata provided for deletion")
            return False

        # Build WHERE clause for metadata matching.
        # NOTE(review): metadata *keys* are interpolated into the statement
        # directly (only values are parameterized) -- callers must not pass
        # untrusted key names.
        where_conditions = []
        named_parameters: Dict[str, Any] = {}
        for key, value in metadata.items():
            if isinstance(value, (list, tuple)):
                # For array values, use ARRAY_CONTAINS
                where_conditions.append(
                    f"(ARRAY_CONTAINS(filters.{key}, $value_{key}) OR ARRAY_CONTAINS(recipes.filters.{key}, $value_{key}))"
                )
                named_parameters[f"value_{key}"] = value
            elif value is None:
                where_conditions.append(f"(filters.{key} IS NULL OR recipes.filters.{key} IS NULL)")
            else:
                # Scalars are compared with equality; unknown types are
                # stringified first. (The original had four byte-identical
                # branches for str/bool/int/float, collapsed here.)
                where_conditions.append(f"(filters.{key} = $value_{key} OR recipes.filters.{key} = $value_{key})")
                named_parameters[f"value_{key}"] = (
                    value if isinstance(value, (str, bool, int, float)) else str(value)
                )

        if not where_conditions:
            log_info("No valid metadata conditions for deletion")
            return False

        where_clause = " AND ".join(where_conditions)
        query = f"SELECT META().id as doc_id, * FROM {self.bucket_name}.{self.scope_name}.{self.collection_name} WHERE {where_clause}"
        result = self.scope.query(
            query,
            QueryOptions(named_parameters=named_parameters, scan_consistency=QueryScanConsistency.REQUEST_PLUS),
        )
        rows = list(result.rows())  # Collect once
        for row in rows:
            self.collection.remove(row.get("doc_id"))
        log_info(f"Deleted {len(rows)} documents with metadata {metadata}")
        return True
    except Exception as e:
        log_info(f"Error deleting documents with metadata {metadata}: {e}")
        return False
def delete_by_content_id(self, content_id: str) -> bool:
    """Delete every document whose ``content_id`` (top-level or under
    ``recipes``) matches.

    Args:
        content_id: The content ID to delete.

    Returns:
        True if the query and deletions succeeded, False on any error.
    """
    try:
        log_debug(f"Couchbase VectorDB : Deleting documents with content_id {content_id}")
        query = f"SELECT META().id as doc_id, * FROM {self.bucket_name}.{self.scope_name}.{self.collection_name} WHERE content_id = $content_id OR recipes.content_id = $content_id"
        matches = list(
            self.scope.query(
                query,
                QueryOptions(
                    named_parameters={"content_id": content_id}, scan_consistency=QueryScanConsistency.REQUEST_PLUS
                ),
            ).rows()
        )
        for match in matches:
            self.collection.remove(match.get("doc_id"))
        log_info(f"Deleted {len(matches)} documents with content_id {content_id}")
        return True
    except Exception as e:
        log_info(f"Error deleting documents with content_id {content_id}: {e}")
        return False
def _delete_by_content_hash(self, content_hash: str) -> bool:
    """Delete every document whose ``content_hash`` field matches.

    Args:
        content_hash: The content hash to delete.

    Returns:
        True if the query and deletions succeeded, False on any error.
    """
    try:
        log_debug(f"Couchbase VectorDB : Deleting documents with content_hash {content_hash}")
        query = f"SELECT META().id as doc_id, * FROM {self.bucket_name}.{self.scope_name}.{self.collection_name} WHERE content_hash = $content_hash"
        matches = list(
            self.scope.query(
                query,
                QueryOptions(
                    named_parameters={"content_hash": content_hash},
                    scan_consistency=QueryScanConsistency.REQUEST_PLUS,
                ),
            ).rows()
        )
        for match in matches:
            self.collection.remove(match.get("doc_id"))
        log_info(f"Deleted {len(matches)} documents with content_hash {content_hash}")
        return True
    except Exception as e:
        log_info(f"Error deleting documents with content_hash {content_hash}: {e}")
        return False
def update_metadata(self, content_id: str, metadata: Dict[str, Any]) -> None:
    """
    Update the metadata for documents with the given content_id.

    Merges ``metadata`` into both the ``meta_data`` and ``filters`` fields
    of every matching document and re-upserts the whole document.
    Per-document failures are logged and skipped; only errors in the outer
    lookup raise.

    NOTE(review): unlike the delete_* methods, this query targets the bucket
    directly (not bucket.scope.collection) and passes the named parameter as
    a plain kwarg instead of QueryOptions -- confirm this is intended.

    Args:
        content_id (str): The content ID to update
        metadata (Dict[str, Any]): The metadata to update

    Raises:
        Exception: Re-raised after logging if the lookup fails.
    """
    try:
        # Query for documents with the given content_id
        query = f"SELECT META().id as doc_id, meta_data, filters FROM `{self.bucket_name}` WHERE content_id = $content_id"
        result = self.cluster.query(query, content_id=content_id)

        updated_count = 0
        for row in result:
            doc_id = row.get("doc_id")
            current_metadata = row.get("meta_data", {})
            current_filters = row.get("filters", {})

            # Merge existing metadata with new metadata
            if isinstance(current_metadata, dict):
                updated_metadata = current_metadata.copy()
                updated_metadata.update(metadata)
            else:
                # Non-dict stored value is replaced wholesale.
                updated_metadata = metadata

            # Merge existing filters with new metadata
            if isinstance(current_filters, dict):
                updated_filters = current_filters.copy()
                updated_filters.update(metadata)
            else:
                updated_filters = metadata

            # Update the document
            try:
                # Read-modify-write: fetch the full doc, patch the two fields, upsert back.
                doc = self.collection.get(doc_id)
                doc_content = doc.content_as[dict]
                doc_content["meta_data"] = updated_metadata
                doc_content["filters"] = updated_filters
                self.collection.upsert(doc_id, doc_content)
                updated_count += 1
            except Exception as doc_error:
                logger.warning(f"Failed to update document {doc_id}: {doc_error}")

        if updated_count == 0:
            logger.debug(f"No documents found with content_id: {content_id}")
        else:
            logger.debug(f"Updated metadata for {updated_count} documents with content_id: {content_id}")

    except Exception as e:
        logger.error(f"Error updating metadata for content_id '{content_id}': {e}")
        raise
def get_supported_search_types(self) -> List[str]:
    """Get the supported search types for this vector database.

    CouchbaseSearch does not use the SearchType enum, so this is always
    an empty list.
    """
    return []
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/vectordb/couchbase/couchbase.py",
"license": "Apache License 2.0",
"lines": 1244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/tools/whatsapp.py | from os import getenv
from typing import Any, Dict, List, Optional
import httpx
from agno.tools import Toolkit
from agno.utils.log import logger
class WhatsAppTools(Toolkit):
    """WhatsApp Business API toolkit for sending messages.

    Wraps the Meta Graph API messages endpoint with synchronous and
    asynchronous variants for text and template messages. Recipient
    resolution and payload construction are shared between the sync and
    async paths via private helpers.
    """

    # Graph API host; the full endpoint is built in _get_messages_url().
    base_url = "https://graph.facebook.com"

    def __init__(
        self,
        access_token: Optional[str] = None,
        phone_number_id: Optional[str] = None,
        version: Optional[str] = None,
        recipient_waid: Optional[str] = None,
        async_mode: bool = False,
    ):
        """Initialize WhatsApp toolkit.

        Args:
            access_token: WhatsApp Business API access token
            phone_number_id: WhatsApp Business Account phone number ID
            version: API version to use
            recipient_waid: Default recipient WhatsApp ID (optional)
            async_mode: Whether to use async methods (default: False)
        """
        # Core credentials
        self.access_token = access_token or getenv("WHATSAPP_ACCESS_TOKEN")
        if not self.access_token:
            logger.error("WHATSAPP_ACCESS_TOKEN not set. Please set the WHATSAPP_ACCESS_TOKEN environment variable.")

        self.phone_number_id = phone_number_id or getenv("WHATSAPP_PHONE_NUMBER_ID")
        if not self.phone_number_id:
            logger.error(
                "WHATSAPP_PHONE_NUMBER_ID not set. Please set the WHATSAPP_PHONE_NUMBER_ID environment variable."
            )

        # Optional default recipient
        self.default_recipient = recipient_waid or getenv("WHATSAPP_RECIPIENT_WAID")

        # API version and mode
        self.version = version or getenv("WHATSAPP_VERSION", "v22.0")
        self.async_mode = async_mode

        # Register either the async or the sync tool variants, never both.
        tools: List[Any] = []
        if self.async_mode:
            tools.append(self.send_text_message_async)
            tools.append(self.send_template_message_async)
        else:
            tools.append(self.send_text_message_sync)
            tools.append(self.send_template_message_sync)

        super().__init__(name="whatsapp", tools=tools)

    def _get_headers(self) -> Dict[str, str]:
        """Get headers for API requests."""
        return {"Authorization": f"Bearer {self.access_token}", "Content-Type": "application/json"}

    def _get_messages_url(self) -> str:
        """Get the messages endpoint URL."""
        return f"{self.base_url}/{self.version}/{self.phone_number_id}/messages"

    def _resolve_recipient(self, recipient: Optional[str], log_default: bool = False) -> str:
        """Return the effective recipient, falling back to the configured default.

        Args:
            recipient: Explicit recipient, or None to use the default.
            log_default: Whether to log a debug line when the default is used.

        Raises:
            ValueError: If no recipient is given and no default is configured.
        """
        if recipient is not None:
            return recipient
        if not self.default_recipient:
            raise ValueError("No recipient provided and no default recipient set")
        if log_default:
            logger.debug(f"Using default recipient: {self.default_recipient}")
        return self.default_recipient

    @staticmethod
    def _build_text_payload(recipient: str, text: str, preview_url: bool, recipient_type: str) -> Dict[str, Any]:
        """Build the request body for a text message."""
        return {
            "messaging_product": "whatsapp",
            "recipient_type": recipient_type,
            "to": recipient,
            "type": "text",
            "text": {"preview_url": preview_url, "body": text},
        }

    @staticmethod
    def _build_template_payload(
        recipient: str, template_name: str, language_code: str, components: Optional[List[Dict[str, Any]]]
    ) -> Dict[str, Any]:
        """Build the request body for a template message."""
        data: Dict[str, Any] = {
            "messaging_product": "whatsapp",
            "to": recipient,
            "type": "template",
            "template": {"name": template_name, "language": {"code": language_code}},
        }
        if components:
            data["template"]["components"] = components
        return data

    async def _send_message_async(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Send a message asynchronously using the WhatsApp API.

        Args:
            data: Message data to send

        Returns:
            API response as dictionary
        """
        url = self._get_messages_url()
        headers = self._get_headers()
        logger.debug(f"Sending WhatsApp request to URL: {url}")
        async with httpx.AsyncClient() as client:
            response = await client.post(url, headers=headers, json=data)
            response.raise_for_status()
            return response.json()

    def _send_message_sync(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Send a message synchronously using the WhatsApp API.

        Args:
            data: Message data to send

        Returns:
            API response as dictionary
        """
        url = self._get_messages_url()
        headers = self._get_headers()
        logger.debug(f"Sending WhatsApp request to URL: {url}")
        response = httpx.post(url, headers=headers, json=data)
        response.raise_for_status()
        return response.json()

    def send_text_message_sync(
        self,
        text: str = "",
        recipient: Optional[str] = None,
        preview_url: bool = False,
        recipient_type: str = "individual",
    ) -> str:
        """Send a text message to a WhatsApp user (synchronous version).

        Args:
            text: The text message to send
            recipient: Recipient's WhatsApp ID or phone number (e.g., "+1234567890"). If not provided, uses default_recipient
            preview_url: Whether to generate previews for links in the message

        Returns:
            Success message with message ID
        """
        recipient = self._resolve_recipient(recipient, log_default=True)
        logger.debug(f"Sending WhatsApp message to {recipient}: {text}")
        logger.debug(f"Current config - Phone Number ID: {self.phone_number_id}, Version: {self.version}")

        data = self._build_text_payload(recipient, text, preview_url, recipient_type)
        try:
            response = self._send_message_sync(data)
            message_id = response.get("messages", [{}])[0].get("id", "unknown")
            return f"Message sent successfully! Message ID: {message_id}"
        except httpx.HTTPStatusError as e:
            logger.error(f"Failed to send WhatsApp message: {e}")
            logger.error(f"Error response: {e.response.text if hasattr(e, 'response') else 'No response text'}")
            raise
        except Exception as e:
            logger.error(f"Unexpected error sending WhatsApp message: {str(e)}")
            raise

    def send_template_message_sync(
        self,
        recipient: Optional[str] = None,
        template_name: str = "",
        language_code: str = "en_US",
        components: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        """Send a template message to a WhatsApp user (synchronous version).

        Args:
            recipient: Recipient's WhatsApp ID or phone number (e.g., "+1234567890"). If not provided, uses default_recipient
            template_name: Name of the template to use
            language_code: Language code for the template (e.g., "en_US")
            components: Optional list of template components (header, body, buttons)

        Returns:
            Success message with message ID
        """
        recipient = self._resolve_recipient(recipient)
        logger.debug(f"Sending WhatsApp template message to {recipient}: {template_name}")

        data = self._build_template_payload(recipient, template_name, language_code, components)
        try:
            response = self._send_message_sync(data)
            message_id = response.get("messages", [{}])[0].get("id", "unknown")
            return f"Template message sent successfully! Message ID: {message_id}"
        except httpx.HTTPStatusError as e:
            logger.error(f"Failed to send WhatsApp template message: {e}")
            raise

    async def send_text_message_async(
        self,
        text: str = "",
        recipient: Optional[str] = None,
        preview_url: bool = False,
        recipient_type: str = "individual",
    ) -> str:
        """Send a text message to a WhatsApp user (asynchronous version).

        Args:
            text: The text message to send
            recipient: Recipient's WhatsApp ID or phone number (e.g., "+1234567890"). If not provided, uses default_recipient
            preview_url: Whether to generate previews for links in the message

        Returns:
            Success message with message ID
        """
        recipient = self._resolve_recipient(recipient, log_default=True)
        logger.debug(f"Sending WhatsApp message to {recipient}: {text}")
        logger.debug(f"Current config - Phone Number ID: {self.phone_number_id}, Version: {self.version}")

        data = self._build_text_payload(recipient, text, preview_url, recipient_type)
        try:
            response = await self._send_message_async(data)
            message_id = response.get("messages", [{}])[0].get("id", "unknown")
            return f"Message sent successfully! Message ID: {message_id}"
        except httpx.HTTPStatusError as e:
            logger.error(f"Failed to send WhatsApp message: {e}")
            logger.error(f"Error response: {e.response.text if hasattr(e, 'response') else 'No response text'}")
            raise
        except Exception as e:
            logger.error(f"Unexpected error sending WhatsApp message: {str(e)}")
            raise

    async def send_template_message_async(
        self,
        recipient: Optional[str] = None,
        template_name: str = "",
        language_code: str = "en_US",
        components: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        """Send a template message to a WhatsApp user (asynchronous version).

        Args:
            recipient: Recipient's WhatsApp ID or phone number (e.g., "+1234567890"). If not provided, uses default_recipient
            template_name: Name of the template to use
            language_code: Language code for the template (e.g., "en_US")
            components: Optional list of template components (header, body, buttons)

        Returns:
            Success message with message ID
        """
        recipient = self._resolve_recipient(recipient)
        logger.debug(f"Sending WhatsApp template message to {recipient}: {template_name}")

        data = self._build_template_payload(recipient, template_name, language_code, components)
        try:
            response = await self._send_message_async(data)
            message_id = response.get("messages", [{}])[0].get("id", "unknown")
            return f"Template message sent successfully! Message ID: {message_id}"
        except httpx.HTTPStatusError as e:
            logger.error(f"Failed to send WhatsApp template message: {e}")
            raise
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/whatsapp.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/whatsapp.py | import os
from typing import Optional, Union
import httpx
import requests
from agno.utils.log import log_debug, log_error
def get_access_token() -> str:
    """Return the WhatsApp access token from the environment.

    Raises:
        ValueError: If WHATSAPP_ACCESS_TOKEN is unset or empty.
    """
    token = os.getenv("WHATSAPP_ACCESS_TOKEN")
    if token:
        return token
    raise ValueError("WHATSAPP_ACCESS_TOKEN is not set")
def get_phone_number_id() -> str:
    """Return the WhatsApp phone number ID from the environment.

    Raises:
        ValueError: If WHATSAPP_PHONE_NUMBER_ID is unset or empty.
    """
    number_id = os.getenv("WHATSAPP_PHONE_NUMBER_ID")
    if number_id:
        return number_id
    raise ValueError("WHATSAPP_PHONE_NUMBER_ID is not set")
def get_media(media_id: str) -> Union[dict, bytes]:
    """
    Retrieve a media object's bytes from the Facebook Graph API.

    Args:
        media_id (str): The ID of the media to retrieve.

    Returns:
        The raw media bytes on success, or a dict with an "error" key.
    """
    metadata_url = f"https://graph.facebook.com/v22.0/{media_id}"
    access_token = get_access_token()
    headers = {"Authorization": f"Bearer {access_token}"}

    # First request: resolve the media id to a download URL.
    try:
        meta_response = requests.get(metadata_url, headers=headers)
        meta_response.raise_for_status()
        media_url = meta_response.json().get("url")
    except requests.exceptions.RequestException as e:
        return {"error": str(e)}

    # Second request: download the actual bytes from that URL.
    try:
        media_response = requests.get(media_url, headers=headers)
        media_response.raise_for_status()
        return media_response.content
    except requests.exceptions.RequestException as e:
        return {"error": str(e)}
async def get_media_async(media_id: str) -> Union[dict, bytes]:
    """
    Retrieve a media object's bytes from the Facebook Graph API (async).

    Args:
        media_id (str): The ID of the media to retrieve.

    Returns:
        The raw media bytes on success, or a dict with an "error" key.
    """
    url = f"https://graph.facebook.com/v22.0/{media_id}"
    access_token = get_access_token()
    headers = {"Authorization": f"Bearer {access_token}"}
    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(url, headers=headers)
            response.raise_for_status()  # Raise an HTTPError for bad responses (4xx and 5xx)
            data = response.json()
            media_url = data.get("url")
    # httpx.HTTPError is the common base of HTTPStatusError and RequestError.
    # The previous code caught only HTTPStatusError, so connection/timeout
    # errors escaped instead of returning an error dict like the sync
    # (requests.exceptions.RequestException) version does.
    except httpx.HTTPError as e:
        return {"error": str(e)}
    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(media_url, headers=headers)
            response.raise_for_status()  # Raise an HTTPError for bad responses (4xx and 5xx)
            data = response.content
            return data
    except httpx.HTTPError as e:
        return {"error": str(e)}
def upload_media(media_data: bytes, mime_type: str, filename: str = "file"):
    """
    Sends a POST request to the Facebook Graph API to upload media for WhatsApp.

    Args:
        media_data: Bytes buffer containing the file data
        mime_type (str): The MIME type of the file
        filename (str): The name to use for the file in the upload. Defaults to "file"

    Returns:
        The uploaded media id on success, or a {"error": ...} dict on failure.
    """
    phone_number_id = get_phone_number_id()
    upload_url = f"https://graph.facebook.com/v22.0/{phone_number_id}/media"
    request_headers = {"Authorization": f"Bearer {get_access_token()}"}
    form_fields = {"messaging_product": "whatsapp", "type": mime_type}
    try:
        from io import BytesIO

        attachments = {"file": (filename, BytesIO(media_data), mime_type)}
        response = requests.post(upload_url, headers=request_headers, data=form_fields, files=attachments)
        response.raise_for_status()  # Raise an error for bad responses
        payload = response.json()
        uploaded_id = payload.get("id")
        if uploaded_id:
            return uploaded_id
        return {"error": "Media ID not found in response", "response": payload}
    except requests.exceptions.RequestException as e:
        return {"error": str(e)}
    except Exception as e:
        return {"error": str(e)}
async def upload_media_async(media_data: bytes, mime_type: str, filename: str = "file"):
    """
    Sends a POST request to the Facebook Graph API to upload media for WhatsApp.

    Args:
        media_data: Bytes buffer containing the file data
        mime_type (str): The MIME type of the file
        filename (str): The name to use for the file in the upload. Defaults to "file"

    Returns:
        The uploaded media id on success, or a {"error": ...} dict on failure.
    """
    phone_number_id = get_phone_number_id()
    upload_url = f"https://graph.facebook.com/v22.0/{phone_number_id}/media"
    request_headers = {"Authorization": f"Bearer {get_access_token()}"}
    form_fields = {"messaging_product": "whatsapp", "type": mime_type}
    try:
        from io import BytesIO

        attachments = {"file": (filename, BytesIO(media_data), mime_type)}
        async with httpx.AsyncClient() as client:
            response = await client.post(upload_url, headers=request_headers, data=form_fields, files=attachments)
            response.raise_for_status()  # Raise an error for bad responses
            payload = response.json()
        uploaded_id = payload.get("id")
        if uploaded_id:
            return uploaded_id
        return {"error": "Media ID not found in response", "response": payload}
    except httpx.HTTPStatusError as e:
        return {"error": str(e)}
    except Exception as e:
        # Catch-all keeps network/encoding failures as error dicts too.
        return {"error": str(e)}
async def send_image_message_async(
    media_id: str,
    recipient: str,
    text: Optional[str] = None,
):
    """Send an image message to a WhatsApp user (asynchronous version).

    Args:
        media_id: The media id for the image to send
        recipient: Recipient's WhatsApp ID or phone number (e.g., "+1234567890").
        text: Caption for the image

    Returns:
        Success message with message ID
    """
    log_debug(f"Sending WhatsApp image to {recipient}: {text}")
    endpoint = f"https://graph.facebook.com/v22.0/{get_phone_number_id()}/messages"
    request_headers = {"Authorization": f"Bearer {get_access_token()}"}
    payload = {
        "messaging_product": "whatsapp",
        "recipient_type": "individual",
        "to": recipient,
        "type": "image",
        "image": {"id": media_id, "caption": text},
    }
    try:
        import json

        log_debug(f"Request data: {json.dumps(payload, indent=2)}")
        async with httpx.AsyncClient() as client:
            response = await client.post(endpoint, headers=request_headers, json=payload)
            response.raise_for_status()
            log_debug(f"Response: {response.text}")
    except httpx.HTTPStatusError as e:
        log_error(f"Failed to send WhatsApp image message: {e}")
        log_error(f"Error response: {e.response.text if hasattr(e, 'response') else 'No response text'}")
        raise
    except Exception as e:
        log_error(f"Unexpected error sending WhatsApp image message: {str(e)}")
        raise
def send_image_message(
    media_id: str,
    recipient: str,
    text: Optional[str] = None,
):
    """Send an image message to a WhatsApp user (synchronous version).

    Args:
        media_id: The media id for the image to send
        recipient: Recipient's WhatsApp ID or phone number (e.g., "+1234567890").
        text: Caption for the image

    Returns:
        Success message with message ID
    """
    log_debug(f"Sending WhatsApp image to {recipient}: {text}")
    phone_number_id = get_phone_number_id()
    url = f"https://graph.facebook.com/v22.0/{phone_number_id}/messages"
    access_token = get_access_token()
    headers = {"Authorization": f"Bearer {access_token}"}
    data = {
        "messaging_product": "whatsapp",
        "recipient_type": "individual",
        "to": recipient,
        "type": "image",
        "image": {"id": media_id, "caption": text},
    }
    try:
        import json

        log_debug(f"Request data: {json.dumps(data, indent=2)}")
        response = requests.post(url, headers=headers, json=data)
        response.raise_for_status()
        log_debug(f"Response: {response.text}")
    except requests.exceptions.RequestException as e:
        log_error(f"Failed to send WhatsApp image message: {e}")
        # RequestException does not always carry a response (e.g. connection
        # errors), hence the hasattr guard before reading its text.
        log_error(f"Error response: {e.response.text if hasattr(e, 'response') else 'No response text'}")  # type: ignore
        raise
    except Exception as e:
        log_error(f"Unexpected error sending WhatsApp image message: {str(e)}")
        raise
def typing_indicator(message_id: Optional[str] = None):
    """Mark a message as read and show a typing indicator to its sender.

    Args:
        message_id: ID of the message being responded to. No-op when falsy.

    Returns:
        None on success (or no-op), or a {"error": ...} dict on request failure.
    """
    if not message_id:
        return
    phone_number_id = get_phone_number_id()
    url = f"https://graph.facebook.com/v22.0/{phone_number_id}/messages"
    access_token = get_access_token()
    headers = {"Authorization": f"Bearer {access_token}"}
    data = {
        "messaging_product": "whatsapp",
        "status": "read",
        "message_id": f"{message_id}",
        "typing_indicator": {"type": "text"},
    }
    try:
        # Send as JSON (not form data): the payload contains a nested dict,
        # which form-encoding would mangle, and the Graph API expects a JSON
        # body — consistent with every other sender in this module.
        response = requests.post(url, headers=headers, json=data)
        response.raise_for_status()  # Raise an HTTPError for bad responses (4xx and 5xx)
    except requests.exceptions.RequestException as e:
        return {"error": str(e)}
async def typing_indicator_async(message_id: Optional[str] = None):
    """Mark a message as read and show a typing indicator (async version).

    Args:
        message_id: ID of the message being responded to. No-op when falsy.

    Returns:
        None on success (or no-op), or a {"error": ...} dict on request failure.
    """
    if not message_id:
        return
    phone_number_id = get_phone_number_id()
    url = f"https://graph.facebook.com/v22.0/{phone_number_id}/messages"
    access_token = get_access_token()
    headers = {"Authorization": f"Bearer {access_token}"}
    data = {
        "messaging_product": "whatsapp",
        "status": "read",
        "message_id": f"{message_id}",
        "typing_indicator": {"type": "text"},
    }
    try:
        async with httpx.AsyncClient() as client:
            # Send as JSON (not form data): the payload contains a nested
            # dict and the Graph API expects a JSON body.
            response = await client.post(url, headers=headers, json=data)
            response.raise_for_status()  # Raise an HTTPError for bad responses (4xx and 5xx)
    # httpx.HTTPError also covers connection errors, matching the sync
    # version's RequestException coverage.
    except httpx.HTTPError as e:
        return {"error": str(e)}
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/whatsapp.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/models/nebius/nebius.py | from dataclasses import dataclass, field
from os import getenv
from typing import Any, Dict, Optional
from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike
@dataclass
class Nebius(OpenAILike):
    """
    A class for interacting with Nebius Token Factory models.

    Attributes:
        id (str): The model id. Defaults to "openai/gpt-oss-20b".
        name (str): The model name. Defaults to "Nebius".
        provider (str): The provider name. Defaults to "Nebius".
        api_key (Optional[str]): The API key. Read from the NEBIUS_API_KEY
            environment variable when not provided.
        base_url (str): The base URL. Defaults to "https://api.tokenfactory.nebius.com/v1/".
    """

    id: str = "openai/gpt-oss-20b"  # Default model for chat
    name: str = "Nebius"
    provider: str = "Nebius"

    api_key: Optional[str] = field(default_factory=lambda: getenv("NEBIUS_API_KEY"))
    base_url: str = "https://api.tokenfactory.nebius.com/v1/"

    def _get_client_params(self) -> Dict[str, Any]:
        """Build the keyword arguments for the OpenAI-compatible client.

        Returns:
            Dict[str, Any]: Non-None client parameters, with any user-supplied
            ``client_params`` overrides applied last.

        Raises:
            ModelAuthenticationError: If no API key is configured.
        """
        if not self.api_key:
            raise ModelAuthenticationError(
                message="NEBIUS_API_KEY not set. Please set the NEBIUS_API_KEY environment variable.",
                model_name=self.name,
            )

        # Define base client params
        base_params = {
            "api_key": self.api_key,
            "organization": self.organization,
            "base_url": self.base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "default_headers": self.default_headers,
            "default_query": self.default_query,
        }
        # Create client_params dict with non-None values
        client_params = {k: v for k, v in base_params.items() if v is not None}
        # Add additional client params if provided
        if self.client_params:
            client_params.update(self.client_params)
        return client_params
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/nebius/nebius.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/tools/models/nebius.py | import base64
from os import getenv
from typing import Optional
from uuid import uuid4
from agno.agent import Agent
from agno.media import Image
from agno.models.nebius import Nebius
from agno.tools import Toolkit
from agno.tools.function import ToolResult
from agno.utils.log import log_error, log_warning
class NebiusTools(Toolkit):
    """Tools for interacting with Nebius Token Factory's text-to-image API"""

    def __init__(
        self,
        api_key: Optional[str] = None,
        base_url: str = "https://api.tokenfactory.nebius.com/v1",
        image_model: str = "black-forest-labs/flux-schnell",
        image_quality: Optional[str] = "standard",
        image_size: Optional[str] = "1024x1024",
        image_style: Optional[str] = None,
        enable_generate_image: bool = True,
        all: bool = False,
        **kwargs,
    ):
        """Initialize Nebius Token Factory text-to-image tools.

        Args:
            api_key: Nebius API key. If not provided, will look for NEBIUS_API_KEY environment variable.
            base_url: The base URL for the Nebius Token Factory API. This should be configured according to Nebius's documentation.
            image_model: The model to use for generation. Options include:
                - "black-forest-labs/flux-schnell" (fastest)
                - "black-forest-labs/flux-dev" (balanced)
                - "stability-ai/sdxl" (highest quality)
            image_quality: Image quality. Options: "standard", "hd".
            image_size: Image size in format "WIDTHxHEIGHT". Max supported: 2000x2000.
            image_style: Optional style preset to apply.
            enable_generate_image: Enable image generation functionality.
            all: Enable all functions.
            **kwargs: Additional arguments to pass to Toolkit.

        Raises:
            ValueError: If no API key is provided and NEBIUS_API_KEY is unset.
        """
        tools = []
        if all or enable_generate_image:
            tools.append(self.generate_image)

        super().__init__(name="nebius_tools", tools=tools, **kwargs)

        self.api_key = api_key or getenv("NEBIUS_API_KEY")
        if not self.api_key:
            raise ValueError("NEBIUS_API_KEY not set. Please set the NEBIUS_API_KEY environment variable.")
        self.base_url = base_url
        self.image_model = image_model
        self.image_quality = image_quality
        self.image_size = image_size
        self.image_style = image_style
        # Underlying OpenAI-compatible client; created lazily on first use.
        self._nebius_client: Optional[Nebius] = None

    def _get_client(self):
        """Lazily create and cache the Nebius API client."""
        if self._nebius_client is None:
            self._nebius_client = Nebius(api_key=self.api_key, base_url=self.base_url, id=self.image_model).get_client()  # type: ignore
        return self._nebius_client

    def generate_image(
        self,
        agent: Agent,
        prompt: str,
    ) -> ToolResult:
        """Generate images based on a text prompt using Nebius Token Factory.

        Args:
            agent: The agent instance for adding images
            prompt: The text prompt to generate images from.

        Returns:
            ToolResult: A ToolResult containing the generated image or error message.
        """
        try:
            # Only forward the optional parameters that were actually set.
            extra_params = {
                "size": self.image_size,
                "quality": self.image_quality,
                "style": self.image_style,
            }
            extra_params = {k: v for k, v in extra_params.items() if v is not None}

            client = self._get_client()
            response = client.images.generate(
                model=self.image_model,
                prompt=prompt,
                response_format="b64_json",
                **extra_params,
            )

            data = None
            if hasattr(response, "data") and response.data:
                data = response.data[0]

            if data is None:
                log_warning("Nebius API did not return any data.")
                return ToolResult(content="Failed to generate image: No data received from API.")

            if hasattr(data, "b64_json") and data.b64_json:
                image_base64 = data.b64_json
                image_content_bytes = base64.b64decode(image_base64)
                media_id = str(uuid4())

                # Create ImageArtifact with raw bytes
                image_artifact = Image(
                    id=media_id, content=image_content_bytes, mime_type="image/png", original_prompt=prompt
                )

                return ToolResult(
                    content="Image generated successfully.",
                    images=[image_artifact],
                )

            return ToolResult(content="Failed to generate image: No content received from API.")

        except Exception as e:
            log_error(f"Failed to generate image using {self.image_model}: {e}")
            return ToolResult(content=f"Failed to generate image: {e}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/models/nebius.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/models/nebius/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput # noqa
from agno.db.sqlite import SqliteDb
from agno.models.nebius import Nebius
NEBIUS_MODEL_ID = "Qwen/Qwen3-4B-fast"
def _assert_metrics(response: RunOutput):
    """Check that token metrics are present, positive, and self-consistent."""
    metrics = response.metrics
    assert metrics is not None

    prompt_tokens = metrics.input_tokens
    completion_tokens = metrics.output_tokens
    combined_tokens = metrics.total_tokens

    assert prompt_tokens > 0
    assert completion_tokens > 0
    assert combined_tokens > 0
    # Totals must reconcile exactly.
    assert combined_tokens == prompt_tokens + completion_tokens
def test_basic():
    """A plain run returns content and a system/user/assistant transcript."""
    agent = Agent(model=Nebius(id=NEBIUS_MODEL_ID), markdown=True, telemetry=False)

    # Run once, non-streaming, against the live Nebius API.
    response: RunOutput = agent.run("Share a 2 sentence comedy story")

    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]

    _assert_metrics(response)
def test_basic_stream():
    """Every streamed chunk carries non-None content."""
    agent = Agent(model=Nebius(id=NEBIUS_MODEL_ID), markdown=True, telemetry=False)

    for response in agent.run("Share a 2 sentence horror story", stream=True):
        assert response.content is not None
@pytest.mark.asyncio
async def test_async_basic():
    """Async run mirrors the sync basic test: content, transcript, metrics."""
    agent = Agent(model=Nebius(id=NEBIUS_MODEL_ID), markdown=True, telemetry=False)

    response = await agent.arun("Share a 2 sentence horror story")

    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
@pytest.mark.asyncio
async def test_async_basic_stream():
    """Every async-streamed chunk carries non-None content."""
    agent = Agent(model=Nebius(id=NEBIUS_MODEL_ID), markdown=True, telemetry=False)

    async for response in agent.arun("Share a 2 sentence horror story", stream=True):
        assert response.content is not None
def test_with_memory():
    """With history enabled, the second turn can recall facts from the first."""
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=Nebius(id=NEBIUS_MODEL_ID),
        add_history_to_context=True,
        markdown=True,
        telemetry=False,
    )

    # First interaction
    response1 = agent.run("My name is John Smith")
    assert response1.content is not None

    # Second interaction should remember the name
    response2 = agent.run("What's my name?")
    assert response2.content is not None
    assert "John Smith" in response2.content  # type: ignore

    # Verify both turns were persisted to the session history
    messages = agent.get_session_messages()
    assert len(messages) == 5
    assert [m.role for m in messages] == ["system", "user", "assistant", "user", "assistant"]

    # Test metrics structure and types
    _assert_metrics(response2)
def test_output_schema():
    """output_schema makes the agent return a populated Pydantic model."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        output_schema=MovieScript,
        telemetry=False,
    )

    response = agent.run("Create a movie about time travel")

    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_json_response_mode():
    """Structured output also works via the model's JSON mode (use_json_mode=True)."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        output_schema=MovieScript,
        use_json_mode=True,
        telemetry=False,
    )

    response = agent.run("Create a movie about time travel")

    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_history():
    """With history stored and replayed, each run adds two messages (user + assistant)."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        db=SqliteDb(db_file="tmp/nebius/test_basic.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )

    # Four turns: the transcript should grow 2 -> 4 -> 6 -> 8.
    for turn, prompt in enumerate(["Hello", "Hello 2", "Hello 3", "Hello 4"]):
        run_output = agent.run(prompt)
        assert run_output.messages is not None
        assert len(run_output.messages) == 2 * (turn + 1)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/nebius/test_basic.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/nebius/test_tool_use.py | from typing import Optional
import pytest
from agno.agent import Agent # noqa
from agno.models.nebius import Nebius
from agno.tools.exa import ExaTools
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
NEBIUS_MODEL_ID = "Qwen/Qwen3-30B-A3B"
def test_tool_use():
    """The agent invokes the finance tool and mentions the ticker in its answer."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )

    response = agent.run("What is the current price of TSLA?")

    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_use_stream():
    """Streaming a tool-using run surfaces tool-call events and the expected keyword."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )

    # Initialize flags before the loop: without this, a stream that never
    # produces a tool call (or the keyword) raises NameError on the final
    # asserts instead of a meaningful AssertionError.
    tool_call_seen = False
    keyword_seen_in_response = False

    for chunk in agent.run("What is the current price of TSLA?", stream=True, stream_events=True):
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:  # type: ignore
            if chunk.tool.tool_name:  # type: ignore
                tool_call_seen = True
        if chunk.content is not None and "TSLA" in chunk.content:
            keyword_seen_in_response = True

    assert tool_call_seen, "No tool calls observed in stream"
    assert keyword_seen_in_response, "Keyword not found in response"
@pytest.mark.asyncio
async def test_async_tool_use():
    """Async variant: the agent invokes the finance tool and mentions the ticker."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )

    response = await agent.arun("What is the current price of TSLA?")

    # Verify tool usage (tool calls live on assistant messages)
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.role == "assistant" and msg.tool_calls is not None)
    assert response.content is not None
    assert "TSLA" in response.content
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """Async streaming surfaces tool-call events and the expected keyword."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )

    # Initialize flags before the loop: without this, a stream that never
    # produces a tool call (or the keyword) raises NameError on the final
    # asserts instead of a meaningful AssertionError.
    tool_call_seen = False
    keyword_seen_in_response = False

    async for chunk in agent.arun("What is the current price of TSLA?", stream=True, stream_events=True):
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:  # type: ignore
            if chunk.tool.tool_name:  # type: ignore
                tool_call_seen = True
        if chunk.content is not None and "TSLA" in chunk.content:
            keyword_seen_in_response = True

    assert tool_call_seen, "No tool calls observed in stream"
    assert keyword_seen_in_response, "Keyword not found in response"
def test_parallel_tool_calls():
    """Asking about two tickers should yield at least two function tool calls."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )

    response = agent.run("What is the current price of TSLA and AAPL?")

    # Verify tool usage
    assert response.messages is not None
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls is not None:
            tool_calls.extend(msg.tool_calls)
    assert len([call for call in tool_calls if call.get("type", "") == "function"]) >= 2  # Total of 2 tool calls made
    assert response.content is not None
    assert "TSLA" in response.content and "AAPL" in response.content
def test_multiple_tool_calls():
    """A mixed request (price + news) should exercise two different toolkits."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        tools=[YFinanceTools(cache_results=True), WebSearchTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )

    response = agent.run("What is the current price of TSLA and what is the latest news about it?")

    # Verify tool usage
    assert response.messages is not None
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls is not None:
            tool_calls.extend(msg.tool_calls)
    assert len([call for call in tool_calls if call.get("type", "") == "function"]) >= 2  # Total of 2 tool calls made
    assert response.content is not None
    assert "TSLA" in response.content and "latest news" in response.content.lower()
def test_tool_call_custom_tool_no_parameters():
    """A plain zero-argument Python function works as a tool."""

    def get_the_weather_in_tokyo():
        """
        Get the weather in Tokyo
        """
        return "It is currently 70 degrees and cloudy in Tokyo"

    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        tools=[get_the_weather_in_tokyo],
        markdown=True,
        telemetry=False,
    )

    response = agent.run("What is the weather in Tokyo?")

    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "70" in response.content
def test_tool_call_custom_tool_optional_parameters():
    """A tool with an Optional parameter is callable with or without it."""

    def get_the_weather(city: Optional[str] = None):
        """
        Get the weather in a city

        Args:
            city: The city to get the weather for
        """
        if city is None:
            return "It is currently 70 degrees and cloudy in Tokyo"
        else:
            return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        tools=[get_the_weather],
        markdown=True,
        telemetry=False,
    )

    response = agent.run("What is the weather in Paris?")

    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "70" in response.content
def test_tool_call_list_parameters():
    """Exa tools accept list-valued arguments (multiple URLs in one call)."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        tools=[ExaTools()],
        instructions="Use a single tool call if possible",
        markdown=True,
        telemetry=False,
    )

    response = agent.run(
        "What are the papers at https://arxiv.org/pdf/2307.06435 and https://arxiv.org/pdf/2502.09601 about?"
    )

    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls is not None:
            tool_calls.extend(msg.tool_calls)
    # Only Exa toolkit functions should have been invoked.
    for call in tool_calls:
        if call.get("type", "") == "function":
            assert call["function"]["name"] in ["get_contents", "exa_answer", "search_exa"]
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/nebius/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/models/test_nebius.py | import base64
from unittest.mock import MagicMock, patch
from uuid import UUID
import pytest
from agno.agent import Agent
from agno.media import Image
from agno.tools.function import ToolResult
from agno.tools.models.nebius import NebiusTools
# Fixture for mock agent
@pytest.fixture
def mock_agent():
    """A MagicMock standing in for an Agent (spec'd so attribute typos fail)."""
    agent = MagicMock(spec=Agent)
    return agent
# Fixture for mock client
@pytest.fixture
def mock_client():
    """A bare MagicMock standing in for the Nebius OpenAI-compatible client."""
    client = MagicMock()
    return client
# Fixture for mock NebiusTools with mock client
@pytest.fixture
def mock_nebius_tools(mock_client):
    """NebiusTools instance whose underlying client is pre-replaced by mock_client."""
    with patch("agno.tools.models.nebius.Nebius") as mock_nebius:
        mock_nebius_instance = MagicMock()
        mock_nebius_instance.get_client.return_value = mock_client
        mock_nebius.return_value = mock_nebius_instance

        nebius_tools = NebiusTools(api_key="fake_test_key")
        # Bypass lazy initialization so _get_client returns the mock directly.
        nebius_tools._nebius_client = mock_client
        return nebius_tools
# Fixture for successful API response
@pytest.fixture
def mock_successful_response():
    """API response mock carrying one image encoded as base64 (bytes form)."""
    mock_response = MagicMock()
    mock_data = MagicMock()
    # b64encode returns bytes; base64.b64decode accepts bytes or str.
    mock_data.b64_json = base64.b64encode(b"fake_image_base64")
    mock_response.data = [mock_data]
    return mock_response
# Fixture for failed API response (no image data)
@pytest.fixture
def mock_failed_response_no_data():
    """API response mock with an empty data list (no images generated)."""
    mock_response = MagicMock()
    mock_response.data = []  # Empty list to simulate no images generated
    return mock_response
# Test Initialization
def test_nebius_tools_init_with_api_key_arg():
    """Test initialization with API key provided as an argument."""
    api_key = "test_api_key_arg"
    with patch("agno.tools.models.nebius.Nebius") as mock_nebius_cls:
        mock_nebius_instance = MagicMock()
        mock_client_instance = MagicMock()
        mock_nebius_instance.get_client.return_value = mock_client_instance
        mock_nebius_cls.return_value = mock_nebius_instance

        nebius_tools = NebiusTools(api_key=api_key)
        assert nebius_tools.api_key == api_key
        # Client is created lazily, so it must still be unset right after init.
        assert nebius_tools._nebius_client is None
def test_nebius_tools_init_with_env_var():
    """Test initialization with API key from environment variable."""
    env_api_key = "test_api_key_env"

    # Only NEBIUS_API_KEY resolves; every other env var reads as unset.
    def mock_getenv_side_effect(var_name):
        if var_name == "NEBIUS_API_KEY":
            return env_api_key
        return None

    with patch("agno.tools.models.nebius.getenv", side_effect=mock_getenv_side_effect) as mock_getenv:
        with patch("agno.tools.models.nebius.Nebius") as mock_nebius_cls:
            mock_nebius_instance = MagicMock()
            mock_client_instance = MagicMock()
            mock_nebius_instance.get_client.return_value = mock_client_instance
            mock_nebius_cls.return_value = mock_nebius_instance

            nebius_tools = NebiusTools()
            assert nebius_tools.api_key == env_api_key
            assert nebius_tools._nebius_client is None
            assert mock_getenv.called
def test_nebius_tools_init_no_api_key():
    """Test initialization raises ValueError when no API key is found."""

    # Every env var reads as unset.
    def mock_getenv_side_effect(var_name):
        return None

    with patch("agno.tools.models.nebius.getenv", side_effect=mock_getenv_side_effect) as mock_getenv:
        with pytest.raises(ValueError, match="NEBIUS_API_KEY not set"):
            NebiusTools()
        assert mock_getenv.called
# Test _get_client method
def test_get_client_lazy_initialization():
    """Test that client is lazily initialized."""
    with patch("agno.tools.models.nebius.Nebius") as mock_nebius_cls:
        mock_nebius_instance = MagicMock()
        mock_client = MagicMock()
        mock_nebius_instance.get_client.return_value = mock_client
        mock_nebius_cls.return_value = mock_nebius_instance

        nebius_tools = NebiusTools(api_key="test_api_key")

        # Client should not be initialized yet
        assert nebius_tools._nebius_client is None

        # Get client should initialize it (and cache it for later calls)
        client = nebius_tools._get_client()
        assert client == mock_client
        assert nebius_tools._nebius_client == mock_client
        mock_nebius_instance.get_client.assert_called_once()
# Test generate_image method
def test_generate_image_success(mock_nebius_tools, mock_agent, mock_successful_response):
    """Test successful image generation."""
    mock_client = mock_nebius_tools._get_client()
    mock_client.images.generate.return_value = mock_successful_response

    # Pin the uuid so the artifact id is deterministic.
    with patch("agno.tools.models.nebius.uuid4", return_value=UUID("12345678-1234-5678-1234-567812345678")):
        prompt = "A picture of a cat"
        result = mock_nebius_tools.generate_image(mock_agent, prompt)

        # Check that it returns a ToolResult
        assert isinstance(result, ToolResult)
        assert result.content == "Image generated successfully."
        assert result.images is not None
        assert len(result.images) == 1

        # Verify the ImageArtifact properties
        image_artifact = result.images[0]
        assert isinstance(image_artifact, Image)
        assert image_artifact.id == "12345678-1234-5678-1234-567812345678"
        assert image_artifact.original_prompt == prompt
        assert image_artifact.mime_type == "image/png"
        # Content is the base64-decoded bytes of the fixture's payload.
        assert image_artifact.content == b"fake_image_base64"

        mock_client.images.generate.assert_called_once_with(
            model=mock_nebius_tools.image_model,
            prompt=prompt,
            response_format="b64_json",
            size="1024x1024",
            quality="standard",
        )
def test_generate_image_no_data(mock_nebius_tools, mock_agent, mock_failed_response_no_data):
    """Test image generation when no data is returned."""
    mock_client = mock_nebius_tools._get_client()
    mock_client.images.generate.return_value = mock_failed_response_no_data

    prompt = "A picture of a cat"
    result = mock_nebius_tools.generate_image(mock_agent, prompt)

    # Check that it returns a ToolResult with error (no images attached)
    assert isinstance(result, ToolResult)
    assert result.content == "Failed to generate image: No data received from API."
    assert result.images is None

    mock_client.images.generate.assert_called_once()
def test_generate_image_api_error(mock_nebius_tools, mock_agent):
    """Test image generation when API call raises an exception."""
    mock_client = mock_nebius_tools._get_client()
    error_message = "API Error"
    mock_client.images.generate.side_effect = Exception(error_message)

    prompt = "A picture of a cat"
    result = mock_nebius_tools.generate_image(mock_agent, prompt)
    expected_error = f"Failed to generate image: {error_message}"

    # Check that it returns a ToolResult with error — the tool swallows the
    # exception rather than propagating it.
    assert isinstance(result, ToolResult)
    assert result.content == expected_error
    assert result.images is None

    mock_client.images.generate.assert_called_once()
# Test with different image parameters
def test_generate_image_with_custom_params():
    """Test image generation with custom parameters."""
    with patch("agno.tools.models.nebius.Nebius") as mock_nebius_cls:
        mock_nebius_instance = MagicMock()
        mock_client = MagicMock()
        mock_data = MagicMock()
        # Fix: Use properly encoded base64 data like in the other tests
        mock_data.b64_json = base64.b64encode(b"fake_image_base64").decode("utf-8")
        mock_response = MagicMock()
        mock_response.data = [mock_data]
        mock_client.images.generate.return_value = mock_response
        mock_nebius_instance.get_client.return_value = mock_client
        mock_nebius_cls.return_value = mock_nebius_instance

        custom_model = "custom-model"
        custom_quality = "hd"
        custom_size = "2048x2048"
        custom_style = "vivid"

        nebius_tools = NebiusTools(
            api_key="test_key",
            image_model=custom_model,
            image_quality=custom_quality,
            image_size=custom_size,
            image_style=custom_style,
        )

        mock_agent = MagicMock(spec=Agent)
        prompt = "A picture of a dog"

        with patch("agno.tools.models.nebius.uuid4", return_value=UUID("12345678-1234-5678-1234-567812345678")):
            result = nebius_tools.generate_image(mock_agent, prompt)

            # Check that it returns a ToolResult
            assert isinstance(result, ToolResult)
            assert result.content == "Image generated successfully."
            assert result.images is not None
            assert len(result.images) == 1

            # All custom parameters must be forwarded to the API call.
            mock_client.images.generate.assert_called_once_with(
                model=custom_model,
                prompt=prompt,
                response_format="b64_json",
                size=custom_size,
                quality=custom_quality,
                style=custom_style,
            )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/models/test_nebius.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/eval/utils.py | from dataclasses import asdict
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from agno.db.base import AsyncBaseDb, BaseDb
from agno.db.schemas.evals import EvalRunRecord, EvalType
from agno.utils.log import log_debug, logger
if TYPE_CHECKING:
from agno.eval.accuracy import AccuracyResult
from agno.eval.agent_as_judge import AgentAsJudgeResult
from agno.eval.performance import PerformanceResult
from agno.eval.reliability import ReliabilityResult
def log_eval_run(
    db: BaseDb,
    run_id: str,
    run_data: dict,
    eval_type: EvalType,
    eval_input: dict,
    agent_id: Optional[str] = None,
    model_id: Optional[str] = None,
    model_provider: Optional[str] = None,
    name: Optional[str] = None,
    evaluated_component_name: Optional[str] = None,
    team_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
) -> None:
    """Persist an evaluation run in the given database.

    Args:
        db: Database used to store the evaluation run.
        run_id: Unique identifier of the evaluation run.
        run_data: Result payload of the evaluation run.
        eval_type: Type of the evaluation (accuracy, performance, ...).
        eval_input: Input that was evaluated.
        agent_id: Identifier of the evaluated agent, if any.
        model_id: Identifier of the evaluated model, if any.
        model_provider: Provider of the evaluated model, if any.
        name: Name of the evaluation run.
        evaluated_component_name: Name of the evaluated component.
        team_id: Identifier of the evaluated team, if any.
        workflow_id: Identifier of the evaluated workflow, if any.

    Notes:
        Storage failures are deliberately swallowed (logged at debug level)
        so that a persistence error never breaks the evaluation itself.
    """
    try:
        db.create_eval_run(
            EvalRunRecord(
                run_id=run_id,
                eval_type=eval_type,
                eval_data=run_data,
                eval_input=eval_input,
                agent_id=agent_id,
                model_id=model_id,
                model_provider=model_provider,
                name=name,
                evaluated_component_name=evaluated_component_name,
                team_id=team_id,
                workflow_id=workflow_id,
            )
        )
    except Exception as e:
        # Fixed log message: this function creates an eval run, not an "agent event"
        log_debug(f"Could not create eval run: {e}")
async def async_log_eval(
    db: Union[BaseDb, AsyncBaseDb],
    run_id: str,
    run_data: dict,
    eval_type: EvalType,
    eval_input: dict,
    agent_id: Optional[str] = None,
    model_id: Optional[str] = None,
    model_provider: Optional[str] = None,
    name: Optional[str] = None,
    evaluated_component_name: Optional[str] = None,
    team_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
) -> None:
    """Persist an evaluation run using either a sync or an async database.

    Args:
        db: Database used to store the evaluation run. Async databases are
            awaited; sync databases are called directly.
        run_id: Unique identifier of the evaluation run.
        run_data: Result payload of the evaluation run.
        eval_type: Type of the evaluation (accuracy, performance, ...).
        eval_input: Input that was evaluated.
        agent_id: Identifier of the evaluated agent, if any.
        model_id: Identifier of the evaluated model, if any.
        model_provider: Provider of the evaluated model, if any.
        name: Name of the evaluation run.
        evaluated_component_name: Name of the evaluated component.
        team_id: Identifier of the evaluated team, if any.
        workflow_id: Identifier of the evaluated workflow, if any.

    Notes:
        Storage failures are deliberately swallowed (logged at debug level)
        so that a persistence error never breaks the evaluation itself.
    """
    try:
        # Build the record once; only the create call differs by db type.
        record = EvalRunRecord(
            run_id=run_id,
            eval_type=eval_type,
            eval_data=run_data,
            eval_input=eval_input,
            agent_id=agent_id,
            model_id=model_id,
            model_provider=model_provider,
            name=name,
            evaluated_component_name=evaluated_component_name,
            team_id=team_id,
            workflow_id=workflow_id,
        )
        if isinstance(db, AsyncBaseDb):
            await db.create_eval_run(record)
        else:
            db.create_eval_run(record)
    except Exception as e:
        # Fixed log message: this function creates an eval run, not an "agent event"
        log_debug(f"Could not create eval run: {e}")
def store_result_in_file(
    file_path: str,
    result: Union["AccuracyResult", "AgentAsJudgeResult", "PerformanceResult", "ReliabilityResult"],
    eval_id: Optional[str] = None,
    name: Optional[str] = None,
):
    """Write the given evaluation result as JSON to the given file path.

    The path may contain ``{name}`` and ``{eval_id}`` placeholders, which are
    filled in before writing. Missing parent directories are created. Any
    failure is logged as a warning instead of being raised.
    """
    try:
        import json

        target = Path(file_path.format(name=name, eval_id=eval_id))
        # exist_ok makes the pre-existence check unnecessary
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(json.dumps(asdict(result), indent=4))
    except Exception as e:
        logger.warning(f"Failed to save result to file: {e}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/eval/utils.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/utils/models/ai_foundry.py | from typing import Any, Dict
from agno.models.message import Message
from agno.utils.log import log_warning
from agno.utils.openai import images_to_message
def format_message(message: Message, compress_tool_results: bool = False) -> Dict[str, Any]:
    """
    Convert an agno Message into the OpenAI-style payload used by the
    Azure AI Foundry integration.

    Args:
        message (Message): The message to convert.
        compress_tool_results: Whether tool messages should use their
            compressed content.

    Returns:
        Dict[str, Any]: The message payload, with None-valued fields dropped.
    """
    # Tool messages may carry a compressed variant of their content
    body = message.content
    if message.role == "tool":
        body = message.get_content(use_compressed_content=compress_tool_results)

    formatted: Dict[str, Any] = {
        key: value
        for key, value in (
            ("role", message.role),
            ("content", body),
            ("name", message.name),
            ("tool_call_id", message.tool_call_id),
            ("tool_calls", message.tool_calls),
        )
        if value is not None
    }

    if message.images is not None and len(message.images) > 0:
        # Only string content is wrapped into parts; non-string content is
        # assumed to already include the image parts
        if isinstance(message.content, str):
            formatted["content"] = [{"type": "text", "text": message.content}]
            formatted["content"].extend(images_to_message(images=message.images))

    if message.audio is not None and len(message.audio) > 0:
        log_warning("Audio input is currently unsupported.")
    if message.files is not None and len(message.files) > 0:
        log_warning("File input is currently unsupported.")
    if message.videos is not None and len(message.videos) > 0:
        log_warning("Video input is currently unsupported.")

    return formatted
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/models/ai_foundry.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/utils/models/cohere.py | from pathlib import Path
from typing import Any, Dict, List, Sequence
from agno.media import Image
from agno.models.message import Message
from agno.utils.log import log_error, log_warning
def _format_images_for_message(message: Message, images: Sequence[Image]) -> List[Dict[str, Any]]:
    """
    Build multi-part message content (text followed by images) in the
    format expected by the Cohere API.

    Args:
        message (Message): Message whose text content becomes the first part.
        images (Sequence[Image]): Images to append; each may carry raw bytes,
            a URL, or a local file path.

    Returns:
        List[Dict[str, Any]]: The text part plus one ``image_url`` part per
        successfully processed image. Images that fail to load are skipped.
    """
    # Create a default message content with text
    message_content_with_image: List[Dict[str, Any]] = [{"type": "text", "text": message.content}]

    # Add images to the message content
    for image in images:
        try:
            # Source precedence: raw bytes, then URL, then a local file path
            if image.content is not None:
                image_content = image.content
            elif image.url is not None:
                image_content = image.get_content_bytes()  # type: ignore
            elif image.filepath is not None:
                if isinstance(image.filepath, Path):
                    image_content = image.filepath.read_bytes()
                else:
                    with open(image.filepath, "rb") as f:
                        image_content = f.read()
            else:
                log_warning(f"Unsupported image format: {image}")
                continue

            if image_content is not None:
                import base64

                # NOTE(review): the data-URL media type is hard-coded to
                # image/jpeg regardless of the actual image format — confirm
                # the API tolerates mismatched media types
                base64_image = base64.b64encode(image_content).decode("utf-8")
                image_url = f"data:image/jpeg;base64,{base64_image}"
                image_payload = {"type": "image_url", "image_url": {"url": image_url}}
                message_content_with_image.append(image_payload)
        except Exception as e:
            log_error(f"Failed to process image: {str(e)}")

    # Return the combined text + image parts
    return message_content_with_image
def format_messages(messages: List[Message], compress_tool_results: bool = False) -> List[Dict[str, Any]]:
    """
    Convert a list of agno Messages into Cohere chat API payloads.

    Args:
        messages (List[Message]): The messages to convert.
        compress_tool_results: Whether tool messages should use their
            compressed content.

    Returns:
        List[Dict[str, Any]]: One payload dict per message, with None-valued
        fields dropped.
    """
    formatted: List[Dict[str, Any]] = []
    for msg in messages:
        # Tool messages may carry a compressed variant of their content
        if msg.role == "tool":
            body = msg.get_content(use_compressed_content=compress_tool_results)
        else:
            body = msg.content

        payload = {
            "role": msg.role,
            "content": body,
            "name": msg.name,
            "tool_call_id": msg.tool_call_id,
            "tool_calls": msg.tool_calls,
        }

        if msg.images is not None and len(msg.images) > 0:
            # Only string content is turned into multi-part content
            if isinstance(msg.content, str):
                parts = _format_images_for_message(message=msg, images=msg.images)
                if len(parts) > 1:
                    payload["content"] = parts

        if msg.videos is not None and len(msg.videos) > 0:
            log_warning("Video input is currently unsupported.")
        if msg.audio is not None and len(msg.audio) > 0:
            log_warning("Audio input is currently unsupported.")
        if msg.files is not None and len(msg.files) > 0:
            log_warning("File input is currently unsupported.")

        formatted.append({k: v for k, v in payload.items() if v is not None})
    return formatted
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/models/cohere.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/models/mistral.py | from typing import Any, List, Optional, Union
from agno.media import Image
from agno.models.message import Message
from agno.utils.log import log_error, log_warning
try:
# TODO: Adapt these imports to the new Mistral SDK versions
from mistralai.models import ( # type: ignore
AssistantMessage, # type: ignore
ImageURLChunk, # type: ignore
SystemMessage, # type: ignore
TextChunk, # type: ignore
ToolMessage, # type: ignore
UserMessage, # type: ignore
)
MistralMessage = Union[UserMessage, AssistantMessage, SystemMessage, ToolMessage]
except ImportError:
raise ImportError("`mistralai` not installed. Please install using `pip install mistralai`")
def _format_image_for_message(image: Image) -> Optional[ImageURLChunk]:
    """Convert an agno Image into a Mistral ImageURLChunk.

    Sources are checked in order: remote URL, local file path, raw bytes.

    Returns:
        Optional[ImageURLChunk]: The chunk, or None when the image carries
        none of the supported sources.

    Raises:
        FileNotFoundError: If a file path is given but does not exist.
    """
    # Case 1: Image is a URL
    if image.url is not None:
        return ImageURLChunk(image_url=image.url)

    # Case 2: Image is a local file path
    if image.filepath is not None:
        import base64
        from pathlib import Path

        path = Path(image.filepath) if isinstance(image.filepath, str) else image.filepath
        if not (path.exists() and path.is_file()):
            log_error(f"Image file not found: {image}")
            raise FileNotFoundError(f"Image file not found: {image}")
        with open(image.filepath, "rb") as image_file:
            encoded = base64.b64encode(image_file.read()).decode("utf-8")
        return ImageURLChunk(image_url=f"data:image/jpeg;base64,{encoded}")

    # Case 3: Image is a bytes object
    if image.content is not None:
        import base64

        encoded = base64.b64encode(image.content).decode("utf-8")
        return ImageURLChunk(image_url=f"data:image/jpeg;base64,{encoded}")

    return None
def format_messages(messages: List[Message], compress_tool_results: bool = False) -> List[MistralMessage]:
    """
    Convert agno Messages into the message types expected by the Mistral SDK.

    Args:
        messages (List[Message]): The messages to convert.
        compress_tool_results: Whether tool messages should use their
            compressed content.

    Returns:
        List[MistralMessage]: The converted messages. When the final message
        is an assistant message, its ``prefix`` flag is set so Mistral
        accepts it in last position.

    Raises:
        ValueError: If a message has an unknown role.
    """
    converted_messages: List[MistralMessage] = []
    for msg in messages:
        converted: MistralMessage
        if msg.role == "user":
            # Unsupported media types are dropped with a warning
            for media, label in ((msg.audio, "Audio"), (msg.files, "File"), (msg.videos, "Video")):
                if media is not None and len(media) > 0:
                    log_warning(f"{label} input is currently unsupported.")
            if msg.images is not None:
                parts: List[Any] = [TextChunk(type="text", text=msg.content)]
                for image in msg.images:
                    chunk = _format_image_for_message(image)
                    if chunk:
                        parts.append(chunk)
                converted = UserMessage(role="user", content=parts)
            else:
                converted = UserMessage(role="user", content=msg.content)
        elif msg.role == "assistant":
            if msg.reasoning_content is not None:
                # Assistant messages carrying reasoning are replayed as user messages
                converted = UserMessage(role="user", content=msg.content)
            elif msg.tool_calls is not None:
                converted = AssistantMessage(role="assistant", content=msg.content, tool_calls=msg.tool_calls)
            else:
                converted = AssistantMessage(role=msg.role, content=msg.content)
        elif msg.role == "system":
            converted = SystemMessage(role="system", content=msg.content)
        elif msg.role == "tool":
            # Tool messages may carry a compressed variant of their content
            tool_content = msg.get_content(use_compressed_content=compress_tool_results)
            converted = ToolMessage(name="tool", content=tool_content, tool_call_id=msg.tool_call_id)
        else:
            raise ValueError(f"Unknown role: {msg.role}")
        converted_messages.append(converted)

    # Mistral only allows an assistant message in final position when it is
    # flagged as a prefix
    if converted_messages and hasattr(converted_messages[-1], "role") and converted_messages[-1].role == "assistant":
        converted_messages[-1].prefix = True
    return converted_messages
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/models/mistral.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/models/watsonx.py | from typing import Any, Dict, List, Sequence
from agno.media import Image
from agno.models.message import Message
from agno.utils.log import log_error, log_warning
def format_images_for_message(message: Message, images: Sequence[Image]) -> Message:
    """
    Attach images to a message as multi-part content in the format expected
    by WatsonX.

    The message content is replaced with a list containing the original text
    followed by one ``image_url`` part per successfully processed image. The
    message is left untouched when no image could be processed.
    """
    # Start with the text content as the first part
    parts: List[Dict[str, Any]] = [{"type": "text", "text": message.content}]

    for image in images:
        try:
            # Raw bytes take precedence over a URL
            if image.content is not None:
                raw = image.content
            elif image.url is not None:
                raw = image.get_content_bytes()  # type: ignore
            else:
                log_warning(f"Unsupported image format: {image}")
                continue

            if raw is not None:
                import base64

                encoded = base64.b64encode(raw).decode("utf-8")
                parts.append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded}"}})
        except Exception as e:
            log_error(f"Failed to process image: {str(e)}")

    # Only replace the content if at least one image was added
    if len(parts) > 1:
        message.content = parts
    return message
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/models/watsonx.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/models/cerebras/cerebras.py | import json
from collections.abc import AsyncIterator
from dataclasses import dataclass
from os import getenv
from typing import Any, Dict, Iterator, List, Optional, Type, Union
import httpx
from pydantic import BaseModel
from agno.models.base import Model
from agno.models.message import Message
from agno.models.metrics import MessageMetrics
from agno.models.response import ModelResponse
from agno.run.agent import RunOutput
from agno.utils.http import get_default_async_client, get_default_sync_client
from agno.utils.log import log_debug, log_error, log_warning
try:
from cerebras.cloud.sdk import AsyncCerebras as AsyncCerebrasClient
from cerebras.cloud.sdk import Cerebras as CerebrasClient
from cerebras.cloud.sdk.types.chat.chat_completion import (
ChatChunkResponse,
ChatChunkResponseChoice,
ChatChunkResponseChoiceDelta,
ChatChunkResponseUsage,
ChatCompletionResponse,
ChatCompletionResponseChoice,
ChatCompletionResponseChoiceMessage,
ChatCompletionResponseUsage,
)
except (ImportError, ModuleNotFoundError):
raise ImportError("`cerebras-cloud-sdk` not installed. Please install using `pip install cerebras-cloud-sdk`")
@dataclass
class Cerebras(Model):
    """
    A class for interacting with models using the Cerebras API.
    """

    id: str = "llama-4-scout-17b-16e-instruct"
    name: str = "Cerebras"
    provider: str = "Cerebras"

    supports_native_structured_outputs: bool = False
    supports_json_schema_outputs: bool = True

    # Request parameters
    parallel_tool_calls: Optional[bool] = None
    max_completion_tokens: Optional[int] = None
    repetition_penalty: Optional[float] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    strict_output: bool = True  # When True, guarantees schema adherence for structured outputs. When False, attempts to follow schema as a guide but may occasionally deviate
    extra_headers: Optional[Any] = None
    extra_query: Optional[Any] = None
    extra_body: Optional[Any] = None
    request_params: Optional[Dict[str, Any]] = None

    # Client parameters
    api_key: Optional[str] = None
    base_url: Optional[Union[str, httpx.URL]] = None
    timeout: Optional[float] = None
    max_retries: Optional[int] = None
    default_headers: Optional[Any] = None
    default_query: Optional[Any] = None
    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
    client_params: Optional[Dict[str, Any]] = None

    # Cerebras clients
    client: Optional[CerebrasClient] = None
    async_client: Optional[AsyncCerebrasClient] = None

    def _get_client_params(self) -> Dict[str, Any]:
        """Build the keyword arguments used to construct a Cerebras client."""
        # Fetch API key from env if not already set
        if not self.api_key:
            self.api_key = getenv("CEREBRAS_API_KEY")
            if not self.api_key:
                log_error("CEREBRAS_API_KEY not set. Please set the CEREBRAS_API_KEY environment variable.")

        # Define base client params
        base_params = {
            "api_key": self.api_key,
            "base_url": self.base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "default_headers": self.default_headers,
            "default_query": self.default_query,
        }

        # Create client_params dict with non-None values
        client_params = {k: v for k, v in base_params.items() if v is not None}

        # Add additional client params if provided
        if self.client_params:
            client_params.update(self.client_params)
        return client_params

    def _ensure_additional_properties_false(self, schema: Dict[str, Any]) -> None:
        """
        Recursively ensure all object types have additionalProperties: false.
        Cerebras API requires this for JSON schema validation.
        """
        if not isinstance(schema, dict):
            return

        # Set additionalProperties: false for object types
        if schema.get("type") == "object":
            schema["additionalProperties"] = False

        # Recursively process nested schemas
        if "properties" in schema and isinstance(schema["properties"], dict):
            for prop_schema in schema["properties"].values():
                self._ensure_additional_properties_false(prop_schema)

        if "items" in schema:
            self._ensure_additional_properties_false(schema["items"])

        if "$defs" in schema and isinstance(schema["$defs"], dict):
            for def_schema in schema["$defs"].values():
                self._ensure_additional_properties_false(def_schema)

        for key in ["allOf", "anyOf", "oneOf"]:
            if key in schema and isinstance(schema[key], list):
                for item in schema[key]:
                    self._ensure_additional_properties_false(item)

    def get_client(self) -> CerebrasClient:
        """
        Returns a Cerebras client, reusing the cached one when still open.

        Returns:
            CerebrasClient: An instance of the Cerebras client.
        """
        if self.client and not self.client.is_closed():
            return self.client

        client_params: Dict[str, Any] = self._get_client_params()
        if self.http_client is not None:
            client_params["http_client"] = self.http_client
        else:
            # Use global sync client when no custom http_client is provided
            client_params["http_client"] = get_default_sync_client()
        self.client = CerebrasClient(**client_params)
        return self.client

    def get_async_client(self) -> AsyncCerebrasClient:
        """
        Returns an asynchronous Cerebras client, reusing the cached one when still open.

        Returns:
            AsyncCerebras: An instance of the asynchronous Cerebras client.
        """
        if self.async_client and not self.async_client.is_closed():
            return self.async_client

        client_params: Dict[str, Any] = self._get_client_params()
        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
            client_params["http_client"] = self.http_client
        else:
            # Use global async client when no custom http_client is provided
            client_params["http_client"] = get_default_async_client()
        self.async_client = AsyncCerebrasClient(**client_params)
        return self.async_client

    def get_request_params(
        self,
        tools: Optional[List[Dict[str, Any]]] = None,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        """
        Returns keyword arguments for API requests.

        Returns:
            Dict[str, Any]: A dictionary of keyword arguments for API requests.
        """
        # Define base request parameters.
        # NOTE: self.request_params is intentionally NOT included as a key here.
        # It is merged into the final dict below; including it as a literal
        # "request_params" entry would leak an unknown kwarg into the SDK call.
        base_params = {
            "max_completion_tokens": self.max_completion_tokens,
            "repetition_penalty": self.repetition_penalty,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "extra_headers": self.extra_headers,
            "extra_query": self.extra_query,
            "extra_body": self.extra_body,
        }
        # Filter out None values
        request_params: Dict[str, Any] = {k: v for k, v in base_params.items() if v is not None}

        # Add tools
        if tools is not None and len(tools) > 0:
            request_params["tools"] = [
                {
                    "type": "function",
                    "function": {
                        "name": tool["function"]["name"],
                        "description": tool["function"]["description"],
                        "parameters": tool["function"]["parameters"],
                    },
                }
                for tool in tools
            ]

            # Cerebras requires parallel_tool_calls=False for llama-4-scout-17b-16e-instruct
            if self.id == "llama-4-scout-17b-16e-instruct":
                request_params["parallel_tool_calls"] = False
            elif self.parallel_tool_calls is not None:
                request_params["parallel_tool_calls"] = self.parallel_tool_calls

        # Handle response format for structured outputs
        if response_format is not None:
            if (
                isinstance(response_format, dict)
                and response_format.get("type") == "json_schema"
                and isinstance(response_format.get("json_schema"), dict)
            ):
                # Ensure json_schema has strict parameter set
                schema = response_format["json_schema"]
                if isinstance(schema.get("schema"), dict):
                    if "strict" not in schema:
                        schema["strict"] = self.strict_output
                    # Cerebras requires additionalProperties: false for all object types
                    self._ensure_additional_properties_false(schema["schema"])
            request_params["response_format"] = response_format

        # Add additional request params if provided
        if self.request_params:
            request_params.update(self.request_params)

        if request_params:
            log_debug(f"Calling {self.provider} with request parameters: {request_params}", log_level=2)
        return request_params

    def invoke(
        self,
        messages: List[Message],
        assistant_message: Message,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[RunOutput] = None,
        compress_tool_results: bool = False,
    ) -> ModelResponse:
        """
        Send a chat completion request to the Cerebras API.

        Args:
            messages (List[Message]): A list of messages to send to the model.

        Returns:
            ModelResponse: The parsed chat completion response from the API.
        """
        assistant_message.metrics.start_timer()
        provider_response = self.get_client().chat.completions.create(
            model=self.id,
            messages=[self._format_message(m, compress_tool_results) for m in messages],  # type: ignore
            **self.get_request_params(response_format=response_format, tools=tools),
        )
        assistant_message.metrics.stop_timer()

        model_response = self._parse_provider_response(provider_response, response_format=response_format)  # type: ignore

        return model_response

    async def ainvoke(
        self,
        messages: List[Message],
        assistant_message: Message,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[RunOutput] = None,
        compress_tool_results: bool = False,
    ) -> ModelResponse:
        """
        Sends an asynchronous chat completion request to the Cerebras API.

        Args:
            messages (List[Message]): A list of messages to send to the model.

        Returns:
            ModelResponse: The parsed chat completion response from the API.
        """
        assistant_message.metrics.start_timer()
        provider_response = await self.get_async_client().chat.completions.create(
            model=self.id,
            messages=[self._format_message(m, compress_tool_results) for m in messages],  # type: ignore
            **self.get_request_params(response_format=response_format, tools=tools),
        )
        assistant_message.metrics.stop_timer()

        model_response = self._parse_provider_response(provider_response, response_format=response_format)  # type: ignore

        return model_response

    def invoke_stream(
        self,
        messages: List[Message],
        assistant_message: Message,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[RunOutput] = None,
        compress_tool_results: bool = False,
    ) -> Iterator[ModelResponse]:
        """
        Send a streaming chat completion request to the Cerebras API.

        Args:
            messages (List[Message]): A list of messages to send to the model.

        Returns:
            Iterator[ModelResponse]: An iterator of parsed response deltas.
        """
        assistant_message.metrics.start_timer()
        for chunk in self.get_client().chat.completions.create(
            model=self.id,
            messages=[self._format_message(m, compress_tool_results) for m in messages],  # type: ignore
            stream=True,
            **self.get_request_params(response_format=response_format, tools=tools),
        ):
            yield self._parse_provider_response_delta(chunk)  # type: ignore
        assistant_message.metrics.stop_timer()

    async def ainvoke_stream(
        self,
        messages: List[Message],
        assistant_message: Message,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        run_response: Optional[RunOutput] = None,
        compress_tool_results: bool = False,
    ) -> AsyncIterator[ModelResponse]:
        """
        Sends an asynchronous streaming chat completion request to the Cerebras API.

        Args:
            messages (List[Message]): A list of messages to send to the model.

        Returns:
            AsyncIterator[ModelResponse]: An asynchronous iterator of parsed response deltas.
        """
        assistant_message.metrics.start_timer()
        async_stream = await self.get_async_client().chat.completions.create(
            model=self.id,
            messages=[self._format_message(m, compress_tool_results) for m in messages],  # type: ignore
            stream=True,
            **self.get_request_params(response_format=response_format, tools=tools),
        )
        async for chunk in async_stream:  # type: ignore
            yield self._parse_provider_response_delta(chunk)  # type: ignore
        assistant_message.metrics.stop_timer()

    def _format_message(self, message: Message, compress_tool_results: bool = False) -> Dict[str, Any]:
        """
        Format a message into the format expected by the Cerebras API.

        Args:
            message (Message): The message to format.
            compress_tool_results: Whether to compress tool results.

        Returns:
            Dict[str, Any]: The formatted message.
        """
        # Use compressed content for tool messages if compression is active
        if message.role == "tool":
            content = message.get_content(use_compressed_content=compress_tool_results)
        else:
            content = message.content if message.content is not None else ""

        # Basic message content
        message_dict: Dict[str, Any] = {
            "role": message.role,
            "content": content,
        }

        # Add name if present
        if message.name:
            message_dict["name"] = message.name

        # Handle tool calls
        if message.tool_calls:
            # Ensure tool_calls is properly formatted; arguments must be a JSON string
            message_dict["tool_calls"] = [
                {
                    "id": tool_call["id"],
                    "type": tool_call["type"],
                    "function": {
                        "name": tool_call["function"]["name"],
                        "arguments": json.dumps(tool_call["function"]["arguments"])
                        if isinstance(tool_call["function"]["arguments"], (dict, list))
                        else tool_call["function"]["arguments"],
                    },
                }
                for tool_call in message.tool_calls
            ]

        # Handle tool responses
        if message.role == "tool" and message.tool_call_id:
            message_dict = {
                "role": "tool",
                "tool_call_id": message.tool_call_id,
                "content": content,
            }

        # Ensure no None values in the message
        message_dict = {k: v for k, v in message_dict.items() if v is not None}

        return message_dict

    def _parse_provider_response(self, response: ChatCompletionResponse, **kwargs) -> ModelResponse:
        """
        Parse the Cerebras response into a ModelResponse.

        Args:
            response (ChatCompletionResponse): The response from the Cerebras API.

        Returns:
            ModelResponse: The parsed response.
        """
        model_response = ModelResponse()

        # Get the first choice (assuming single response)
        choice: ChatCompletionResponseChoice = response.choices[0]
        message: ChatCompletionResponseChoiceMessage = choice.message

        # Add role
        if message.role is not None:
            model_response.role = message.role

        # Add content
        if message.content is not None:
            model_response.content = message.content

        # Add tool calls
        if message.tool_calls is not None:
            try:
                model_response.tool_calls = [
                    {
                        "id": tool_call.id,
                        "type": tool_call.type,
                        "function": {
                            "name": tool_call.function.name,
                            "arguments": tool_call.function.arguments,
                        },
                    }
                    for tool_call in message.tool_calls
                ]
            except Exception as e:
                log_warning(f"Error processing tool calls: {e}")

        # Add usage metrics
        if response.usage:
            model_response.response_usage = self._get_metrics(response.usage)

        return model_response

    def _parse_provider_response_delta(
        self, response: Union[ChatChunkResponse, ChatCompletionResponse]
    ) -> ModelResponse:
        """
        Parse the streaming response from the Cerebras API into a ModelResponse.

        Args:
            response (ChatChunkResponse): The streaming response chunk.

        Returns:
            ModelResponse: The parsed response.
        """
        model_response = ModelResponse()

        # Get the first choice (assuming single response)
        if response.choices is not None:
            choice: Union[ChatChunkResponseChoice, ChatCompletionResponseChoice] = response.choices[0]
            choice_delta: ChatChunkResponseChoiceDelta = choice.delta  # type: ignore

            if choice_delta:
                # Add content
                if choice_delta.content:
                    model_response.content = choice_delta.content

                # Add tool calls - preserve index for proper aggregation in parse_tool_calls
                if choice_delta.tool_calls:
                    model_response.tool_calls = [
                        {
                            "index": tool_call.index if hasattr(tool_call, "index") else idx,
                            "id": tool_call.id,
                            "type": tool_call.type,
                            "function": {
                                "name": tool_call.function.name if tool_call.function else None,
                                "arguments": tool_call.function.arguments if tool_call.function else None,
                            },
                        }
                        for idx, tool_call in enumerate(choice_delta.tool_calls)
                    ]

        # Add usage metrics
        if response.usage:
            model_response.response_usage = self._get_metrics(response.usage)

        return model_response

    def parse_tool_calls(self, tool_calls_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Build complete tool calls from streamed tool call delta data.

        Cerebras streams tool calls incrementally with partial data in each chunk.
        This method aggregates those chunks by index to produce complete tool calls.

        Args:
            tool_calls_data: List of tool call deltas from streaming chunks.

        Returns:
            List[Dict[str, Any]]: List of fully-formed tool call dicts.
        """
        tool_calls: List[Dict[str, Any]] = []

        for tool_call_delta in tool_calls_data:
            # Get the index for this tool call (default to 0 if not present)
            index = tool_call_delta.get("index", 0)

            # Extend the list if needed
            while len(tool_calls) <= index:
                tool_calls.append(
                    {
                        "id": None,
                        "type": None,
                        "function": {
                            "name": "",
                            "arguments": "",
                        },
                    }
                )

            tool_call_entry = tool_calls[index]

            # Update id if present
            if tool_call_delta.get("id"):
                tool_call_entry["id"] = tool_call_delta["id"]

            # Update type if present
            if tool_call_delta.get("type"):
                tool_call_entry["type"] = tool_call_delta["type"]

            # Update function name and arguments (concatenate for streaming)
            if tool_call_delta.get("function"):
                func_delta = tool_call_delta["function"]
                if func_delta.get("name"):
                    tool_call_entry["function"]["name"] += func_delta["name"]
                if func_delta.get("arguments"):
                    tool_call_entry["function"]["arguments"] += func_delta["arguments"]

        # Filter out any incomplete tool calls (missing id or function name)
        complete_tool_calls = [tc for tc in tool_calls if tc.get("id") and tc.get("function", {}).get("name")]

        return complete_tool_calls

    def _get_metrics(
        self, response_usage: Union[ChatCompletionResponseUsage, ChatChunkResponseUsage]
    ) -> MessageMetrics:
        """
        Parse the given Cerebras usage into an Agno MessageMetrics object.

        Args:
            response_usage: Usage data from Cerebras

        Returns:
            MessageMetrics: Parsed metrics data
        """
        metrics = MessageMetrics()
        metrics.input_tokens = response_usage.prompt_tokens or 0
        metrics.output_tokens = response_usage.completion_tokens or 0
        metrics.total_tokens = metrics.input_tokens + metrics.output_tokens

        # Capture Cerebras timing metrics if available
        provider_metrics: Dict[str, Any] = {}
        if hasattr(response_usage, "time_system") and response_usage.time_system is not None:
            provider_metrics["time_system"] = response_usage.time_system
        if hasattr(response_usage, "time_prompt") and response_usage.time_prompt is not None:
            provider_metrics["time_prompt"] = response_usage.time_prompt
        if provider_metrics:
            metrics.provider_metrics = provider_metrics

        return metrics
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/cerebras/cerebras.py",
"license": "Apache License 2.0",
"lines": 497,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/models/cerebras/cerebras_openai.py | import json
from dataclasses import dataclass, field
from os import getenv
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel
from agno.exceptions import ModelAuthenticationError
from agno.models.message import Message
from agno.models.openai.like import OpenAILike
from agno.utils.log import log_debug
@dataclass
class CerebrasOpenAI(OpenAILike):
    """Cerebras model provider accessed through its OpenAI-compatible endpoint.

    The API key is read from the CEREBRAS_API_KEY environment variable when
    not supplied explicitly.
    """

    id: str = "llama-4-scout-17b-16e-instruct"
    name: str = "CerebrasOpenAI"
    provider: str = "CerebrasOpenAI"

    # Explicit parallel tool-call preference; None defers to the API default.
    parallel_tool_calls: Optional[bool] = None

    base_url: str = "https://api.cerebras.ai/v1"
    api_key: Optional[str] = field(default_factory=lambda: getenv("CEREBRAS_API_KEY", None))

    def _get_client_params(self) -> Dict[str, Any]:
        """
        Returns client parameters for API requests, checking for CEREBRAS_API_KEY.

        Returns:
            Dict[str, Any]: A dictionary of client parameters for API requests.

        Raises:
            ModelAuthenticationError: If no API key is set and none is found
                in the environment.
        """
        if not self.api_key:
            self.api_key = getenv("CEREBRAS_API_KEY")
            if not self.api_key:
                raise ModelAuthenticationError(
                    message="CEREBRAS_API_KEY not set. Please set the CEREBRAS_API_KEY environment variable.",
                    model_name=self.name,
                )
        return super()._get_client_params()

    def get_request_params(
        self,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        """
        Returns keyword arguments for API requests.

        Args:
            response_format: Optional response format (dict or Pydantic model).
            tools: Optional list of tool definitions in OpenAI function format.
            tool_choice: Optional tool-choice directive.

        Returns:
            Dict[str, Any]: A dictionary of keyword arguments for API requests.
        """
        # NOTE(review): **kwargs is accepted but not forwarded to super() —
        # confirm whether extra keyword arguments should be passed through.
        request_params = super().get_request_params(
            response_format=response_format, tools=tools, tool_choice=tool_choice
        )
        # Add tools with proper formatting
        if tools is not None and len(tools) > 0:
            request_params["tools"] = [
                {
                    "type": "function",
                    "function": {
                        "name": tool["function"]["name"],
                        "description": tool["function"]["description"],
                        "parameters": tool["function"]["parameters"],
                    },
                }
                for tool in tools
            ]
            # Cerebras requires parallel_tool_calls=False for llama-4-scout-17b-16e-instruct
            if self.id == "llama-4-scout-17b-16e-instruct":
                request_params["parallel_tool_calls"] = False
            elif self.parallel_tool_calls is not None:
                request_params["parallel_tool_calls"] = self.parallel_tool_calls
        if request_params:
            log_debug(f"Calling {self.provider} with request parameters: {request_params}", log_level=2)
        return request_params

    def _format_message(self, message: Message, compress_tool_results: bool = False) -> Dict[str, Any]:
        """
        Format a message into the format expected by the Cerebras API.

        Args:
            message (Message): The message to format.
            compress_tool_results (bool): Whether tool results should use the
                compressed content variant.

        Returns:
            Dict[str, Any]: The formatted message.
        """
        # Basic message content
        message_dict: Dict[str, Any] = {
            "role": message.role,
            "content": message.content if message.content is not None else "",
        }

        # Add name if present
        if message.name:
            message_dict["name"] = message.name

        # Handle tool calls; arguments must be serialized to a JSON string.
        if message.tool_calls:
            message_dict["tool_calls"] = [
                {
                    "id": tool_call["id"],
                    "type": tool_call["type"],
                    "function": {
                        "name": tool_call["function"]["name"],
                        "arguments": json.dumps(tool_call["function"]["arguments"])
                        if isinstance(tool_call["function"]["arguments"], (dict, list))
                        else tool_call["function"]["arguments"],
                    },
                }
                for tool_call in message.tool_calls
            ]

        # Handle tool responses
        if message.role == "tool" and message.tool_call_id:
            content = message.get_content(use_compressed_content=compress_tool_results)
            message_dict = {
                "role": "tool",
                "tool_call_id": message.tool_call_id,
                # Fix: guard on the resolved content (previously message.content),
                # so a None result can never survive into the payload and then be
                # removed by the None-filter below, which would strip the required
                # "content" key from the tool message.
                "content": content if content is not None else "",
            }

        # Ensure no None values in the message
        message_dict = {k: v for k, v in message_dict.items() if v is not None}
        return message_dict
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/cerebras/cerebras_openai.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/models/cerebras/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput
from agno.db.sqlite import SqliteDb
from agno.models.cerebras import Cerebras
@pytest.fixture(scope="module")
def cerebras_model():
    """Module-scoped fixture: one shared Cerebras model for every test here."""
    return Cerebras(id="gpt-oss-120b")
def _assert_metrics(response: RunOutput):
    """Check that the run carries consistent, non-zero token metrics."""
    metrics = response.metrics
    assert metrics is not None
    assert metrics.input_tokens > 0
    assert metrics.output_tokens > 0
    assert metrics.total_tokens > 0
    # The total must be exactly the sum of its parts.
    assert metrics.total_tokens == metrics.input_tokens + metrics.output_tokens
def test_basic(cerebras_model):
    """A plain run should yield content, three messages, and valid metrics."""
    agent = Agent(model=cerebras_model, markdown=True, telemetry=False)

    response: RunOutput = agent.run("Share a 2 sentence horror story")

    assert response.content is not None and response.messages is not None
    assert len(response.messages) == 3
    assert [msg.role for msg in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
def test_basic_stream(cerebras_model):
    """Streaming a run should return an iterator of non-empty chunks."""
    agent = Agent(model=cerebras_model, markdown=True, telemetry=False)
    stream = agent.run("Share a 2 sentence horror story", stream=True)

    # The return value must be iterable before it is consumed.
    assert hasattr(stream, "__iter__")

    chunks = list(stream)
    assert len(chunks) > 0
    for chunk in chunks:
        assert chunk.content is not None
@pytest.mark.asyncio
async def test_async_basic(cerebras_model):
    """An async run should mirror the sync behavior and metrics."""
    agent = Agent(model=cerebras_model, markdown=True, telemetry=False)

    response = await agent.arun("Share a 2 sentence horror story")

    assert response.content is not None and response.messages is not None
    assert len(response.messages) == 3
    assert [msg.role for msg in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
@pytest.mark.asyncio
async def test_async_basic_stream(cerebras_model):
    """Async streaming should yield chunks that all carry content."""
    agent = Agent(model=cerebras_model, markdown=True, telemetry=False)
    async for chunk in agent.arun("Share a 2 sentence horror story", stream=True):
        assert chunk.content is not None
def test_with_memory(cerebras_model):
    """With history enabled, the agent should recall facts from earlier runs."""
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=cerebras_model,
        add_history_to_context=True,
        num_history_runs=5,
        markdown=True,
        telemetry=False,
    )

    # Seed the session with a fact, then ask the agent to recall it.
    first = agent.run("My name is John Smith")
    assert first.content is not None

    second = agent.run("What's my name?")
    assert second.content is not None and "John" in second.content

    # Both exchanges (plus the system message) must be persisted.
    history = agent.get_session_messages()
    assert len(history) == 5
    assert [msg.role for msg in history] == ["system", "user", "assistant", "user", "assistant"]

    # Test metrics structure and types
    _assert_metrics(second)
def test_structured_output(cerebras_model):
    """output_schema should make the agent return a parsed Pydantic model."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=cerebras_model,
        output_schema=MovieScript,
        telemetry=False,
    )

    response = agent.run("Create a movie about time travel")

    # The content must already be a fully populated MovieScript instance.
    movie = response.content
    assert isinstance(movie, MovieScript)
    assert movie.title is not None
    assert movie.genre is not None
    assert movie.plot is not None
def test_history(cerebras_model):
    """Message history should grow by two (user + assistant) on every run."""
    agent = Agent(
        model=cerebras_model,
        db=SqliteDb(db_file="tmp/cerebras/test_basic.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )
    for run_index, prompt in enumerate(["Hello", "Hello 2", "Hello 3", "Hello 4"]):
        run_output = agent.run(prompt)
        assert run_output.messages is not None
        # Run N carries 2*N messages: all prior history plus this exchange.
        assert len(run_output.messages) == 2 * (run_index + 1)
def test_client_persistence(cerebras_model):
    """Test that the same Cerebras client instance is reused across multiple calls"""
    agent = Agent(model=cerebras_model, markdown=True, telemetry=False)

    # The first run lazily instantiates the client.
    agent.run("Hello")
    baseline_client = cerebras_model.client
    assert baseline_client is not None

    # A second run must hand back the exact same instance.
    agent.run("Hello again")
    reused_client = cerebras_model.client
    assert reused_client is not None
    assert baseline_client is reused_client, "Client should be persisted and reused"

    # And so must a third run.
    agent.run("Hello once more")
    reused_again = cerebras_model.client
    assert reused_again is not None
    assert baseline_client is reused_again, "Client should still be the same instance"
@pytest.mark.asyncio
async def test_async_client_persistence(cerebras_model):
    """Test that the same async Cerebras client instance is reused across multiple calls"""
    agent = Agent(model=cerebras_model, markdown=True, telemetry=False)

    # The first run lazily instantiates the async client.
    await agent.arun("Hello")
    baseline_client = cerebras_model.async_client
    assert baseline_client is not None

    # A second run must hand back the exact same instance.
    await agent.arun("Hello again")
    reused_client = cerebras_model.async_client
    assert reused_client is not None
    assert baseline_client is reused_client, "Async client should be persisted and reused"

    # And so must a third run.
    await agent.arun("Hello once more")
    reused_again = cerebras_model.async_client
    assert reused_again is not None
    assert baseline_client is reused_again, "Async client should still be the same instance"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/cerebras/test_basic.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/cerebras/test_tool_use.py | import pytest
from agno.agent import Agent, RunOutput # noqa
from agno.models.cerebras import Cerebras
from agno.tools.websearch import WebSearchTools
def test_tool_use():
    """A web-search question should trigger at least one tool call."""
    agent = Agent(
        model=Cerebras(id="gpt-oss-120b"),
        tools=[WebSearchTools(cache_results=True)],
        telemetry=False,
    )

    response = agent.run("What's happening in France?")

    # At least one message must carry tool calls.
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "France" in response.content
def test_tool_use_stream():
    """Streaming with events enabled should surface tool-call events."""
    agent = Agent(
        model=Cerebras(id="gpt-oss-120b"),
        tools=[WebSearchTools(cache_results=True)],
        telemetry=False,
    )

    chunks = []
    saw_tool_call = False
    for chunk in agent.run("What's happening in France?", stream=True, stream_events=True):
        chunks.append(chunk)
        # Look for ToolCallStarted / ToolCallCompleted events carrying a named tool.
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:  # type: ignore
            if chunk.tool.tool_name:  # type: ignore
                saw_tool_call = True

    assert len(chunks) > 0
    assert saw_tool_call, "No tool calls observed in stream"
@pytest.mark.asyncio
async def test_async_tool_use():
    """The async path should also invoke tools and answer about France."""
    agent = Agent(
        model=Cerebras(id="gpt-oss-120b"),
        tools=[WebSearchTools(cache_results=True)],
        telemetry=False,
    )

    response = await agent.arun("What's happening in France?")

    # An assistant message must carry tool calls.
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.role == "assistant")
    assert response.content is not None
    assert "France" in response.content
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """Async streaming should emit tool-call events and mention the ticker."""
    agent = Agent(
        model=Cerebras(id="gpt-oss-120b"),
        tools=[WebSearchTools(cache_results=True)],
        telemetry=False,
    )

    saw_tool_call = False
    saw_keyword = False
    async for event in agent.arun(
        "What is the current price of TSLA?",
        stream=True,
        stream_events=True,
    ):
        if event.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(event, "tool") and event.tool:  # type: ignore
            if event.tool.tool_name:  # type: ignore
                saw_tool_call = True
        if event.content is not None and "TSLA" in event.content:
            saw_keyword = True

    # A tool-call event proves the tool ran; the keyword proves the right tool answered.
    assert saw_tool_call, "No tool calls observed in stream"
    assert saw_keyword, "Keyword not found in response"
def test_tool_use_with_content():
    """A summarization prompt should still trigger tool calls and content."""
    agent = Agent(
        model=Cerebras(id="gpt-oss-120b"),
        tools=[WebSearchTools(cache_results=True)],
        telemetry=False,
    )

    response = agent.run("What's happening in France? Summarize the key events.")

    # At least one message must carry tool calls, and content must be produced.
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/cerebras/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_metrics.py | import time
import pytest
from agno.agent import Agent, RunOutput # noqa
from agno.culture.manager import CultureManager
from agno.db.base import SessionType
from agno.eval.accuracy import AccuracyEval
from agno.eval.agent_as_judge import AgentAsJudgeEval
from agno.memory.manager import MemoryManager
from agno.metrics import ModelMetrics, RunMetrics, SessionMetrics, ToolCallMetrics
from agno.models.openai import OpenAIChat
from agno.tools.websearch import WebSearchTools
def add(a: int, b: int) -> str:
    """Add two numbers."""
    result = a + b
    return str(result)
def multiply(a: int, b: int) -> str:
    """Multiply two numbers."""
    product = a * b
    return str(product)
def test_run_response_metrics():
    """Each run's RunOutput should expose positive token counts."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        markdown=True,
    )
    first = agent.run("Hello my name is John")
    second = agent.run("I live in New York")
    for metrics in (first.metrics, second.metrics):
        assert metrics.input_tokens >= 1
        assert metrics.output_tokens >= 1
        assert metrics.total_tokens >= 1
def test_session_metrics(shared_db):
    """Session metrics stored in the DB should accumulate across runs."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[WebSearchTools(cache_results=True)],
        db=shared_db,
        markdown=True,
        telemetry=False,
    )

    def _check_run(response):
        # Every run must report positive, internally consistent counts.
        m = response.metrics
        assert m.input_tokens > 0
        assert m.output_tokens > 0
        assert m.total_tokens > 0
        assert m.total_tokens == m.input_tokens + m.output_tokens

    def _check_session(expected_input, expected_output, expected_total):
        # The stored session must reflect the running totals exactly.
        stored = agent.db.get_session(session_id=agent.session_id, session_type=SessionType.AGENT)
        assert stored.session_data["session_metrics"]["input_tokens"] == expected_input
        assert stored.session_data["session_metrics"]["output_tokens"] == expected_output
        assert stored.session_data["session_metrics"]["total_tokens"] == expected_total

    response = agent.run("Hi, my name is John")
    _check_run(response)
    running_input = response.metrics.input_tokens
    running_output = response.metrics.output_tokens
    running_total = response.metrics.total_tokens
    _check_session(running_input, running_output, running_total)

    response = agent.run("What is current news in France?")
    _check_run(response)
    running_input += response.metrics.input_tokens
    running_output += response.metrics.output_tokens
    running_total += response.metrics.total_tokens
    # Ensure the total session metrics are updated
    _check_session(running_input, running_output, running_total)
def test_session_metrics_with_add_history(shared_db):
    """History injection must not break session metric accumulation."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        add_history_to_context=True,
        num_history_runs=3,
        markdown=True,
        telemetry=False,
    )

    def _check_run(response):
        # Every run must report positive, internally consistent counts.
        m = response.metrics
        assert m.input_tokens > 0
        assert m.output_tokens > 0
        assert m.total_tokens > 0
        assert m.total_tokens == m.input_tokens + m.output_tokens

    def _check_session(expected_input, expected_output, expected_total):
        # The stored session must reflect the running totals exactly.
        stored = agent.db.get_session(session_id=agent.session_id, session_type=SessionType.AGENT)
        assert stored.session_data["session_metrics"]["input_tokens"] == expected_input
        assert stored.session_data["session_metrics"]["output_tokens"] == expected_output
        assert stored.session_data["session_metrics"]["total_tokens"] == expected_total

    response = agent.run("Hi, my name is John")
    _check_run(response)
    running_input = response.metrics.input_tokens
    running_output = response.metrics.output_tokens
    running_total = response.metrics.total_tokens
    _check_session(running_input, running_output, running_total)

    response = agent.run("What did I just tell you?")
    _check_run(response)
    running_input += response.metrics.input_tokens
    running_output += response.metrics.output_tokens
    running_total += response.metrics.total_tokens
    # Ensure the total session metrics are updated
    _check_session(running_input, running_output, running_total)
def test_run_metrics_details_structure():
    """RunMetrics.details must break usage down per model with provider info."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    response = agent.run("Hello")

    run_metrics = response.metrics
    assert run_metrics is not None
    assert isinstance(run_metrics, RunMetrics)
    assert run_metrics.total_tokens > 0
    assert run_metrics.details is not None
    assert "model" in run_metrics.details

    per_model = run_metrics.details["model"]
    assert len(per_model) >= 1
    entry = per_model[0]
    assert isinstance(entry, ModelMetrics)
    assert entry.id == "gpt-4o-mini"
    assert entry.provider == "OpenAI"
    assert entry.input_tokens > 0
    assert entry.total_tokens > 0
def test_run_metrics_details_sum_matches_total():
    """Per-detail token counts must add up to the run total."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    response = agent.run("What is 2+2?")
    detail_total = sum(
        entry.total_tokens for entries in response.metrics.details.values() for entry in entries
    )
    assert detail_total == response.metrics.total_tokens
def test_eval_metrics_sync():
    """An eval post-hook must contribute its own eval_model metrics."""
    eval_hook = AgentAsJudgeEval(
        name="Sync Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be factually correct",
        scoring_strategy="binary",
    )
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), post_hooks=[eval_hook])
    response = agent.run("What is the capital of Japan?")

    details = response.metrics.details
    assert "model" in details
    assert "eval_model" in details

    # Both the agent model and the judge model must have consumed tokens.
    assert sum(metric.total_tokens for metric in details["model"]) > 0
    assert sum(metric.total_tokens for metric in details["eval_model"]) > 0

    detail_total = sum(entry.total_tokens for entries in details.values() for entry in entries)
    assert detail_total == response.metrics.total_tokens
@pytest.mark.asyncio
async def test_eval_metrics_async():
    """Async runs should also record eval-model token usage."""
    eval_hook = AgentAsJudgeEval(
        name="Async Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be helpful",
        scoring_strategy="binary",
    )
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), post_hooks=[eval_hook])
    response = await agent.arun("What is 5 + 3?")

    details = response.metrics.details
    assert "model" in details
    assert "eval_model" in details
    assert sum(metric.total_tokens for metric in details["eval_model"]) > 0
def test_eval_metrics_streaming():
    """The final streamed RunOutput should include eval metrics."""
    eval_hook = AgentAsJudgeEval(
        name="Stream Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be concise",
        scoring_strategy="binary",
    )
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), post_hooks=[eval_hook])

    run_output = None
    for event in agent.run("Say hi.", stream=True, yield_run_output=True):
        if isinstance(event, RunOutput):
            run_output = event

    assert run_output is not None
    assert "model" in run_output.metrics.details
    assert "eval_model" in run_output.metrics.details
def test_eval_metrics_numeric_scoring():
    """Numeric-scoring evals should record judge token usage too."""
    eval_hook = AgentAsJudgeEval(
        name="Numeric Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Rate the quality of the response",
        scoring_strategy="numeric",
        threshold=5,
    )
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), post_hooks=[eval_hook])
    response = agent.run("Explain gravity in one sentence.")

    assert "eval_model" in response.metrics.details
    assert sum(metric.total_tokens for metric in response.metrics.details["eval_model"]) > 0
def test_accuracy_eval_metrics_sync():
    """AccuracyEval.run should produce a scored result."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    evaluation = AccuracyEval(
        agent=agent,
        input="What is 2+2?",
        expected_output="4",
        num_iterations=1,
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    result = evaluation.run(print_summary=False, print_results=False)

    assert result is not None
    assert result.avg_score is not None
    # Per-iteration scores are only checked when iterations were recorded.
    if result.results:
        assert result.results[0].score is not None
@pytest.mark.asyncio
async def test_accuracy_eval_metrics_async():
    """AccuracyEval.arun should also produce a scored result."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    evaluation = AccuracyEval(
        agent=agent,
        input="What is 3+3?",
        expected_output="6",
        num_iterations=1,
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    result = await evaluation.arun(print_summary=False, print_results=False)

    assert result is not None
    assert result.avg_score is not None
def test_memory_metrics_sync(shared_db):
    """Memory updates must be billed under memory_model in the details."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        db=shared_db,
    )
    response = agent.run("My name is Bob and I live in New York.")

    details = response.metrics.details
    assert "model" in details
    assert "memory_model" in details
    assert sum(metric.total_tokens for metric in details["memory_model"]) > 0

    # The per-category breakdown must still add up to the run total.
    detail_total = sum(entry.total_tokens for entries in details.values() for entry in entries)
    assert detail_total == response.metrics.total_tokens
@pytest.mark.asyncio
async def test_memory_metrics_async(shared_db):
    """Async memory updates should record memory_model usage."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        db=shared_db,
    )
    response = await agent.arun("My favorite color is blue.")

    assert "memory_model" in response.metrics.details
    assert sum(metric.total_tokens for metric in response.metrics.details["memory_model"]) > 0
def test_memory_metrics_streaming(shared_db):
    """The final streamed RunOutput should include memory_model metrics."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        db=shared_db,
    )

    run_output = None
    for event in agent.run("I work at Microsoft as a designer.", stream=True, yield_run_output=True):
        if isinstance(event, RunOutput):
            run_output = event

    assert run_output is not None
    assert "memory_model" in run_output.metrics.details
def test_memory_model_metrics_fields(shared_db):
    """Every memory_model entry must be a populated ModelMetrics."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        db=shared_db,
    )
    response = agent.run("I have a dog named Max.")

    memory_entries = response.metrics.details.get("memory_model", [])
    assert len(memory_entries) >= 1
    for entry in memory_entries:
        assert isinstance(entry, ModelMetrics)
        assert entry.id is not None
        assert entry.provider is not None
        assert entry.input_tokens > 0
def test_culture_metrics_sync(shared_db):
    """Cultural-knowledge updates must be billed under culture_model."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        culture_manager=CultureManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_cultural_knowledge=True,
        db=shared_db,
    )
    response = agent.run("Our team always does code reviews before merging PRs.")

    assert "culture_model" in response.metrics.details
    assert sum(metric.total_tokens for metric in response.metrics.details["culture_model"]) > 0
@pytest.mark.asyncio
async def test_culture_metrics_async(shared_db):
    """Async culture updates should record culture_model usage too."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        culture_manager=CultureManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_cultural_knowledge=True,
        db=shared_db,
    )
    response = await agent.arun("We use trunk-based development with feature flags.")

    assert "culture_model" in response.metrics.details
    assert sum(metric.total_tokens for metric in response.metrics.details["culture_model"]) > 0
def test_culture_model_metrics_fields(shared_db):
    """Every culture_model entry must be a populated ModelMetrics."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        culture_manager=CultureManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_cultural_knowledge=True,
        db=shared_db,
    )
    response = agent.run("We deploy to production on Tuesdays.")

    culture_entries = response.metrics.details.get("culture_model", [])
    assert len(culture_entries) >= 1
    for entry in culture_entries:
        assert isinstance(entry, ModelMetrics)
        assert entry.id is not None
        assert entry.provider is not None
def test_tool_call_metrics_sync():
    """Tool executions must record duration and start/end timestamps."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), tools=[add])
    response = agent.run("Add 10 and 20.")

    assert response.tools is not None
    assert len(response.tools) > 0

    tool_metrics = response.tools[0].metrics
    assert isinstance(tool_metrics, ToolCallMetrics)
    assert tool_metrics.duration > 0
    assert tool_metrics.start_time is not None
    assert tool_metrics.end_time is not None
@pytest.mark.asyncio
async def test_tool_call_metrics_async():
    """Async tool calls should carry timing metrics as well."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), tools=[add])
    response = await agent.arun("Add 7 and 8.")

    assert response.tools is not None
    assert len(response.tools) > 0
    tool_metrics = response.tools[0].metrics
    assert isinstance(tool_metrics, ToolCallMetrics)
    assert tool_metrics.duration > 0
def test_tool_call_metrics_streaming():
    """The final streamed RunOutput should include tool timing metrics."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), tools=[add])

    run_output = None
    for event in agent.run("Add 3 and 4.", stream=True, yield_run_output=True):
        if isinstance(event, RunOutput):
            run_output = event

    assert run_output is not None
    assert run_output.tools is not None
    tool_metrics = run_output.tools[0].metrics
    assert isinstance(tool_metrics, ToolCallMetrics)
    assert tool_metrics.duration > 0
def test_tool_call_metrics_multiple_tools():
    """Each of several tool calls gets its own timing metrics."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), tools=[add, multiply])
    response = agent.run("Add 2 and 3, then multiply 4 and 5.")

    assert response.tools is not None
    assert len(response.tools) >= 2
    for tool in response.tools:
        assert isinstance(tool.metrics, ToolCallMetrics)
        assert tool.metrics.duration > 0

    # Same (provider, id) accumulates into a single ModelMetrics entry
    assert len(response.metrics.details["model"]) >= 1
    assert response.metrics.details["model"][0].total_tokens > 0
def test_tool_call_metrics_latency():
    """Measured tool duration should reflect the tool's real runtime."""

    def slow_lookup(query: str) -> str:
        """Look up information (slow)."""
        time.sleep(0.15)
        return f"Result for {query}"

    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), tools=[slow_lookup])
    response = agent.run("Look up information about Python.")

    assert response.tools is not None
    # The 0.15s sleep must show up (with slack) in the recorded duration.
    assert response.tools[0].metrics.duration >= 0.1
def test_provider_metrics_openai():
    """The model detail entry should identify OpenAI and tally tokens."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    response = agent.run("Hello")

    entry = response.metrics.details["model"][0]
    assert entry.provider == "OpenAI"
    assert entry.id == "gpt-4o-mini"
    assert entry.input_tokens > 0
    assert entry.output_tokens > 0
    assert entry.total_tokens == entry.input_tokens + entry.output_tokens
def test_provider_metrics_openai_with_tools():
    """Tool-using runs accumulate per (provider, id) into model entries."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), tools=[add])
    response = agent.run("Add 5 and 10.")

    model_entries = response.metrics.details["model"]
    # Same (provider, id) accumulates into a single entry
    assert len(model_entries) >= 1
    for entry in model_entries:
        assert entry.provider == "OpenAI"
        assert entry.id == "gpt-4o-mini"
        assert entry.total_tokens > 0
def test_provider_metrics_gemini():
    """Gemini runs should report the Google provider in model details."""
    from agno.models.google import Gemini

    agent = Agent(model=Gemini(id="gemini-2.5-flash"))
    response = agent.run("Hello")

    entry = response.metrics.details["model"][0]
    assert entry.provider == "Google"
    assert entry.id == "gemini-2.5-flash"
    assert entry.input_tokens > 0
    assert entry.total_tokens > 0
def test_session_metrics_type(shared_db):
    """get_session_metrics should return a populated SessionMetrics object."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db)
    agent.run("First run.")
    agent.run("Second run.")

    session_metrics = agent.get_session_metrics()
    assert isinstance(session_metrics, SessionMetrics)
    assert session_metrics.input_tokens > 0
    assert session_metrics.total_tokens > 0

    # The details mapping must hold lists of ModelMetrics per category.
    assert isinstance(session_metrics.details, dict)
    assert len(session_metrics.details) > 0
    for metrics_list in session_metrics.details.values():
        assert isinstance(metrics_list, list)
        for metric in metrics_list:
            assert isinstance(metric, ModelMetrics)
def test_session_metrics_with_memory(shared_db):
    """Session metrics should aggregate cleanly when memory is enabled."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        db=shared_db,
    )
    agent.run("My name is Charlie.")
    agent.run("I live in London.")

    session_metrics = agent.get_session_metrics()
    assert isinstance(session_metrics, SessionMetrics)
    assert session_metrics.total_tokens > 0
    for metrics_list in session_metrics.details.values():
        for detail in metrics_list:
            assert isinstance(detail, ModelMetrics)
            assert detail.id is not None
def test_session_metrics_with_eval(shared_db):
    """Eval hooks should not break session-level metric aggregation."""
    eval_hook = AgentAsJudgeEval(
        name="Session Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be helpful",
        scoring_strategy="binary",
    )
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), post_hooks=[eval_hook], db=shared_db)

    first = agent.run("What is 2+2?")
    second = agent.run("What is 3+3?")
    assert "eval_model" in first.metrics.details
    assert "eval_model" in second.metrics.details

    session_metrics = agent.get_session_metrics()
    assert session_metrics.total_tokens > 0
@pytest.mark.asyncio
async def test_session_metrics_async(shared_db):
    """Async runs should feed into session metrics as well."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db)
    await agent.arun("First async run.")
    await agent.arun("Second async run.")
    session_metrics = agent.get_session_metrics()
    assert session_metrics.total_tokens > 0
def test_session_metrics_run_independence(shared_db):
    """Session totals must be at least the sum of the individual runs."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db)
    hello = agent.run("Say hello.")
    goodbye = agent.run("Say goodbye.")

    assert hello.metrics.total_tokens > 0
    assert goodbye.metrics.total_tokens > 0

    session_metrics = agent.get_session_metrics()
    assert session_metrics.total_tokens >= hello.metrics.total_tokens + goodbye.metrics.total_tokens
def test_session_metrics_streaming(shared_db):
    """Streamed runs should still accumulate session metrics."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db)
    for prompt in ["Stream run 1.", "Stream run 2."]:
        for event in agent.run(prompt, stream=True, yield_run_output=True):
            if isinstance(event, RunOutput):
                assert event.metrics.total_tokens > 0
    session_metrics = agent.get_session_metrics()
    assert session_metrics.total_tokens > 0
def test_eval_plus_memory_sync(shared_db):
    """Eval + memory together: per-component detail entries must sum to the run total."""
    eval_hook = AgentAsJudgeEval(
        name="Combined Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be accurate",
        scoring_strategy="binary",
    )
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        post_hooks=[eval_hook],
        db=shared_db,
    )
    response = agent.run("My favorite food is pizza.")
    assert "model" in response.metrics.details
    assert "memory_model" in response.metrics.details
    assert "eval_model" in response.metrics.details
    for key in ["model", "memory_model", "eval_model"]:
        assert sum(metric.total_tokens for metric in response.metrics.details[key]) > 0
    # The per-key detail entries must account for exactly the run's total tokens
    detail_total = sum(entry.total_tokens for entries in response.metrics.details.values() for entry in entries)
    assert detail_total == response.metrics.total_tokens
@pytest.mark.asyncio
async def test_eval_plus_memory_async(shared_db):
    """Async variant: eval and memory model usage both appear in metrics details."""
    eval_hook = AgentAsJudgeEval(
        name="Async Combined",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be relevant",
        scoring_strategy="binary",
    )
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        post_hooks=[eval_hook],
        db=shared_db,
    )
    response = await agent.arun("I speak French and English.")
    assert "model" in response.metrics.details
    assert "memory_model" in response.metrics.details
    assert "eval_model" in response.metrics.details
def test_tools_plus_eval_sync():
    """Tool-using runs with an eval hook report usage for both main and eval models."""
    eval_hook = AgentAsJudgeEval(
        name="Tool Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should include the computed result",
        scoring_strategy="binary",
    )
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), tools=[add], post_hooks=[eval_hook])
    response = agent.run("What is 7 + 8?")
    assert "model" in response.metrics.details
    assert "eval_model" in response.metrics.details
    # Same (provider, id) accumulates into a single entry
    assert len(response.metrics.details["model"]) >= 1
    assert response.metrics.details["model"][0].total_tokens > 0
def test_all_three_combined(shared_db):
    """Memory + tools + eval combined: all three detail keys present with nonzero usage."""
    eval_hook = AgentAsJudgeEval(
        name="Full Combo",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be correct",
        scoring_strategy="binary",
    )
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        tools=[add],
        post_hooks=[eval_hook],
        db=shared_db,
    )
    response = agent.run("Add 100 and 200. Remember the answer for me.")
    for key in ["model", "memory_model", "eval_model"]:
        assert key in response.metrics.details
        assert sum(metric.total_tokens for metric in response.metrics.details[key]) > 0
def test_culture_plus_eval_sync(shared_db):
    """Culture manager + eval hook both surface their model usage in metrics details."""
    eval_hook = AgentAsJudgeEval(
        name="Culture Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should acknowledge the cultural practice",
        scoring_strategy="binary",
    )
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        culture_manager=CultureManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_cultural_knowledge=True,
        post_hooks=[eval_hook],
        db=shared_db,
    )
    response = agent.run("We always write unit tests before merging code.")
    assert "culture_model" in response.metrics.details
    assert "eval_model" in response.metrics.details
def test_culture_plus_memory_sync(shared_db):
    """Culture and memory managers both report usage under their own detail keys."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        culture_manager=CultureManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_cultural_knowledge=True,
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        db=shared_db,
    )
    response = agent.run("My name is Eve. Our team uses pair programming.")
    assert "culture_model" in response.metrics.details
    assert "memory_model" in response.metrics.details
def test_multi_run_memory_session(shared_db):
    """Across several memory-enabled runs, the session total covers the per-run sums."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db),
        update_memory_on_run=True,
        db=shared_db,
    )
    runs_tokens = []
    for msg in ["I am Dave.", "I work at Apple.", "I like hiking."]:
        response = agent.run(msg)
        runs_tokens.append(response.metrics.total_tokens)
        assert "memory_model" in response.metrics.details
    session_metrics = agent.get_session_metrics()
    # Session aggregate must be at least the sum of the individual run totals
    assert session_metrics.total_tokens >= sum(runs_tokens)
def test_multi_turn_metrics_independence():
    """Run metrics must not leak between turns (no cumulative growth across runs)."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    tokens = []
    for i in range(3):
        response = agent.run(f"Say the number {i}.")
        tokens.append(response.metrics.total_tokens)
        if response.metrics.details and "model" in response.metrics.details:
            assert len(response.metrics.details["model"]) == 1
    # Heuristic bound: if metrics accumulated across runs, the spread would blow up.
    # NOTE(review): factor 5 is a tolerance for response-length variance, not exact.
    assert max(tokens) < min(tokens) * 5
def test_streaming_metrics():
    """A streaming run must yield a final RunOutput carrying usage metrics."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    stream = agent.run("Tell me a joke.", stream=True, yield_run_output=True)
    run_outputs = [chunk for chunk in stream if isinstance(chunk, RunOutput)]
    assert run_outputs
    final_output = run_outputs[-1]
    assert final_output.metrics.total_tokens > 0
    assert "model" in final_output.metrics.details
def test_no_eval_key_without_eval():
    """Without an eval post-hook, metrics details must not contain "eval_model"."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    result = agent.run("Hello")
    details = result.metrics.details
    assert "model" in details
    assert "eval_model" not in details
def test_no_memory_key_without_memory():
    """Without a memory manager, metrics details must not contain "memory_model"."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    result = agent.run("Hello")
    assert "memory_model" not in result.metrics.details
def test_no_culture_key_without_culture():
    """Without a culture manager, metrics details must not contain "culture_model"."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    result = agent.run("Hello")
    assert "culture_model" not in result.metrics.details
def test_eval_duration_tracked():
    """Eval hook runtime is recorded under additional_metrics["eval_duration"]."""
    eval_hook = AgentAsJudgeEval(
        name="Duration Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be factually correct",
        scoring_strategy="binary",
    )
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), post_hooks=[eval_hook])
    response = agent.run("What is the capital of France?")
    assert response.metrics.additional_metrics is not None
    assert "eval_duration" in response.metrics.additional_metrics
    assert response.metrics.additional_metrics["eval_duration"] > 0
def test_detail_keys_reset_between_runs():
    """Detail entries reset per run: the second run has exactly one eval entry."""
    eval_hook = AgentAsJudgeEval(
        name="Reset Test",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be correct",
        scoring_strategy="binary",
    )
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), post_hooks=[eval_hook])
    agent.run("What is 1+1?")
    response2 = agent.run("What is 2+2?")
    assert sum(metric.total_tokens for metric in response2.metrics.details["eval_model"]) > 0
    # If details leaked across runs there would be two eval entries here, not one
    assert len(response2.metrics.details["eval_model"]) == 1
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_metrics.py",
"license": "Apache License 2.0",
"lines": 605,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/utils/models/llama.py | from typing import Any, Dict
from agno.agent import Message
from agno.utils.log import log_warning
from agno.utils.openai import process_image
# Maps agno message roles to Llama API roles for plain (non-tool-call) requests.
ROLE_MAP = {
    "user": "user",
    "assistant": "assistant",
    "system": "system",
    "tool": "tool",
}
# Role mapping when tool calls are present: "system" is downgraded to "user"
# (presumably the Llama API rejects system messages in tool-call conversations
# — TODO confirm against the API docs).
TOOL_CALL_ROLE_MAP = {
    "user": "user",
    "assistant": "assistant",
    "system": "user",
    "tool": "tool",
}
def format_message(
    message: Message, openai_like: bool = False, tool_calls: bool = False, compress_tool_results: bool = False
) -> Dict[str, Any]:
    """
    Format a message into the format expected by Llama API.

    Args:
        message (Message): The message to format.
        openai_like (bool): Whether to format the message as an OpenAI-like message
            (assistant content wrapped in a list of text parts instead of a single dict).
        tool_calls (bool): Whether tool calls are present in the conversation;
            switches the role mapping to TOOL_CALL_ROLE_MAP.
        compress_tool_results (bool): Whether to use compressed tool-result content
            for tool messages.

    Returns:
        Dict[str, Any]: The formatted message.
    """
    # Base payload; a single space stands in for empty content so the API never
    # receives an empty text block. None-valued keys are stripped below.
    message_dict: Dict[str, Any] = {
        "role": ROLE_MAP[message.role] if not tool_calls else TOOL_CALL_ROLE_MAP[message.role],
        "content": [{"type": "text", "text": message.content or " "}],
        "name": message.name,
        "tool_call_id": message.tool_call_id,
        "tool_calls": message.tool_calls,
    }
    message_dict = {k: v for k, v in message_dict.items() if v is not None}
    # Append image parts to the content list (effective for user messages only;
    # see NOTE below about tool/assistant roles).
    if message.images is not None and len(message.images) > 0:
        for image in message.images:
            image_payload = process_image(image)
            if image_payload:
                message_dict["content"].append(image_payload)
    if message.videos is not None and len(message.videos) > 0:
        log_warning("Video input is currently unsupported.")
    if message.audio is not None and len(message.audio) > 0:
        log_warning("Audio input is currently unsupported.")
    # NOTE: for "tool" and "assistant" roles the dict built above is fully
    # replaced, so any image parts appended earlier are intentionally dropped.
    if message.role == "tool":
        # Use compressed content if compression is active
        content = message.get_content(use_compressed_content=compress_tool_results)
        message_dict = {
            "role": "tool",
            "tool_call_id": message.tool_call_id,
            "content": content,
        }
    if message.role == "assistant":
        text_content = {"type": "text", "text": message.content or " "}
        if message.tool_calls is not None and len(message.tool_calls) > 0:
            message_dict = {
                "content": [text_content] if openai_like else text_content,
                "role": "assistant",
                "tool_calls": message.tool_calls,
                "stop_reason": "tool_calls",
            }
        else:
            message_dict = {
                "role": "assistant",
                "content": [text_content] if openai_like else text_content,
                "stop_reason": "stop",
            }
    return message_dict
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/models/llama.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/models/meta/llama/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput
from agno.db.sqlite import SqliteDb
from agno.models.meta.llama import Llama
@pytest.fixture(scope="module")
def llama_model():
    """Fixture that provides a Llama model and reuses it across all tests in the module."""
    # module scope: one model (and hence one underlying client) shared by all tests
    return Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8")
def _assert_metrics(response: RunOutput):
    """Shared check that a run's token accounting is present and self-consistent."""
    metrics = response.metrics
    assert metrics is not None
    in_tokens = metrics.input_tokens
    out_tokens = metrics.output_tokens
    total = metrics.total_tokens
    assert in_tokens > 0
    assert out_tokens > 0
    assert total > 0
    # totals must balance exactly
    assert total == in_tokens + out_tokens
def test_basic(llama_model):
    """A basic run produces content, a system/user/assistant message trio, and metrics."""
    agent = Agent(model=llama_model, markdown=True, telemetry=False)
    response: RunOutput = agent.run("Share a 2 sentence horror story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
def test_basic_stream(llama_model):
    """Streaming a basic run yields a non-empty sequence of chunks, each with content."""
    agent = Agent(model=llama_model, markdown=True, telemetry=False)
    stream = agent.run("Share a 2 sentence horror story", stream=True)
    # Verify it's an iterator
    assert hasattr(stream, "__iter__")
    chunks = [chunk for chunk in stream]
    assert len(chunks) > 0
    for chunk in chunks:
        assert chunk.content is not None
@pytest.mark.asyncio
async def test_async_basic(llama_model):
    """Async variant of the basic run: content, message trio, and consistent metrics."""
    agent = Agent(model=llama_model, markdown=True, telemetry=False)
    response = await agent.arun("Share a 2 sentence horror story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
@pytest.mark.asyncio
async def test_async_basic_stream(llama_model):
    """Every chunk of an async stream should carry content."""
    agent = Agent(model=llama_model, markdown=True, telemetry=False)
    async for response in agent.arun("Share a 2 sentence horror story", stream=True):
        assert response.content is not None
def test_with_memory(llama_model):
    """With history enabled, the agent recalls facts from earlier turns in the session."""
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=llama_model,
        add_history_to_context=True,
        num_history_runs=5,
        markdown=True,
        telemetry=False,
    )
    # First interaction
    response1 = agent.run("My name is John Smith")
    assert response1.content is not None
    # Second interaction should remember the name
    response2 = agent.run("What's my name?")
    assert response2.content is not None
    assert "John Smith" in response2.content  # type: ignore
    # Verify memories were created
    messages = agent.get_session_messages()
    assert len(messages) == 5
    assert [m.role for m in messages] == ["system", "user", "assistant", "user", "assistant"]
    # Test metrics structure and types
    _assert_metrics(response2)
def test_structured_output(llama_model):
    """output_schema should coerce the response into the given Pydantic model."""
    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=llama_model,
        output_schema=MovieScript,
        telemetry=False,
    )
    response = agent.run("Create a movie about time travel")
    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_json_response_mode(llama_model):
    """Same structured-output contract, but via JSON mode instead of native schemas."""
    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=llama_model,
        output_schema=MovieScript,
        use_json_mode=True,
        telemetry=False,
    )
    response = agent.run("Create a movie about time travel")
    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_history(llama_model):
    """Message history should grow by two (user + assistant) on each successive run."""
    agent = Agent(
        model=llama_model,
        db=SqliteDb(db_file="tmp/meta/llama/test_basic.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )
    run_output = agent.run("Hello")
    assert run_output.messages is not None
    assert len(run_output.messages) == 2
    run_output = agent.run("Hello 2")
    assert run_output.messages is not None
    assert len(run_output.messages) == 4
    run_output = agent.run("Hello 3")
    assert run_output.messages is not None
    assert len(run_output.messages) == 6
    run_output = agent.run("Hello 4")
    assert run_output.messages is not None
    assert len(run_output.messages) == 8
def test_client_persistence(llama_model):
    """Test that the same Llama client instance is reused across multiple calls"""
    agent = Agent(model=llama_model, markdown=True, telemetry=False)
    # The first run lazily creates the client
    agent.run("Hello")
    initial_client = llama_model.client
    assert initial_client is not None
    # Subsequent runs must hand back the very same object, not a new one
    agent.run("Hello again")
    reused_client = llama_model.client
    assert reused_client is not None
    assert initial_client is reused_client, "Client should be persisted and reused"
    agent.run("Hello once more")
    final_client = llama_model.client
    assert final_client is not None
    assert initial_client is final_client, "Client should still be the same instance"
@pytest.mark.asyncio
async def test_async_client_persistence(llama_model):
    """Test that the same async Llama client instance is reused across multiple calls"""
    agent = Agent(model=llama_model, markdown=True, telemetry=False)
    # The first async run lazily creates the client
    await agent.arun("Hello")
    initial_client = llama_model.async_client
    assert initial_client is not None
    # Subsequent runs must hand back the very same object, not a new one
    await agent.arun("Hello again")
    reused_client = llama_model.async_client
    assert reused_client is not None
    assert initial_client is reused_client, "Async client should be persisted and reused"
    await agent.arun("Hello once more")
    final_client = llama_model.async_client
    assert final_client is not None
    assert initial_client is final_client, "Async client should still be the same instance"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/meta/llama/test_basic.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/meta/llama/test_multimodal.py | from pathlib import Path
from agno.agent.agent import Agent
from agno.media import Image
from agno.models.meta import Llama
# Shared fixture image; the tests below assert the description mentions "golden"
# (presumably a golden retriever or the Golden Gate Bridge — see sample_image.jpg)
image_path = Path(__file__).parent.parent.parent.joinpath("sample_image.jpg")
def test_image_input_file():
    """The model should describe an image supplied as a file path."""
    agent = Agent(model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"), markdown=True, telemetry=False)
    response = agent.run(
        "Tell me about this image?",
        images=[Image(filepath=image_path)],
    )
    assert response.content is not None
    assert "golden" in response.content.lower()
def test_image_input_bytes():
    """The model should describe an image supplied as raw bytes."""
    agent = Agent(model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"), markdown=True, telemetry=False)
    raw_bytes = image_path.read_bytes()
    result = agent.run(
        "Tell me about this image?",
        images=[Image(content=raw_bytes)],
    )
    assert result.content is not None
    assert "golden" in result.content.lower()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/meta/llama/test_multimodal.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/meta/llama/test_tool_use.py | from typing import Optional
import pytest
from agno.agent import Agent, RunOutput # noqa
from agno.models.meta import Llama
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
def test_tool_use():
    """The agent should invoke a YFinance tool and mention the ticker in its answer."""
    agent = Agent(
        model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA?")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_use_stream():
    """Streaming with stream_events=True should surface tool-call events and content."""
    agent = Agent(
        model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )
    response_stream = agent.run("What is the current price of TSLA?", stream=True, stream_events=True)
    responses = []
    tool_call_seen = False
    for chunk in response_stream:
        responses.append(chunk)
        # Check for ToolCallStartedEvent or ToolCallCompletedEvent
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:
            if chunk.tool.tool_name:  # type: ignore
                tool_call_seen = True
    assert len(responses) > 0
    assert tool_call_seen, "No tool calls observed in stream"
    assert any("TSLA" in r.content for r in responses if r.content)
@pytest.mark.asyncio
async def test_async_tool_use():
    """Async run should invoke the YFinance tool and mention the ticker."""
    agent = Agent(
        model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )
    response = await agent.arun("What is the current price of TSLA?")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.role == "assistant" and msg.tool_calls is not None)
    assert response.content is not None
    assert "TSLA" in response.content
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """Async streaming should surface tool-call events and the ticker in content.

    Fix: tool_call_seen / keyword_seen_in_response were referenced in the final
    assertions without ever being initialised, so a stream with no tool calls
    raised NameError instead of a clean assertion failure.
    """
    agent = Agent(
        model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )
    tool_call_seen = False
    keyword_seen_in_response = False
    async for chunk in agent.arun("What is the current price of TSLA?", stream=True, stream_events=True):
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:
            if chunk.tool.tool_name:  # type: ignore
                tool_call_seen = True
        if chunk.content is not None and "TSLA" in chunk.content:
            keyword_seen_in_response = True
    assert tool_call_seen, "No tool calls observed in stream"
    assert keyword_seen_in_response, "Keyword not found in response"
def test_tool_use_with_content():
    """Tool result plus model knowledge: answer must mention both TSLA and Tesla."""
    agent = Agent(
        model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA? What does the ticker stand for?")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "TSLA" in response.content
    assert "Tesla" in response.content
def test_parallel_tool_calls():
    """Asking for two tickers should produce at least two function tool calls."""
    agent = Agent(
        model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA and AAPL?")
    # Verify tool usage
    assert response.messages is not None
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls is not None:
            tool_calls.extend(msg.tool_calls)
    assert len([call for call in tool_calls if call.get("type", "") == "function"]) >= 2  # Total of 2 tool calls made
    assert response.content is not None
    assert "TSLA" in response.content and "AAPL" in response.content
def test_multiple_tool_calls():
    """Two different toolkits available; the answer should mention the ticker."""
    agent = Agent(
        model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True), WebSearchTools(cache_results=True)],
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA and what is the latest news about it?")
    # Verify tool usage
    assert response.messages is not None
    # NOTE(review): tool_calls is collected below but never asserted on —
    # consider asserting it is non-empty, or removing the dead collection.
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls is not None:
            tool_calls.extend(msg.tool_calls)
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_call_custom_tool_no_parameters():
    """A custom zero-argument function can be registered and invoked as a tool."""
    def get_the_weather_in_tokyo():
        """
        Get the weather in Tokyo
        """
        return "It is currently 70 degrees and cloudy in Tokyo"

    agent = Agent(
        model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[get_the_weather_in_tokyo],
        telemetry=False,
    )
    response = agent.run("What is the weather in Tokyo? Use the tool get_the_weather_in_tokyo")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "Tokyo" in response.content
def test_tool_call_custom_tool_optional_parameters():
    """A custom tool with an Optional parameter is callable with or without it."""
    def get_the_weather(city: Optional[str] = None):
        """
        Get the weather in a city

        Args:
            city: The city to get the weather for
        """
        if city is None:
            return "It is currently 70 degrees and cloudy in Tokyo"
        else:
            return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[get_the_weather],
        telemetry=False,
    )
    response = agent.run("What is the weather in Paris?")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "70" in response.content
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/meta/llama/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/meta/llama_openai/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput
from agno.db.sqlite import SqliteDb
from agno.models.meta.llama_openai import LlamaOpenAI
@pytest.fixture(scope="module")
def llama_openai_model():
    """Fixture that provides a LlamaOpenAI model and reuses it across all tests in the module."""
    # module scope: one model (and hence one underlying client) shared by all tests
    return LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8")
def _assert_metrics(response: RunOutput):
    """Shared check that a run's token accounting is present and self-consistent."""
    metrics = response.metrics
    assert metrics is not None
    in_tokens = metrics.input_tokens
    out_tokens = metrics.output_tokens
    total = metrics.total_tokens
    assert in_tokens > 0
    assert out_tokens > 0
    assert total > 0
    # totals must balance exactly
    assert total == in_tokens + out_tokens
def test_basic(llama_openai_model):
    """A basic run produces content, a system/user/assistant message trio, and metrics."""
    agent = Agent(model=llama_openai_model, markdown=True, telemetry=False)
    response: RunOutput = agent.run("Share a 2 sentence horror story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
def test_basic_stream(llama_openai_model):
    """Every streamed chunk of a basic run should carry content."""
    agent = Agent(model=llama_openai_model, markdown=True, telemetry=False)
    stream = agent.run("Share a 2 sentence horror story", stream=True)
    for chunk in stream:
        assert chunk.content is not None
@pytest.mark.asyncio
async def test_async_basic(llama_openai_model):
    """Async variant of the basic run: content, message trio, and consistent metrics."""
    agent = Agent(model=llama_openai_model, markdown=True, telemetry=False)
    response = await agent.arun("Share a 2 sentence horror story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
@pytest.mark.asyncio
async def test_async_basic_stream(llama_openai_model):
    """Every chunk of an async stream should carry content."""
    agent = Agent(model=llama_openai_model, markdown=True, telemetry=False)
    async for response in agent.arun("Share a 2 sentence horror story", stream=True):
        assert response.content is not None
def test_with_memory(llama_openai_model):
    """With history enabled, the agent recalls facts from earlier turns in the session."""
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=llama_openai_model,
        add_history_to_context=True,
        num_history_runs=5,
        markdown=True,
        telemetry=False,
    )
    # First interaction
    response1 = agent.run("My name is John Smith")
    assert response1.content is not None
    # Second interaction should remember the name
    response2 = agent.run("What's my name?")
    assert response2.content is not None
    assert "John Smith" in response2.content  # type: ignore
    # Verify memories were created
    messages = agent.get_session_messages()
    assert len(messages) == 5
    assert [m.role for m in messages] == ["system", "user", "assistant", "user", "assistant"]
    # Test metrics structure and types
    _assert_metrics(response2)
def test_structured_output(llama_openai_model):
    """output_schema should coerce the response into the given Pydantic model."""
    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=llama_openai_model,
        output_schema=MovieScript,
        telemetry=False,
    )
    response = agent.run("Create a movie about time travel")
    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_json_response_mode(llama_openai_model):
    """Same structured-output contract, but via JSON mode instead of native schemas."""
    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=llama_openai_model,
        output_schema=MovieScript,
        use_json_mode=True,
        telemetry=False,
    )
    response = agent.run("Create a movie about time travel")
    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_history(llama_openai_model):
    """Message history should grow by two (user + assistant) on each successive run."""
    agent = Agent(
        model=llama_openai_model,
        db=SqliteDb(db_file="tmp/meta/llama_openai/test_basic.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )
    run_output = agent.run("Hello")
    assert run_output.messages is not None
    assert len(run_output.messages) == 2
    run_output = agent.run("Hello 2")
    assert run_output.messages is not None
    assert len(run_output.messages) == 4
    run_output = agent.run("Hello 3")
    assert run_output.messages is not None
    assert len(run_output.messages) == 6
    run_output = agent.run("Hello 4")
    assert run_output.messages is not None
    assert len(run_output.messages) == 8
def test_client_persistence(llama_openai_model):
    """Test that the same LlamaOpenAI client instance is reused across multiple calls"""
    agent = Agent(model=llama_openai_model, markdown=True, telemetry=False)
    # The first run lazily creates the client
    agent.run("Hello")
    initial_client = llama_openai_model.client
    assert initial_client is not None
    # Subsequent runs must hand back the very same object, not a new one
    agent.run("Hello again")
    reused_client = llama_openai_model.client
    assert reused_client is not None
    assert initial_client is reused_client, "Client should be persisted and reused"
    agent.run("Hello once more")
    final_client = llama_openai_model.client
    assert final_client is not None
    assert initial_client is final_client, "Client should still be the same instance"
@pytest.mark.asyncio
async def test_async_client_persistence(llama_openai_model):
    """Test that the same async LlamaOpenAI client instance is reused across multiple calls"""
    agent = Agent(model=llama_openai_model, markdown=True, telemetry=False)
    # The first async run lazily creates the client
    await agent.arun("Hello")
    initial_client = llama_openai_model.async_client
    assert initial_client is not None
    # Subsequent runs must hand back the very same object, not a new one
    await agent.arun("Hello again")
    reused_client = llama_openai_model.async_client
    assert reused_client is not None
    assert initial_client is reused_client, "Async client should be persisted and reused"
    await agent.arun("Hello once more")
    final_client = llama_openai_model.async_client
    assert final_client is not None
    assert initial_client is final_client, "Async client should still be the same instance"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/meta/llama_openai/test_basic.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/meta/llama_openai/test_multimodal.py | from pathlib import Path
from agno.agent.agent import Agent
from agno.media import Image
from agno.models.meta import LlamaOpenAI
from agno.utils.media import download_image
# Fixture image for the tests below.
# NOTE(review): this download runs at import/collection time and hits the
# network even when the tests are deselected — consider moving it into a fixture.
image_path = Path(__file__).parent.joinpath("sample.jpg")
download_image(
    url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg",
    output_path=str(image_path),
)
def test_image_input_file():
    """The model should recognise the Golden Gate Bridge from a file-path image."""
    agent = Agent(model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"), markdown=True, telemetry=False)
    response = agent.run(
        "Tell me about this image?",
        images=[Image(filepath=image_path)],
    )
    assert response.content is not None
    assert "golden" in response.content.lower()
    assert "bridge" in response.content.lower()
def test_image_input_bytes():
    """The model should recognise the Golden Gate Bridge from a raw-bytes image."""
    agent = Agent(model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"), markdown=True, telemetry=False)
    raw_bytes = image_path.read_bytes()
    result = agent.run(
        "Tell me about this image?",
        images=[Image(content=raw_bytes)],
    )
    assert result.content is not None
    lowered = result.content.lower()
    assert "golden" in lowered
    assert "bridge" in lowered
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/meta/llama_openai/test_multimodal.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/meta/llama_openai/test_tool_use.py | from typing import Optional
import pytest
from agno.agent import Agent, RunOutput # noqa
from agno.models.meta import LlamaOpenAI
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
def test_tool_use():
    """The agent should invoke a YFinance tool and mention the ticker in its answer."""
    agent = Agent(
        model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA?")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_use_stream():
    """A streaming run should surface tool-call events and the TSLA keyword.

    Fix: `tool_call_seen` and `keyword_seen_in_response` were referenced in
    the final asserts without ever being initialized, so a stream that never
    produced the expected events raised NameError instead of failing with the
    intended assertion message.
    """
    agent = Agent(
        model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )

    # Initialize before the loop so the asserts below fail cleanly when the
    # stream yields no matching chunks.
    tool_call_seen = False
    keyword_seen_in_response = False

    for chunk in agent.run("What is the current price of TSLA?", stream=True, stream_events=True):
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:  # type: ignore
            if chunk.tool.tool_name:  # type: ignore
                tool_call_seen = True
        if chunk.content is not None and "TSLA" in chunk.content:
            keyword_seen_in_response = True

    assert tool_call_seen, "No tool calls observed in stream"
    assert keyword_seen_in_response, "Keyword not found in response"
@pytest.mark.asyncio
async def test_async_tool_use():
    """An async run for a stock-price question should trigger a YFinance tool call."""
    agent = Agent(
        model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )

    response = await agent.arun("What is the current price of TSLA?")

    # The assistant must have produced at least one tool call.
    assert response.messages is not None
    assistant_tool_call_messages = [
        m for m in response.messages if m.role == "assistant" and m.tool_calls
    ]
    assert assistant_tool_call_messages
    assert response.content is not None
    assert "TSLA" in response.content
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """An async streaming run should surface tool-call events and the TSLA keyword.

    Fix: `tool_call_seen` and `keyword_seen_in_response` were referenced in
    the final asserts without ever being initialized, so a stream that never
    produced the expected events raised NameError instead of failing with the
    intended assertion message.
    """
    agent = Agent(
        model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )

    # Initialize before the loop so the asserts below fail cleanly when the
    # stream yields no matching chunks.
    tool_call_seen = False
    keyword_seen_in_response = False

    async for chunk in agent.arun("What is the current price of TSLA?", stream=True, stream_events=True):
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:  # type: ignore
            if chunk.tool.tool_name:  # type: ignore
                tool_call_seen = True
        if chunk.content is not None and "TSLA" in chunk.content:
            keyword_seen_in_response = True

    assert tool_call_seen, "No tool calls observed in stream"
    assert keyword_seen_in_response, "Keyword not found in response"
def test_tool_use_with_content():
    """The response should combine tool output (price) with model knowledge (name)."""
    agent = Agent(
        model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )

    response = agent.run("What is the current price of TSLA? What does the ticker stand for?")

    # At least one message in the run must carry a tool call.
    assert response.messages is not None
    assert [m for m in response.messages if m.tool_calls]
    assert response.content is not None
    assert "TSLA" in response.content
    assert "Tesla" in response.content
def test_parallel_tool_calls():
    """Asking about two tickers should produce at least two function tool calls."""
    agent = Agent(
        model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )

    response = agent.run("What is the current price of TSLA and AAPL?")

    assert response.messages is not None
    function_calls = [
        call
        for msg in response.messages
        if msg.tool_calls is not None
        for call in msg.tool_calls
        if call.get("type", "") == "function"
    ]
    assert len(function_calls) >= 2  # Total of 2 tool calls made
    assert response.content is not None
    assert "TSLA" in response.content and "AAPL" in response.content
def test_multiple_tool_calls():
    """A price + news question should use both the finance and web-search tools."""
    agent = Agent(
        model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[YFinanceTools(cache_results=True), WebSearchTools(cache_results=True)],
        telemetry=False,
    )

    response = agent.run("What is the current price of TSLA and what is the latest news about it?")

    assert response.messages is not None
    function_calls = [
        call
        for msg in response.messages
        if msg.tool_calls is not None
        for call in msg.tool_calls
        if call.get("type", "") == "function"
    ]
    assert len(function_calls) >= 2  # Total of 2 tool calls made
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_call_custom_tool_no_parameters():
    """A zero-argument custom function should be usable as a tool."""

    def get_the_weather_in_tokyo():
        """
        Get the weather in Tokyo
        """
        return "It is currently 70 degrees and cloudy in Tokyo"

    agent = Agent(
        model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[get_the_weather_in_tokyo],
        telemetry=False,
    )

    response = agent.run("What is the weather in Tokyo? Use the tool get_the_weather_in_tokyo")

    # The custom tool must have been invoked at least once.
    assert response.messages is not None
    assert [m for m in response.messages if m.tool_calls]
    assert response.content is not None
    assert "Tokyo" in response.content
def test_tool_call_custom_tool_optional_parameters():
    """A custom tool with an optional parameter should be callable with a value."""

    def get_the_weather(city: Optional[str] = None):
        """
        Get the weather in a city

        Args:
            city: The city to get the weather for
        """
        if city is not None:
            return f"It is currently 70 degrees and cloudy in {city}"
        return "It is currently 70 degrees and cloudy in Tokyo"

    agent = Agent(
        model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
        tools=[get_the_weather],
        telemetry=False,
    )

    response = agent.run("What is the weather in Paris?")

    # The custom tool must have been invoked at least once.
    assert response.messages is not None
    assert [m for m in response.messages if m.tool_calls]
    assert response.content is not None
    assert "70" in response.content
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/meta/llama_openai/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/models/meta/llama.py | from collections.abc import AsyncIterator
from dataclasses import dataclass
from os import getenv
from typing import Any, Dict, Iterator, List, Optional, Type, Union
import httpx
from pydantic import BaseModel
from agno.exceptions import ModelProviderError
from agno.models.base import Model
from agno.models.message import Message
from agno.models.metrics import MessageMetrics
from agno.models.response import ModelResponse
from agno.run.agent import RunOutput
from agno.utils.http import get_default_async_client, get_default_sync_client
from agno.utils.log import log_debug, log_error, log_warning
from agno.utils.models.llama import format_message
try:
from llama_api_client import AsyncLlamaAPIClient, LlamaAPIClient
from llama_api_client.types.create_chat_completion_response import CreateChatCompletionResponse, Metric
from llama_api_client.types.create_chat_completion_response_stream_chunk import (
CreateChatCompletionResponseStreamChunk,
EventDeltaTextDelta,
EventDeltaToolCallDelta,
EventDeltaToolCallDeltaFunction,
EventMetric,
)
from llama_api_client.types.message_text_content_item import MessageTextContentItem
except ImportError:
raise ImportError("`llama-api-client` not installed. Please install using `pip install llama-api-client`")
@dataclass
class Llama(Model):
    """
    A class for interacting with Llama models via the Llama API, using the
    official Llama SDK (`llama-api-client`).
    """

    id: str = "Llama-4-Maverick-17B-128E-Instruct-FP8"
    name: str = "Llama"
    provider: str = "Llama"

    # The Llama API has no native structured-output mode, but a JSON schema
    # can be enforced through the response_format request option.
    supports_native_structured_outputs: bool = False
    supports_json_schema_outputs: bool = True

    # Request parameters — forwarded to chat.completions.create when set
    # (see get_request_params).
    max_completion_tokens: Optional[int] = None
    repetition_penalty: Optional[float] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    extra_headers: Optional[Any] = None
    extra_query: Optional[Any] = None
    extra_body: Optional[Any] = None
    # Extra request kwargs merged into every API call.
    request_params: Optional[Dict[str, Any]] = None

    # Client parameters — used to construct the (Async)LlamaAPIClient
    # (see _get_client_params).
    api_key: Optional[str] = None
    base_url: Optional[Union[str, httpx.URL]] = None
    timeout: Optional[float] = None
    max_retries: Optional[int] = None
    default_headers: Optional[Any] = None
    default_query: Optional[Any] = None
    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
    client_params: Optional[Dict[str, Any]] = None

    # Cached Llama SDK clients, created lazily by get_client / get_async_client.
    client: Optional[LlamaAPIClient] = None
    async_client: Optional[AsyncLlamaAPIClient] = None
def _get_client_params(self) -> Dict[str, Any]:
# Fetch API key from env if not already set
if not self.api_key:
self.api_key = getenv("LLAMA_API_KEY")
if not self.api_key:
log_error("LLAMA_API_KEY not set. Please set the LLAMA_API_KEY environment variable.")
# Define base client params
base_params = {
"api_key": self.api_key,
"base_url": self.base_url,
"timeout": self.timeout,
"max_retries": self.max_retries,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
# Create client_params dict with non-None values
client_params = {k: v for k, v in base_params.items() if v is not None}
# Add additional client params if provided
if self.client_params:
client_params.update(self.client_params)
return client_params
def get_client(self) -> LlamaAPIClient:
"""
Returns a Llama client.
Returns:
LlamaAPIClient: An instance of the Llama client.
"""
if self.client and not self.client.is_closed():
return self.client
client_params: Dict[str, Any] = self._get_client_params()
if self.http_client:
if isinstance(self.http_client, httpx.Client):
client_params["http_client"] = self.http_client
else:
log_warning("http_client is not an instance of httpx.Client. Using default global httpx.Client.")
# Use global sync client when user http_client is invalid
client_params["http_client"] = get_default_sync_client()
else:
# Use global sync client when no custom http_client is provided
client_params["http_client"] = get_default_sync_client()
self.client = LlamaAPIClient(**client_params)
return self.client
def get_async_client(self) -> AsyncLlamaAPIClient:
"""
Returns an asynchronous Llama client.
Returns:
AsyncLlamaAPIClient: An instance of the asynchronous Llama client.
"""
if self.async_client and not self.async_client.is_closed():
return self.async_client
client_params: Dict[str, Any] = self._get_client_params()
if self.http_client:
if isinstance(self.http_client, httpx.AsyncClient):
client_params["http_client"] = self.http_client
else:
log_warning(
"http_client is not an instance of httpx.AsyncClient. Using default global httpx.AsyncClient."
)
# Use global async client when user http_client is invalid
client_params["http_client"] = get_default_async_client()
else:
# Use global async client when no custom http_client is provided
client_params["http_client"] = get_default_async_client()
# Create and cache the client
self.async_client = AsyncLlamaAPIClient(**client_params)
return self.async_client
def get_request_params(
self,
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
tools: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
"""
Returns keyword arguments for API requests.
"""
# Define base request parameters
base_params = {
"max_completion_tokens": self.max_completion_tokens,
"repetition_penalty": self.repetition_penalty,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"extra_headers": self.extra_headers,
"extra_query": self.extra_query,
"extra_body": self.extra_body,
"request_params": self.request_params,
}
# Filter out None values
request_params = {k: v for k, v in base_params.items() if v is not None}
# Add tools
if tools is not None and len(tools) > 0:
request_params["tools"] = tools
if response_format is not None:
request_params["response_format"] = response_format
# Add additional request params if provided
if self.request_params:
request_params.update(self.request_params)
if request_params:
log_debug(f"Calling {self.provider} with request parameters: {request_params}", log_level=2)
return request_params
def to_dict(self) -> Dict[str, Any]:
"""
Convert the model to a dictionary.
Returns:
Dict[str, Any]: The dictionary representation of the model.
"""
model_dict = super().to_dict()
model_dict.update(
{
"max_completion_tokens": self.max_completion_tokens,
"repetition_penalty": self.repetition_penalty,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"extra_headers": self.extra_headers,
"extra_query": self.extra_query,
"extra_body": self.extra_body,
"request_params": self.request_params,
}
)
cleaned_dict = {k: v for k, v in model_dict.items() if v is not None}
return cleaned_dict
def invoke(
self,
messages: List[Message],
assistant_message: Message,
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
tools: Optional[List[Dict[str, Any]]] = None,
tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
run_response: Optional[RunOutput] = None,
compress_tool_results: bool = False,
) -> ModelResponse:
"""
Send a chat completion request to the Llama API.
"""
assistant_message.metrics.start_timer()
provider_response = self.get_client().chat.completions.create(
model=self.id,
messages=[
format_message(m, tool_calls=bool(tools), compress_tool_results=compress_tool_results) # type: ignore
for m in messages
],
**self.get_request_params(tools=tools, response_format=response_format),
)
assistant_message.metrics.stop_timer()
model_response = self._parse_provider_response(provider_response, response_format=response_format)
return model_response
async def ainvoke(
self,
messages: List[Message],
assistant_message: Message,
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
tools: Optional[List[Dict[str, Any]]] = None,
tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
run_response: Optional[RunOutput] = None,
compress_tool_results: bool = False,
) -> ModelResponse:
"""
Sends an asynchronous chat completion request to the Llama API.
"""
assistant_message.metrics.start_timer()
provider_response = await self.get_async_client().chat.completions.create(
model=self.id,
messages=[
format_message(m, tool_calls=bool(tools), compress_tool_results=compress_tool_results) # type: ignore
for m in messages
],
**self.get_request_params(tools=tools, response_format=response_format),
)
assistant_message.metrics.stop_timer()
model_response = self._parse_provider_response(provider_response, response_format=response_format)
return model_response
def invoke_stream(
self,
messages: List[Message],
assistant_message: Message,
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
tools: Optional[List[Dict[str, Any]]] = None,
tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
run_response: Optional[RunOutput] = None,
compress_tool_results: bool = False,
) -> Iterator[ModelResponse]:
"""
Send a streaming chat completion request to the Llama API.
"""
try:
assistant_message.metrics.start_timer()
for chunk in self.get_client().chat.completions.create(
model=self.id,
messages=[
format_message(m, tool_calls=bool(tools), compress_tool_results=compress_tool_results) # type: ignore
for m in messages
],
stream=True,
**self.get_request_params(tools=tools, response_format=response_format),
):
yield self._parse_provider_response_delta(chunk) # type: ignore
assistant_message.metrics.stop_timer()
except Exception as e:
log_error(f"Error from Llama API: {e}")
raise ModelProviderError(message=str(e), model_name=self.name, model_id=self.id) from e
async def ainvoke_stream(
self,
messages: List[Message],
assistant_message: Message,
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
tools: Optional[List[Dict[str, Any]]] = None,
tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
run_response: Optional[RunOutput] = None,
compress_tool_results: bool = False,
) -> AsyncIterator[ModelResponse]:
"""
Sends an asynchronous streaming chat completion request to the Llama API.
"""
assistant_message.metrics.start_timer()
try:
async for chunk in await self.get_async_client().chat.completions.create(
model=self.id,
messages=[
format_message(m, tool_calls=bool(tools), compress_tool_results=compress_tool_results) # type: ignore
for m in messages
],
stream=True,
**self.get_request_params(tools=tools, response_format=response_format),
):
yield self._parse_provider_response_delta(chunk) # type: ignore
assistant_message.metrics.stop_timer()
except Exception as e:
log_error(f"Error from Llama API: {e}")
raise ModelProviderError(message=str(e), model_name=self.name, model_id=self.id) from e
    def parse_tool_calls(self, tool_calls_data: List[EventDeltaToolCallDeltaFunction]) -> List[Dict[str, Any]]:
        """
        Parse the tool calls from the Llama API.

        Accumulates streamed tool-call fragments (id, function-name chunks,
        argument chunks) and flushes them into complete tool-call dicts
        whenever a new id or a new function name signals that the next call
        has started.

        Args:
            tool_calls_data (List[Tuple[str, Any]]): The tool calls data.
                NOTE(review): despite the declared annotation, the loop below
                iterates this as (field, value) pairs — presumably a pydantic
                delta object yielding its fields; confirm against the SDK types.

        Returns:
            List[Dict[str, Any]]: The parsed tool calls.
        """
        tool_calls: List[Dict[str, Any]] = []
        # Accumulators for the tool call currently being assembled.
        _tool_call_id: Optional[str] = None
        _function_name_parts: List[str] = []
        _function_arguments_parts: List[str] = []

        def _create_tool_call():
            # Flush the accumulated fragments into a completed tool-call dict,
            # then reset the accumulators for the next call.
            nonlocal _tool_call_id
            if _tool_call_id and (_function_name_parts or _function_arguments_parts):
                tool_calls.append(
                    {
                        "id": _tool_call_id,
                        "type": "function",
                        "function": {
                            "name": "".join(_function_name_parts),
                            "arguments": "".join(_function_arguments_parts),
                        },
                    }
                )
            _tool_call_id = None
            _function_name_parts.clear()
            _function_arguments_parts.clear()

        for _field, _value in tool_calls_data:
            if _field == "function" and isinstance(_value, EventDeltaToolCallDeltaFunction):
                # A new function name while fragments are pending marks the
                # start of the next tool call — flush the previous one first.
                if _value.name and (_tool_call_id or _function_name_parts or _function_arguments_parts):
                    _create_tool_call()
                if _value.name:
                    _function_name_parts.append(_value.name)
                if _value.arguments:
                    _function_arguments_parts.append(_value.arguments)
            elif _field == "id":
                # A second id likewise marks the start of the next tool call.
                if _value and _tool_call_id:
                    _create_tool_call()
                if _value:
                    _tool_call_id = _value  # type: ignore

        # Flush whatever is still pending after the final fragment.
        _create_tool_call()
        return tool_calls
    def _parse_provider_response(self, response: CreateChatCompletionResponse, **kwargs) -> ModelResponse:
        """
        Parse the Llama response into a ModelResponse.

        Extracts the role, text content, tool calls, and usage metrics from
        the SDK response object.

        Args:
            response: Response from invoke() method

        Returns:
            ModelResponse: Parsed response data
        """
        model_response = ModelResponse()

        # Get response message
        response_message = response.completion_message

        # Add role
        if response_message.role is not None:
            model_response.role = response_message.role

        # Add content — either a structured text item or a plain value.
        if response_message.content is not None:
            if isinstance(response_message.content, MessageTextContentItem):
                model_response.content = response_message.content.text
            else:
                model_response.content = response_message.content

        # Add tool calls, converting SDK objects into plain dicts.
        if response_message.tool_calls is not None and len(response_message.tool_calls) > 0:
            try:
                for tool_call in response_message.tool_calls:
                    tool_name = tool_call.function.name
                    tool_input = tool_call.function.arguments
                    function_def = {"name": tool_name}
                    if tool_input:
                        function_def["arguments"] = tool_input
                    model_response.tool_calls.append(
                        {
                            "id": tool_call.id,
                            "type": "function",
                            "function": function_def,
                        }
                    )
            except Exception as e:
                # Malformed tool calls are logged and skipped rather than
                # failing the whole response.
                log_warning(f"Error processing tool calls: {e}")

        # Add metrics from the metrics list
        if hasattr(response, "metrics") and response.metrics is not None:
            model_response.response_usage = self._get_metrics(response.metrics)

        return model_response
    def _parse_provider_response_delta(
        self, response: CreateChatCompletionResponseStreamChunk, **kwargs
    ) -> ModelResponse:
        """
        Parse a Llama streaming chunk into a ModelResponse.

        Args:
            response: Raw response chunk from the Llama API

        Returns:
            ModelResponse: Parsed response data
        """
        model_response = ModelResponse()

        if response is not None:
            delta = response.event

            # Capture metrics event
            if delta.event_type == "metrics" and delta.metrics is not None:
                model_response.response_usage = self._get_metrics(delta.metrics)

            # Text deltas carry the next fragment of assistant content.
            if isinstance(delta.delta, EventDeltaTextDelta):
                model_response.content = delta.delta.text

            # Add tool calls.
            # NOTE(review): the raw SDK delta object is stored as-is (not a
            # list of dicts); presumably parse_tool_calls consumes it later —
            # confirm against the stream-handling code path.
            if isinstance(delta.delta, EventDeltaToolCallDelta):
                model_response.tool_calls = delta.delta  # type: ignore

        return model_response
def _get_metrics(self, response_usage: Union[List[Metric], List[EventMetric]]) -> MessageMetrics:
"""
Parse the given Llama usage into an Agno MessageMetrics object.
Args:
response_usage: Usage data from Llama
Returns:
MessageMetrics: Parsed metrics data
"""
metrics = MessageMetrics()
for metric in response_usage:
metrics_field = metric.metric
if metrics_field == "num_prompt_tokens":
metrics.input_tokens = int(metric.value)
elif metrics_field == "num_completion_tokens":
metrics.output_tokens = int(metric.value)
metrics.total_tokens = metrics.input_tokens + metrics.output_tokens
return metrics
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/meta/llama.py",
"license": "Apache License 2.0",
"lines": 413,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/models/meta/llama_openai.py | from dataclasses import dataclass
from os import getenv
from typing import Any, Dict, Optional
try:
from openai import AsyncOpenAI as AsyncOpenAIClient
except ImportError:
raise ImportError("`openai` not installed. Please install using `pip install openai`")
from agno.exceptions import ModelAuthenticationError
from agno.models.meta.llama import Message
from agno.models.openai.like import OpenAILike
from agno.utils.models.llama import format_message
@dataclass
class LlamaOpenAI(OpenAILike):
    """
    Class for interacting with the Llama API via an OpenAI-like interface.

    Attributes:
        id (str): The ID of the language model.
        name (str): The name of the API.
        provider (str): The provider of the API.
        api_key (Optional[str]): The API key for the Llama API.
        base_url (Optional[str]): The base URL for the Llama API.
    """

    id: str = "Llama-4-Maverick-17B-128E-Instruct-FP8"
    name: str = "LlamaOpenAI"
    provider: str = "LlamaOpenAI"

    api_key: Optional[str] = None
    # OpenAI-compatible endpoint exposed by the Llama API.
    base_url: Optional[str] = "https://api.llama.com/compat/v1/"

    # Request parameters — forwarded with chat completion requests when set.
    max_completion_tokens: Optional[int] = None
    repetition_penalty: Optional[float] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    extra_headers: Optional[Any] = None
    extra_query: Optional[Any] = None
    extra_body: Optional[Any] = None
    request_params: Optional[Dict[str, Any]] = None

    # The Llama API has no native structured-output mode, but a JSON schema
    # can be enforced on the response.
    supports_native_structured_outputs: bool = False
    supports_json_schema_outputs: bool = True

    # Cached async client
    openai_async_client: Optional[AsyncOpenAIClient] = None

    def _get_client_params(self) -> Dict[str, Any]:
        """
        Returns client parameters for API requests, checking for LLAMA_API_KEY.

        Raises:
            ModelAuthenticationError: If no API key is set and LLAMA_API_KEY is
                absent from the environment.

        Returns:
            Dict[str, Any]: A dictionary of client parameters for API requests.
        """
        if not self.api_key:
            self.api_key = getenv("LLAMA_API_KEY")
        if not self.api_key:
            raise ModelAuthenticationError(
                message="LLAMA_API_KEY not set. Please set the LLAMA_API_KEY environment variable.",
                model_name=self.name,
            )
        return super()._get_client_params()

    def _format_message(self, message: Message) -> Dict[str, Any]:
        """
        Format a message into the format expected by the Llama API.

        Args:
            message (Message): The message to format.

        Returns:
            Dict[str, Any]: The formatted message.
        """
        # Delegate to the shared Llama formatter in OpenAI-compatible mode.
        return format_message(message, openai_like=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/meta/llama_openai.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ansible/ansible:lib/ansible/_internal/_powershell/_clixml.py | """Helpers for PowerShell's CLIXML data"""
from __future__ import annotations
import base64
import re
import xml.etree.ElementTree as ET
# Deserialization pattern: matches the CLIXML escape sequence '_x(hex){4}_'
# AFTER the text has been encoded as utf-16-be bytes. The interleaved \x00
# bytes are the high bytes of the ASCII chars in that encoding, so the
# pattern only fires on genuine escape sequences, never on ordinary text.
_STRING_DESERIAL_FIND = re.compile(rb"\x00_\x00x((?:\x00[a-fA-F0-9]){4})\x00_")

# Finds '_x' case-insensitively; '_x' introduces the escape sequence in a
# serialized CLIXML string, so any literal occurrence must itself be escaped
# before the general escape pass.
_STRING_SERIAL_ESCAPE_ESCAPE = re.compile("(?i)_(x)")

# Finds C0/C1 control chars, lone surrogates, and non-BMP codepoints
# (which become surrogate pairs in UTF-16) that must be encoded as
# '_xDDDD_' escapes according to the MS-PSRP rules.
_STRING_SERIAL_ESCAPE = re.compile("[\u0000-\u001f\u007f-\u009f\ud800-\ud8ff\udc00-\udfff\U00010000-\U0010ffff]")
def replace_stderr_clixml(stderr: bytes) -> bytes:
    """Replace CLIXML with stderr data.

    Tries to replace an embedded CLIXML string with the actual stderr data. If
    it fails to parse the CLIXML data, it will return the original data. This
    will replace any line inside the stderr string that contains a valid CLIXML
    sequence.

    :param bytes stderr: The stderr to try and decode.
    :returns: The stderr with the decoded CLIXML data or the original data.
    """
    clixml_header = b"#< CLIXML"

    # Instead of checking both patterns we just see if the next char
    # is \r or \n to match both Windows and POSIX newline after the marker.
    clixml_idx = stderr.find(clixml_header)
    if clixml_idx == -1:
        return stderr

    # 9 == len(b"#< CLIXML"); the byte right after the marker must be a newline.
    newline_idx = clixml_idx + 9
    if len(stderr) < (newline_idx + 1) or stderr[newline_idx] not in (ord(b'\r'), ord(b'\n')):
        return stderr

    lines: list[bytes] = []
    # Set when the previous line was a CLIXML header, meaning the current
    # line should contain the CLIXML payload itself.
    is_clixml = False
    for line in stderr.splitlines(True):
        if is_clixml:
            is_clixml = False

            # If the line does not contain the closing CLIXML tag, we just
            # add the found header line and this line without trying to parse.
            end_idx = line.find(b"</Objs>")
            if end_idx == -1:
                lines.append(clixml_header)
                lines.append(line)
                continue

            # Split the payload at the end of the closing tag (7 == len(b"</Objs>")).
            clixml = line[: end_idx + 7]
            remaining = line[end_idx + 7 :]

            # While we expect the stderr to be UTF-8 encoded, we fallback to
            # the most common "ANSI" codepage used by Windows cp437 if it is
            # not valid UTF-8.
            try:
                clixml_text = clixml.decode("utf-8")
            except UnicodeDecodeError:
                clixml_text = clixml.decode("cp437")

            try:
                decoded_clixml = extract_clixml_strings(clixml_text, stream="Error")
                # surrogatepass keeps any unpaired surrogates round-trippable.
                lines.append("".join(decoded_clixml).encode('utf-8', errors='surrogatepass'))
                if remaining:
                    lines.append(remaining)
            except Exception:
                # Any errors and we just add the original CLIXML header and
                # line back in.
                lines.append(clixml_header)
                lines.append(line)

        elif line.startswith(clixml_header):
            # The next line should contain the full CLIXML data.
            clixml_header = line  # Preserve original newlines value.
            is_clixml = True

        else:
            lines.append(line)

    # This should never happen but if there was a CLIXML header without a newline
    # following it, we need to add it back.
    if is_clixml:
        lines.append(clixml_header)

    return b"".join(lines)
def extract_clixml_strings(
    data: str,
    stream: str | None = None,
) -> list[str]:
    """
    Takes a string that contains a CLIXML <Objs> element and extracts any
    string elements within. This is a rudimentary extraction designed for
    stderr CLIXML and -EncodedArguments.

    :param data: The raw CLIXML string.
    :param stream: The optional stream name to extract the data for.
    :returns: A list of CLIXML strings encoded within the CLIXML string.
    """
    lines: list[str] = []

    # A serialized string will serialize control chars and surrogate pairs as
    # _xDDDD_ values where DDDD is the hex representation of a big endian
    # UTF-16 code unit. As a surrogate pair uses 2 UTF-16 code units, we need
    # to operate our text replacement on the utf-16-be byte encoding of the raw
    # text. This allows us to replace the _xDDDD_ values with the actual byte
    # values and then decode that back to a string from the utf-16-be bytes.
    def rplcr(matchobj: re.Match) -> bytes:
        # Group 1 holds the 4 hex digits, still interleaved with \x00 bytes.
        match_hex = matchobj.group(1)
        hex_string = match_hex.decode("utf-16-be")
        return base64.b16decode(hex_string.upper())

    # There are some scenarios where the stderr contains a nested CLIXML element like
    # '<# CLIXML\r\n<# CLIXML\r\n<Objs>...</Objs><Objs>...</Objs>'.
    # Parse each individual <Objs> element and add the error strings to our stderr list.
    # https://github.com/ansible/ansible/issues/69550
    while data:
        start_idx = data.find("<Objs ")
        end_idx = data.find("</Objs>")
        if start_idx == -1 or end_idx == -1:
            break

        # 7 == len("</Objs>"); consume this element and keep the remainder.
        end_idx += 7
        current_element = data[start_idx:end_idx]
        data = data[end_idx:]

        clixml = ET.fromstring(current_element)

        # The XML namespace (if any) is embedded in the root tag as '{uri}Objs'.
        namespace_match = re.match(r'{(.*)}', clixml.tag)
        namespace = f"{{{namespace_match.group(1)}}}" if namespace_match else ""

        entries = clixml.findall(".//%sS" % namespace)
        if not entries:
            continue

        # If this is a new CLIXML element, add a newline to separate the messages.
        if lines:
            lines.append("\r\n")

        for string_entry in entries:
            # The 'S' attribute names the PowerShell stream (e.g. "Error");
            # entries without it match stream=None.
            actual_stream = string_entry.attrib.get('S', None)
            if actual_stream != stream:
                continue

            # Undo the _xDDDD_ escapes on the utf-16-be byte form (see rplcr).
            b_line = (string_entry.text or "").encode("utf-16-be")
            b_escaped = re.sub(_STRING_DESERIAL_FIND, rplcr, b_line)

            lines.append(b_escaped.decode("utf-16-be", errors="surrogatepass"))

    return lines
def build_array_list_clixml(
    values: list[str],
) -> str:
    """Builds a CLIXML string representing a System.Collections.ArrayList of strings."""

    def encode_escape(match: object) -> str:
        # Encode the matched char as big-endian UTF-16 and emit one _xDDDD_
        # escape per 2-byte code unit (a surrogate pair yields two escapes).
        raw_bytes = match.group(0).encode("utf-16-be", errors="surrogatepass")
        hex_text = base64.b16encode(raw_bytes).decode()
        return "".join(f"_x{hex_text[pos : pos + 4]}_" for pos in range(0, len(hex_text), 4))

    root = ET.Element('Objs', xmlns="http://schemas.microsoft.com/powershell/2004/04", Version="1.1.0.1")
    array_obj = ET.SubElement(root, 'Obj', RefId="0")
    type_names = ET.SubElement(array_obj, 'TN', RefId="0")
    ET.SubElement(type_names, 'T').text = "System.Collections.ArrayList"
    ET.SubElement(type_names, 'T').text = "System.Object"
    string_list = ET.SubElement(array_obj, 'LST')

    for value in values:
        # Literal '_x' / '_X' must be escaped as '_x005F_x' BEFORE the general
        # escape pass, since '_x' introduces the escape sequence itself.
        escaped = re.sub(_STRING_SERIAL_ESCAPE_ESCAPE, r"_x005F_\1", value)
        # Then escape control chars and surrogate-pair codepoints per MS-PSRP.
        escaped = re.sub(_STRING_SERIAL_ESCAPE, encode_escape, escaped)
        ET.SubElement(string_list, 'S').text = escaped

    return ET.tostring(root, encoding='unicode')
| {
"repo_id": "ansible/ansible",
"file_path": "lib/ansible/_internal/_powershell/_clixml.py",
"license": "GNU General Public License v3.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ansible/ansible:lib/ansible/_internal/_powershell/_script.py | """Helpers for PowerShell command line and scripting arguments."""
from __future__ import annotations
import base64
import re
import shlex
from . import _clixml
# Matches every character PowerShell treats as a single quote and therefore
# must be escaped (doubled) inside a single-quoted string.
# https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L265-L272
_PWSH_SINGLE_QUOTES = re.compile("(['\u2018\u2019\u201a\u201b])")

# A known-safe character set for a bare (unquoted) PowerShell command token.
_PWSH_BARE_CMD = re.compile(r'^[\w\-\\\.\[\]/_~!%^:]+$')
def _get_encoded_arguments(
    script: str,
    args: list[str] | None = None,
) -> list[str]:
    """Builds the -EncodedCommand cmdline arguments.

    Returns the argument list that makes PowerShell run the given script,
    with any positional arguments packed alongside it.

    :param script: The script to encode.
    :param args: The string arguments to encode.
    :returns: The encoded arguments as a cmdline argument list.
    """
    pwsh_args = ['-EncodedCommand', base64.b64encode(script.encode('utf-16-le')).decode()]

    if args:
        # -EncodedCommand cannot take normal positional parameters; they must
        # be passed as a base64-encoded CLIXML ArrayList via -EncodedArguments.
        arg_clixml = _clixml.build_array_list_clixml(args)
        pwsh_args.extend(['-EncodedArguments', base64.b64encode(arg_clixml.encode('utf-16-le')).decode()])

    return pwsh_args
def parse_encoded_cmdline(
cmd: str,
) -> tuple[str, list[str]] | None:
"""Parses a PowerShell encoded command line string.
Attempts to parse a command line string for the encoded command/arguments.
Will return None if not present or if the command line is not a valid for
PowerShell.
:param cmd: The command line string to parse.
:returns: A tuple of the decoded command and list of arguments or None if ill-formed.
"""
cmd_split = shlex.split(cmd)
try:
enc_cmd_idx = cmd_split.index("-EncodedCommand")
except ValueError:
enc_cmd_idx = -1
if enc_cmd_idx == -1 or len(cmd_split) <= enc_cmd_idx + 1:
return None
enc_cmd_raw = cmd_split[enc_cmd_idx + 1]
enc_cmd = base64.b64decode(enc_cmd_raw).decode('utf-16-le')
try:
enc_arg_idx = cmd_split.index("-EncodedArguments")
except ValueError:
return enc_cmd, []
if len(cmd_split) <= enc_arg_idx + 1:
# Was ill-formed so assume it's not a valid encoded pwsh cmd.
return None
clixml_raw = cmd_split[enc_arg_idx + 1]
clixml = base64.b64decode(clixml_raw).decode('utf-16-le')
cmd_args = _clixml.extract_clixml_strings(clixml)
return enc_cmd, cmd_args
def get_pwsh_encoded_cmdline(
    script: str,
    *,
    args: list[str] | None = None,
    pwsh_path: str = 'powershell',
    disable_input: bool = False,
    override_execution_policy: bool = False,
) -> list[str]:
    """Builds a PowerShell command line argument list.

    Builds the encoded command line arguments for running the provided script.

    :param script: The PowerShell script to encode.
    :param args: Optional positional arguments to the script.
    :param pwsh_path: The PowerShell executable path, defaults to powershell.
    :param disable_input: Tells PowerShell to not read input from stdin.
    :param override_execution_policy: Adds args to override the execution policy.
    :returns: The command line arguments as a list.
    """
    cmdline = [pwsh_path, '-NoProfile', '-NonInteractive']

    if override_execution_policy:
        cmdline += ['-ExecutionPolicy', 'Unrestricted']

    if disable_input:
        cmdline += ['-InputFormat', 'None']

    return cmdline + _get_encoded_arguments(script, args)
def build_pwsh_cmd_statement(
    command: str,
    args: list[str] | None = None,
) -> str:
    """Builds a PowerShell command statement.

    Builds a valid PowerShell command statement with optional arguments. This
    statement can be used in a PowerShell script to execute a command with the
    arguments provided.

    :param command: The PowerShell command to execute.
    :param args: The arguments to provide to the command.
    :returns: The PowerShell script as a string representing this command statement.
    """
    statement = quote_pwsh_argument(command)
    if statement != command:
        # A quoted command needs the call operator for pwsh to execute it
        # instead of treating it as a string expression.
        statement = f"& {statement}"

    for arg in args or []:
        statement = f"{statement} {quote_pwsh_argument(arg)}"

    return statement
def quote_pwsh_argument(
    value: str,
    *,
    force_quote: bool = False,
) -> str:
    """Quotes a value for use as a PowerShell argument.

    This safely quotes the provided value for use in a PowerShell script. The
    value will either be a bare keyword or a quoted string that is safe to use
    in a PowerShell script.

    :param value: The value to quote.
    :param force_quote: Force the quotes even if the value doesn't need it.
    :returns: The value that is safe to use as a PowerShell argument.
    """
    if force_quote or not _PWSH_BARE_CMD.match(value):
        # A pwsh single quoted string only needs the embedded single quote
        # characters doubled up to be escaped.
        return "'{0}'".format(_PWSH_SINGLE_QUOTES.sub(r'\1\1', value))

    return value
| {
"repo_id": "ansible/ansible",
"file_path": "lib/ansible/_internal/_powershell/_script.py",
"license": "GNU General Public License v3.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ansible/ansible:test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_powershell.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r'''
---
module: win_powershell
version_added: 1.5.0
short_description: Run PowerShell scripts
description:
- Runs a PowerShell script and outputs the data in a structured format.
- Use M(ansible.windows.win_command) or M(ansible.windows.win_shell) to run a traditional PowerShell process with
stdout, stderr, and rc results.
options:
arguments:
description:
- A list of arguments to pass to I(executable) when running a script in another PowerShell process.
- These are not arguments to pass to I(script), use I(parameters) for that purpose.
type: list
elements: str
chdir:
description:
- The PowerShell location to set when starting the script.
- This can be a location in any of the PowerShell providers.
- The default location is dependent on many factors, if relative paths are used then set this option.
type: str
creates:
description:
- A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
type: str
depth:
description:
- How deep the return values are serialized for C(result), C(output), and C(information[x].message_data).
- This also controls the depth of the diff output set by C($Ansible.Diff).
- Setting this to a higher value can dramatically increase the amount of data that needs to be returned.
default: 2
type: int
error_action:
description:
- The C($ErrorActionPreference) to set before executing I(script).
- C(silently_continue) will ignore any errors and exceptions raised.
- C(continue) is the default behaviour in PowerShell, errors are present in the I(error) return value but only
terminating exceptions will stop the script from continuing and set it as failed.
- C(stop) will treat errors like exceptions, will stop the script and set it as failed.
choices:
- silently_continue
- continue
- stop
default: continue
type: str
executable:
description:
- A custom PowerShell executable to run the script in.
- When not defined the script will run in the current module PowerShell interpreter.
- Both the remote PowerShell and the one specified by I(executable) must be running on PowerShell v5.1 or newer.
- Setting this value may change the values returned in the C(output) return value depending on the underlying .NET
type.
type: str
parameters:
description:
- Parameters to pass into the script as key value pairs.
- The key corresponds to the parameter name and the value is the value for that parameter.
type: dict
path:
description:
- The path to a PowerShell script to run.
- When O(remote_src=False), or unset, this path is searched on the Ansible host.
- When O(remote_src=True), or set, this path is searched on the target host.
- This option is mutually exclusive with O(script).
- Scripts are expected to be saved with UTF-8 encoding, using a different encoding will result in non-ASCII
characters being read as invalid characters.
type: str
version_added: 3.1.0
remote_src:
description:
- When V(false), the O(path) specified will be searched on the Ansible host.
- When V(true), the O(path) specified will be searched on the target host.
default: false
type: bool
version_added: 3.1.0
removes:
description:
- A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be
skipped.
type: str
script:
description:
- The PowerShell script to run.
- This option is mutually exclusive with O(path).
type: str
sensitive_parameters:
description:
- Parameters to pass into the script as a SecureString or PSCredential.
- Each sensitive value will be marked with C(no_log) to ensure they are
not exposed in the module invocation args logs.
- The I(value) suboption can be used to create a SecureString value while
I(username) and I(password) can be used to create a PSCredential value.
type: list
elements: dict
suboptions:
name:
description:
- The name of the parameter to pass this value to.
required: true
type: str
value:
description:
- The string to pass as a SecureString of the parameter specified by
I(name).
- This is mutually exclusive with I(username) and I(password).
type: str
username:
description:
- The C(UserName) for the PSCredential value.
- This is mutually exclusive with I(value).
- This value is B(NOT) added to the C(no_log) list.
type: str
password:
description:
- The C(Password) for the PSCredential value.
- This is mutually exclusive with I(value) and must be set when
I(username) is provided.
type: str
seealso:
- module: ansible.windows.win_command
- module: ansible.windows.win_shell
notes:
- The module is set as failed when a terminating exception is thrown, or C(error_action=stop) and a normal error record
is raised.
- The output values are processed using a custom filter and while it mostly matches the C(ConvertTo-Json) result the
following value types are different.
- C(DateTime) will be an ISO 8601 string in UTC, C(DateTimeOffset) will have the offset as specified by the value.
- C(Enum) will contain a dictionary with C(Type), C(String), C(Value) being the type name, string representation and
raw integer value respectively.
- C(Type) will contain a dictionary with C(Name), C(FullName), C(AssemblyQualifiedName), C(BaseType) being the type
name, the type name including the namespace, the full assembly name the type was defined in and the base type it
derives from.
- The script has access to the C($Ansible) variable where it can set C(Result), C(Changed), C(Failed), C(Diff),
or access C(Tmpdir).
- C($Ansible.Result) is a value that is returned back to the controller as is.
- C($Ansible.Diff) was added in the C(1.12.0) release of C(ansible.windows) and is a dictionary that is set to the diff
result that can be interpreted by Ansible.
- C($Ansible.Changed) can be set to C(true) or C(false) to reflect whether the module made a change or not. By default
this is set to C(true).
- C($Ansible.Failed) can be set to C(true) if the script wants to return the failure back to the controller.
- C($Ansible.Tmpdir) is the path to a temporary directory to use as a scratch location that is cleaned up after the
module has finished.
- C($Ansible.Verbosity) reveals Ansible's verbosity level for this play. Allows the script to set VerbosePreference/DebugPreference
based on verbosity. Added in C(1.9.0).
- Any host/console direct output like C(Write-Host) or C([Console]::WriteLine) is not considered an output object, they are
returned as a string in I(host_out) and I(host_err).
- Any output stream object is instead returned as a list in I(output). This is true not only for C(Write-Output) and its
built-in alias C(echo), but also for implicit output; i.e. C(Write-Output "foo") and C("foo") give the same result.
- The module will skip running the script when in check mode unless the script defines
C([CmdletBinding(SupportsShouldProcess)]).
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Run basic PowerShell script
ansible.windows.win_powershell:
script: |
echo "Hello World"
- name: Run script located in the adjacent files directory
ansible.windows.win_powershell:
path: my-script.ps1
- name: Run script located on the target Windows host
ansible.windows.win_powershell:
path: C:\temp\my-script.ps1
remote_src: true
- name: Run PowerShell script with parameters
ansible.windows.win_powershell:
script: |
[CmdletBinding()]
param (
[String]
$Path,
[Switch]
$Force
)
New-Item -Path $Path -ItemType Directory -Force:$Force
parameters:
Path: C:\temp
Force: true
- name: Run PowerShell script that modifies the module changed result
ansible.windows.win_powershell:
script: |
if (Get-Service -Name test -ErrorAction SilentlyContinue) {
Remove-Service -Name test
}
else {
$Ansible.Changed = $false
}
- name: Run PowerShell script in PowerShell 7
ansible.windows.win_powershell:
script: |
$PSVersionTable.PSVersion.Major
executable: pwsh.exe
arguments:
- -ExecutionPolicy
- ByPass
register: pwsh_output
failed_when:
- pwsh_output.output[0] != 7
- name: Run code in check mode
ansible.windows.win_powershell:
script: |
[CmdletBinding(SupportsShouldProcess)]
param ()
# Use $Ansible to detect check mode
if ($Ansible.CheckMode) {
echo 'running in check mode'
}
else {
echo 'running in normal mode'
}
# Use builtin ShouldProcess (-WhatIf)
if ($PSCmdlet.ShouldProcess('target')) {
echo 'also running in normal mode'
}
else {
echo 'also running in check mode'
}
check_mode: true
- name: Create a file in a specific directory using chdir
ansible.windows.win_powershell:
script: |
New-Item -Path 'created_by_ansible.txt' -ItemType File -Force
chdir: 'C:\Temp'
- name: Return a failure back to Ansible
ansible.windows.win_powershell:
script: |
if (Test-Path C:\bad.file) {
$Ansible.Failed = $true
}
- name: Define when the script made a change or not
ansible.windows.win_powershell:
script: |
if ((Get-Item WSMan:\localhost\Service\Auth\Basic).Value -eq 'true') {
Set-Item WSMan:\localhost\Service\Auth\Basic -Value false
}
else {
$Ansible.Changed = $true
}
- name: Define when to enable Verbose/Debug output
ansible.windows.win_powershell:
script: |
if ($Ansible.Verbosity -ge 3) {
$VerbosePreference = "Continue"
}
if ($Ansible.Verbosity -eq 5) {
$DebugPreference = "Continue"
}
Write-Output "Hello World!"
Write-Verbose "Hello World!"
Write-Debug "Hello World!"
- name: Set sensitive parameter value as SecureString parameter
ansible.windows.win_powershell:
script: |
param(
[string]$Uri,
[SecureString]$Token
)
Invoke-WebRequest -Uri $Uri -Token $Token
parameters:
Uri: foo
sensitive_parameters:
- name: Token
value: '{{ sensitive_value }}'
- name: Set credential parameter
ansible.windows.win_powershell:
script: |
param(
[string]$Uri,
[PSCredential]$Credential
)
Invoke-WebRequest -Uri $Uri -Credential $Credential
parameters:
Uri: foo
sensitive_parameters:
- name: Credential
username: CredUserName
password: '{{ sensitive_value }}'
'''
RETURN = r'''
result:
description:
- The values that were set by C($Ansible.Result) in the script.
- Defaults to an empty dict but can be set to anything by the script.
returned: always
type: complex
sample: {'key': 'value', 'other key': 1}
contains: {} # Satisfy the validate-modules sanity check
host_out:
description:
- The strings written to the host output, typically the stdout.
- This is not the same as objects sent to the output stream in PowerShell.
returned: always
type: str
sample: "Line 1\nLine 2"
host_err:
description:
- The strings written to the host error output, typically the stderr.
- This is not the same as objects sent to the error stream in PowerShell.
returned: always
type: str
sample: "Error 1\nError 2"
output:
description:
- A list containing all the objects outputted by the script.
- The list elements can be anything as it is based on what was ran.
returned: always
type: list
sample: ['output 1', 2, ['inner list'], {'key': 'value'}, None]
error:
description:
- A list of error records created by the script.
returned: always
type: list
elements: dict
contains:
output:
description:
- The formatted error record message as typically seen in a PowerShell console.
type: str
returned: always
sample: |
Write-Error "error" : error
+ CategoryInfo : NotSpecified: (:) [Write-Error], WriteErrorException
+ FullyQualifiedErrorId : Microsoft.PowerShell.Commands.WriteErrorException
error_details:
description:
- Additional details about an ErrorRecord.
- Can be null if there are not additional details.
type: dict
contains:
message:
description:
- Message for the error record.
returned: always
type: str
sample: Specific error message
recommended_action:
description:
- Recommended action in the event that this error occurs.
- This is empty unless the code which generates the error adds this explicitly.
returned: always
type: str
sample: Delete file
exception:
description:
- Details about the exception behind the error record.
type: dict
contains:
message:
description:
- The exception message.
type: str
returned: always
sample: The method ran into an error
type:
description:
- The full .NET type of the Exception class.
type: str
returned: always
sample: System.Exception
help_link:
description:
- A link to the help details for the exception.
- May not be set as it's dependent on whether the .NET exception class provides this info.
type: str
returned: always
sample: http://docs.ansible.com/
source:
description:
- Name of the application or object that causes the error.
- This may be an empty string as it's dependent on the code that raises the exception.
type: str
returned: always
sample: C:\Windows
hresult:
description:
- The signed integer assigned to this exception.
- May not be set as it's dependent on whether the .NET exception class provides this info.
type: int
returned: always
sample: -1
inner_exception:
description:
- The inner exception details if there is one present.
- The dict contains the same keys as a normal exception.
returned: always
type: dict
target_object:
description:
- The object which the error occurred.
- May be null if no object was specified when the record was created.
- The type of this object depends on the error record itself.
- If the value is a complex type, it will follow the C(depth) limit specified.
type: raw
returned: always
sample: C:\Windows
category_info:
description:
- More information about the error record.
type: dict
contains:
category:
description:
- The category name of the error record.
type: str
returned: always
sample: NotSpecified
category_id:
description:
- The integer representation of the category.
type: int
returned: always
sample: 0
activity:
description:
- Description of the operation which encountered the error.
type: str
returned: always
sample: Write-Error
reason:
description:
- Description of the error.
type: str
returned: always
sample: WriteErrorException
target_name:
description:
- Description of the target object.
- Can be an empty string if no target was specified.
type: str
returned: always
sample: C:\Windows
target_type:
description:
- Description of the type of the target object.
- Can be an empty string if no target object was specified.
type: str
returned: always
sample: String
fully_qualified_error_id:
description:
- The unique identifier for the error condition.
- May be null if no id was specified when the record was created.
type: str
returned: always
sample: ParameterBindingFailed
script_stack_trace:
description:
- The script stack trace for the error record.
type: str
returned: always
sample: 'at <ScriptBlock>, <No file>: line 1'
pipeline_iteration_info:
description:
- The status of the pipeline when this record was created.
- The values are 0 index based.
- Each element entry represents the command index in a pipeline statement.
- The value of each element represents the pipeline input idx in that command.
- For Example C('C:\Windows', 'C:\temp' | Get-ChildItem | Get-Item), C([1, 2, 9]) represents an error occurred
with the 2nd output, 3rd, and 9th output of the 1st, 2nd, and 3rd command in that pipeline respectively.
type: list
elements: int
returned: always
sample: [0, 0]
warning:
description:
- A list of warning messages created by the script.
- Warning messages only appear when C($WarningPreference = 'Continue').
returned: always
type: list
elements: str
sample: ['warning record']
verbose:
description:
- A list of verbose messages created by the script.
- Verbose messages only appear when C($VerbosePreference = 'Continue').
returned: always
type: list
elements: str
sample: ['verbose record']
debug:
description:
- A list of debug messages created by the script.
- Debug messages only appear when C($DebugPreference = 'Continue').
returned: always
type: list
elements: str
sample: ['debug record']
information:
description:
- A list of information records created by the script.
- The information stream was only added in PowerShell v5, older versions will always have an empty list as a value.
returned: always
type: list
elements: dict
contains:
message_data:
description:
- Message data associated with the record.
- The value here can be of any type.
type: complex
returned: always
sample: information record
contains: {} # Satisfy the validate-modules sanity check
source:
description:
- The source of the record.
type: str
returned: always
sample: Write-Information
time_generated:
description:
- The time the record was generated.
- This is the time in UTC as an ISO 8601 formatted string.
type: str
returned: always
sample: '2021-02-11T04:46:00.4694240Z'
tags:
description:
- A list of tags associated with the record.
type: list
elements: str
returned: always
sample: ['Host']
'''
| {
"repo_id": "ansible/ansible",
"file_path": "test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_powershell.py",
"license": "GNU General Public License v3.0",
"lines": 531,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/_internal/_powershell/test_clixml.py | from __future__ import annotations
import pytest
from ansible._internal._powershell import _clixml
# Minimal CLIXML stderr payload containing a single Error stream string.
CLIXML_WITH_ERROR = b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">' \
    b'<S S="Error">My error</S></Objs>'
def test_replace_stderr_clixml_by_itself():
    """A buffer that is nothing but a CLIXML blob is fully replaced."""
    assert _clixml.replace_stderr_clixml(CLIXML_WITH_ERROR) == b"My error"
def test_replace_stderr_clixml_with_pre_and_post_lines():
    """Lines around the CLIXML blob are preserved while the blob is replaced."""
    raw = b"\r\n".join([b"pre", CLIXML_WITH_ERROR, b"post"])
    assert _clixml.replace_stderr_clixml(raw) == b"pre\r\nMy error\r\npost"
def test_replace_stderr_clixml_with_remaining_data_on_line():
    """Trailing data on the CLIXML line is kept after the replacement."""
    raw = b"pre\r\n" + CLIXML_WITH_ERROR + b"inline\r\npost"
    assert _clixml.replace_stderr_clixml(raw) == b"pre\r\nMy errorinline\r\npost"
def test_replace_stderr_clixml_with_non_utf8_data():
    """Invalid UTF-8 bytes in the payload still produce a usable replacement."""
    # \x82 in cp437 is é but is an invalid UTF-8 sequence
    raw = CLIXML_WITH_ERROR.replace(b"error", b"\x82rror")
    assert _clixml.replace_stderr_clixml(raw) == "My érror".encode("utf-8")
def test_replace_stderr_clixml_across_lines():
    """A CLIXML document split across multiple lines is left untouched."""
    # Fixed typo in the test name ("liens" -> "lines"); pytest discovers tests
    # by prefix so no caller is affected.
    data = b"#< CLIXML\r\n<Objs Version=\"foo\">\r\n</Objs>"
    assert _clixml.replace_stderr_clixml(data) == data
@pytest.mark.parametrize("newline", ["\n", "\r\n"])
def test_replace_stderr_clixml_with_invalid_clixml_data(newline):
    """Malformed XML after the CLIXML header is passed through unchanged."""
    raw = f"#< CLIXML{newline}<Objs Version=\"foo\"><</Objs>".encode()
    assert _clixml.replace_stderr_clixml(raw) == raw
def test_replace_stderr_clixml_with_no_clixml():
    """Plain data without a CLIXML header is returned as-is."""
    assert _clixml.replace_stderr_clixml(b"foo") == b"foo"
def test_replace_stderr_clixml_with_header_but_no_data():
    """A trailing CLIXML header with no payload is returned as-is."""
    raw = b"foo\r\n#< CLIXML\r\n"
    assert _clixml.replace_stderr_clixml(raw) == raw
def test_extract_clixml_empty():
    """An Objs element with no children yields no strings."""
    empty = '#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04"></Objs>'
    assert _clixml.extract_clixml_strings(empty) == []
def test_extract_clixml_with_progress():
    """Progress records are not string output and produce no extracted strings."""
    progress = (
        '#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">'
        '<Obj S="progress" RefId="0"><TN RefId="0"><T>System.Management.Automation.PSCustomObject</T><T>System.Object</T></TN><MS>'
        '<I64 N="SourceId">1</I64><PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />'
        '<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj></Objs>'
    )
    assert _clixml.extract_clixml_strings(progress) == []
def test_extract_clixml_error_single_stream():
    """Error stream strings are extracted in order with _xHHHH_ escapes decoded."""
    single_stream = (
        '#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">'
        '<S S="Error">fake : The term \'fake\' is not recognized as the name of a cmdlet. Check _x000D__x000A_</S>'
        '<S S="Error">the spelling of the name, or if a path was included._x000D__x000A_</S>'
        '<S S="Error">At line:1 char:1_x000D__x000A_</S>'
        '<S S="Error">+ fake cmdlet_x000D__x000A_</S><S S="Error">+ ~~~~_x000D__x000A_</S>'
        '<S S="Error"> + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException_x000D__x000A_</S>'
        '<S S="Error"> + FullyQualifiedErrorId : CommandNotFoundException_x000D__x000A_</S>'
        '<S S="Error"> _x000D__x000A_</S>'
        '</Objs>'
    )
    expected = [
        "fake : The term 'fake' is not recognized as the name of a cmdlet. Check \r\n",
        "the spelling of the name, or if a path was included.\r\n",
        "At line:1 char:1\r\n",
        "+ fake cmdlet\r\n",
        "+ ~~~~\r\n",
        " + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException\r\n",
        " + FullyQualifiedErrorId : CommandNotFoundException\r\n",
        " \r\n",
    ]
    assert _clixml.extract_clixml_strings(single_stream, stream="Error") == expected
def test_extract_clixml_error_multiple_streams():
    """Only strings belonging to the requested stream are returned."""
    multiple_stream = (
        '#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">'
        '<S S="Error">fake : The term \'fake\' is not recognized as the name of a cmdlet. Check _x000D__x000A_</S>'
        '<S S="Error">the spelling of the name, or if a path was included._x000D__x000A_</S>'
        '<S S="Error">At line:1 char:1_x000D__x000A_</S>'
        '<S S="Error">+ fake cmdlet_x000D__x000A_</S><S S="Error">+ ~~~~_x000D__x000A_</S>'
        '<S S="Error"> + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException_x000D__x000A_</S>'
        '<S S="Error"> + FullyQualifiedErrorId : CommandNotFoundException_x000D__x000A_</S><S S="Error"> _x000D__x000A_</S>'
        '<S S="Info">hi info</S>'
        '<S S="Info">other</S>'
        '</Objs>'
    )
    assert _clixml.extract_clixml_strings(multiple_stream, stream="Info") == ["hi info", "other"]
def test_extract_clixml_error_multiple_elements():
    """Strings from multiple Objs documents are separated by a "\\r\\n" entry."""
    multiple_elements = (
        '#< CLIXML\r\n#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">'
        '<Obj S="progress" RefId="0"><TN RefId="0"><T>System.Management.Automation.PSCustomObject</T><T>System.Object</T></TN><MS>'
        '<I64 N="SourceId">1</I64><PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />'
        '<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj>'
        '<S S="Error">Error 1</S></Objs>'
        '<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04"><Obj S="progress" RefId="0">'
        '<TN RefId="0"><T>System.Management.Automation.PSCustomObject</T><T>System.Object</T></TN><MS>'
        '<I64 N="SourceId">1</I64><PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />'
        '<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj>'
        '<Obj S="progress" RefId="1"><TNRef RefId="0" /><MS><I64 N="SourceId">2</I64>'
        '<PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />'
        '<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj>'
        '<S S="Error">Error 2</S></Objs>'
    )
    assert _clixml.extract_clixml_strings(multiple_elements, stream="Error") == ["Error 1", "\r\n", "Error 2"]
@pytest.mark.parametrize('clixml, expected', [
    ('', ''),
    ('just newline _x000A_', 'just newline \n'),
    ('surrogate pair _xD83C__xDFB5_', 'surrogate pair 🎵'),
    ('null char _x0000_', 'null char \0'),
    ('normal char _x0061_', 'normal char a'),
    ('escaped literal _x005F_x005F_', 'escaped literal _x005F_'),
    ('underscope before escape _x005F__x000A_', 'underscope before escape _\n'),
    ('surrogate high _xD83C_', 'surrogate high \uD83C'),
    ('surrogate low _xDFB5_', 'surrogate low \uDFB5'),
    ('lower case hex _x005f_', 'lower case hex _'),
    ('invalid hex _x005G_', 'invalid hex _x005G_'),
    # Tests regex actually matches UTF-16-BE hex chars (b"\x00" then hex char).
    ("_x\u6100\u6200\u6300\u6400_", "_x\u6100\u6200\u6300\u6400_"),
])
def test_extract_clixml_error_with_complex_escaped_chars(clixml, expected):
    """Each _xHHHH_ escape form decodes (or survives) as documented per case."""
    # Fixed typo in the test name ("comlex" -> "complex") and removed a
    # commented-out leftover assertion. Header also normalized to the
    # '#< CLIXML' form used by every other test; extraction does not depend on
    # the header (see test_extract_clixml_string_encoded_arguments).
    clixml_data = (
        '#< CLIXML\r\n'
        '<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">'
        f'<S S="Error">{clixml}</S>'
        '</Objs>'
    )
    actual = _clixml.extract_clixml_strings(clixml_data, stream="Error")
    assert actual == [expected]
def test_extract_clixml_string_encoded_arguments():
    """Strings in an ArrayList CLIXML payload are extracted with escapes decoded."""
    # Generated from - change pwsh to an exe that prints back argv
    # pwsh {} -args 'simple', '_x005F_', ([char]::ConvertFromUtf32(0x1F3B5))
    encoded_clixml = (
        '<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">'
        '<Obj RefId="0">'
        '<TN RefId="0"><T>System.Collections.ArrayList</T><T>System.Object</T></TN>'
        '<LST>'
        '<S>simple</S>'
        '<S>_x005F_x005F_</S>'
        '<S>_xD83C__xDFB5_</S>'
        '</LST>'
        '</Obj>'
        '</Objs>'
    )
    assert _clixml.extract_clixml_strings(encoded_clixml) == ['simple', '_x005F_', "\U0001F3B5"]
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/_internal/_powershell/test_clixml.py",
"license": "GNU General Public License v3.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/_internal/_powershell/test_script.py | from __future__ import annotations
import pytest
from ansible._internal._powershell import _script
# print_argv is a binary that echoes back the argv it receives.
# Each case is a (script, script_args, expected_pwsh_cmdline_args) tuple shared
# by the round-trip encode/parse tests below.
ENCODED_CMD_CASES = [
    (
        # print_argv {'foo'}
        "'foo'",
        [],
        ['-EncodedCommand', 'JwBmAG8AbwAnAA=='],
    ),
    (
        # print_argv {'foo'} -args 'simple', '_x005F_', ([char]::ConvertFromUtf32(0x1F3B5))
        "'foo'",
        ['simple', '_x005F_', "\U0001F3B5"],
        ['-EncodedCommand', 'JwBmAG8AbwAnAA==', '-EncodedArguments', (
            'PABPAGIAagBzACAAeABtAGwAbgBzAD0AIgBoAHQAdABwADoALwAvAHMAYwBoAGUAbQBhAHMALgBtAGkAYwByAG8AcwBvAGYAdAAuAGMAb'
            'wBtAC8AcABvAHcAZQByAHMAaABlAGwAbAAvADIAMAAwADQALwAwADQAIgAgAFYAZQByAHMAaQBvAG4APQAiADEALgAxAC4AMAAuADEAIg'
            'A+ADwATwBiAGoAIABSAGUAZgBJAGQAPQAiADAAIgA+ADwAVABOACAAUgBlAGYASQBkAD0AIgAwACIAPgA8AFQAPgBTAHkAcwB0AGUAbQA'
            'uAEMAbwBsAGwAZQBjAHQAaQBvAG4AcwAuAEEAcgByAGEAeQBMAGkAcwB0ADwALwBUAD4APABUAD4AUwB5AHMAdABlAG0ALgBPAGIAagBl'
            'AGMAdAA8AC8AVAA+ADwALwBUAE4APgA8AEwAUwBUAD4APABTAD4AcwBpAG0AcABsAGUAPAAvAFMAPgA8AFMAPgBfAHgAMAAwADUARgBfA'
            'HgAMAAwADUARgBfADwALwBTAD4APABTAD4AXwB4AEQAOAAzAEMAXwBfAHgARABGAEIANQBfADwALwBTAD4APAAvAEwAUwBUAD4APAAvAE'
            '8AYgBqAD4APAAvAE8AYgBqAHMAPgA='
        )],
    )
]
@pytest.mark.parametrize('value, expected', [
    ('foo', 'foo'),
    ('foo_bar', 'foo_bar'),
    ('foo-bar', 'foo-bar'),
    ('123', '123'),
    (r'C:\temp\pwsh.exe', r'C:\temp\pwsh.exe'),
    ('C:/temp/pwsh.exe', 'C:/temp/pwsh.exe'),
    ('', "''"),
    ('foo bar', "'foo bar'"),
    ('@foo', "'@foo'"),
    ("foo'bar", "'foo''bar'"),
    ("foo\u2018bar", "'foo\u2018\u2018bar'"),
    ("foo\u2019bar", "'foo\u2019\u2019bar'"),
    ("foo\u201abar", "'foo\u201a\u201abar'"),
    ("foo\u201bbar", "'foo\u201b\u201bbar'"),
])
def test_quote_argument(value, expected):
    """Bare-safe values pass through; everything else is single quoted."""
    assert _script.quote_pwsh_argument(value) == expected
def test_quote_argument_force():
    """force_quote wraps even a bare-safe value in single quotes."""
    assert _script.quote_pwsh_argument('foo', force_quote=True) == "'foo'"
@pytest.mark.parametrize('cmd, args, expected', ENCODED_CMD_CASES)
def test_build_encoded_command(cmd, args, expected):
    """_get_encoded_arguments produces the expected -Encoded* cmdline args."""
    assert _script._get_encoded_arguments(cmd, args) == expected
@pytest.mark.parametrize('expected_cmd, expected_args, cmd_args', ENCODED_CMD_CASES)
def test_parse_encoded_command(expected_cmd, expected_args, cmd_args):
    """parse_encoded_cmdline round-trips the encoded cmdline args."""
    parsed = _script.parse_encoded_cmdline(" ".join(cmd_args))
    assert parsed == (expected_cmd, expected_args)
def test_parse_encoded_command_no_encoded_command():
    """-EncodedArguments without -EncodedCommand is not a valid encoded cmdline."""
    assert _script.parse_encoded_cmdline('pwsh -EncodedArguments YQA=') is None
def test_parse_encoded_command_no_value():
    """-EncodedCommand with no following value is rejected."""
    assert _script.parse_encoded_cmdline('pwsh -EncodedCommand') is None
def test_parse_encoded_command_no_args():
    """Trailing tokens after the encoded command are not treated as script args."""
    parsed = _script.parse_encoded_cmdline('pwsh -EncodedCommand YQA= foo')
    assert parsed == ('a', [])
def test_parse_encoded_command_quoted():
    """shlex unquoting still finds a quoted '-EncodedCommand' flag."""
    parsed = _script.parse_encoded_cmdline("pwsh '-EncodedCommand' YQA= foo")
    assert parsed == ('a', [])
def test_parse_encoded_command_no_value_after_args():
    """-EncodedArguments with no following value invalidates the whole cmdline."""
    assert _script.parse_encoded_cmdline('pwsh -EncodedCommand YQA= -EncodedArguments') is None
@pytest.mark.parametrize(
    ('cmd', 'args', 'expected'),
    (
        pytest.param('foo.exe', None, 'foo.exe', id='no-args-relative-path'),
        pytest.param(r'C:\Program Files\foo.exe', None, r"& 'C:\Program Files\foo.exe'", id='no-args-absolute-path'),
        pytest.param('foo.exe', ['simple', 'with space'], "foo.exe simple 'with space'", id='whitespace'),
        pytest.param('foo.exe', ["with 'single' quote"], "foo.exe 'with ''single'' quote'", id='single-quote'),
        pytest.param('C:/path with space/test', ['arg1', 'arg 2'], "& 'C:/path with space/test' arg1 'arg 2'", id='spaced-path'),
    ),
)
def test_build_pwsh_cmd_statement(cmd, args, expected):
    """Statements quote paths/args and add the call operator only when needed."""
    assert _script.build_pwsh_cmd_statement(cmd, args) == expected
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/_internal/_powershell/test_script.py",
"license": "GNU General Public License v3.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/utils/test_jsonrpc.py | from __future__ import annotations
import json
import pickle
from ansible._internal._datatag._tags import Origin
from ansible.utils.jsonrpc import JsonRpcServer
def test_response_type_cleansing() -> None:
    """Avoid unpickling errors in module contexts by ensuring that non-scalar JsonRpc responses are not pickled with tags."""

    class RPCTest:
        def returns_list_with_tagged_str(self) -> list:
            return [Origin(description="blar").tag("taggedstr")]

    server = JsonRpcServer()
    server.register(RPCTest())

    request = dict(method="returns_list_with_tagged_str", id=1, params=(tuple(), {}))
    raw_response = server.handle_request(json.dumps(request))
    pickled_result = json.loads(raw_response).get("result")
    assert pickled_result is not None

    # The result is a pickle round-tripped through a surrogateescape-encoded string.
    unpickled = pickle.loads(pickled_result.encode(errors="surrogateescape"))
    assert unpickled == ["taggedstr"]
    assert not Origin.is_tagged_on(unpickled[0])
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/utils/test_jsonrpc.py",
"license": "GNU General Public License v3.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/sanity/code-smell/no-s3.py | """
Disallow direct linking to S3 buckets.
S3 buckets should be accessed through a CloudFront distribution.
"""
from __future__ import annotations
import re
import sys
def main():
    """Main entry point."""
    paths = sys.argv[1:] or sys.stdin.read().splitlines()

    for path in paths:
        with open(path, 'rb') as path_fd:
            for line_no, b_text in enumerate(path_fd.readlines()):
                try:
                    text = b_text.decode()
                except UnicodeDecodeError:
                    # Skip lines that are not valid UTF-8 (e.g. binary content).
                    continue

                match = re.search(r'(http.*?s3\..*?amazonaws\.com)', text)

                if match:
                    print(f'{path}:{line_no + 1}:{match.start(1) + 1}: use a CloudFront distribution instead of an S3 bucket: {match.group(1)}')


if __name__ == '__main__':
    main()
| {
"repo_id": "ansible/ansible",
"file_path": "test/sanity/code-smell/no-s3.py",
"license": "GNU General Public License v3.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/ansible-test-collection-indirect-by-meta/ansible_collections/ansible_test/ansible_test_collection_indirect_by_meta/plugins/modules/hello.py | from __future__ import annotations
from ansible.module_utils.basic import AnsibleModule
def main():
    """Entry point for this test module: report which indirection mechanism loaded the collection."""
    module = AnsibleModule(argument_spec=dict())
    # 'meta' is asserted by the integration test to prove the meta-based indirect dependency was used.
    module.exit_json(source='meta')
if __name__ == '__main__':
    main()
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/ansible-test-collection-indirect-by-meta/ansible_collections/ansible_test/ansible_test_collection_indirect_by_meta/plugins/modules/hello.py",
"license": "GNU General Public License v3.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/ansible-test-collection-indirect-by-needs-target/ansible_collections/ansible_test/ansible_test_collection_indirect_by_needs_target/plugins/modules/hello.py | from __future__ import annotations
from ansible.module_utils.basic import AnsibleModule
def main():
    """Entry point for this test module: report which indirection mechanism loaded the collection."""
    module = AnsibleModule(argument_spec=dict())
    # 'needs/target' is asserted by the integration test to prove the needs/target indirect dependency was used.
    module.exit_json(source='needs/target')
if __name__ == '__main__':
    main()
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/ansible-test-collection-indirect-by-needs-target/ansible_collections/ansible_test/ansible_test_collection_indirect_by_needs_target/plugins/modules/hello.py",
"license": "GNU General Public License v3.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/ansible-test-sanity-pylint/ansible_collections/ns/col/plugins/module_utils/unwanted.py | from __future__ import annotations
import os
import subprocess
import sys
def main() -> None:
    """
    Deliberately call every process-spawning API.

    This is a fixture for the ansible-test pylint sanity test: each call below is
    expected to be flagged as an unwanted function. Do not "fix" these calls.
    """
    # os-level spawn/exec wrappers
    os.popen('echo')
    os.posix_spawn('echo', ['echo'], {})
    os.posix_spawnp('echo', ['echo'], {})
    os.spawnl(os.P_WAIT, 'echo', 'echo')
    os.spawnle(os.P_WAIT, 'echo', 'echo', {})
    os.spawnlp(os.P_WAIT, 'echo', 'echo')
    os.spawnlpe(os.P_WAIT, 'echo', 'echo', {})
    os.spawnv(os.P_WAIT, 'echo', ['echo'])
    os.spawnve(os.P_WAIT, 'echo', ['echo'], {})
    os.spawnvp(os.P_WAIT, 'echo', ['echo'])
    os.spawnvpe(os.P_WAIT, 'echo', ['echo'], {})
    os.system('echo')
    # subprocess-level entry points
    subprocess.Popen('echo')
    subprocess.call('echo')
    subprocess.check_call('echo')
    subprocess.check_output('echo')
    subprocess.getoutput('echo')
    subprocess.getstatusoutput('echo')
    subprocess.run('echo', check=True)
    # print/exit are also flagged in module_utils context
    print()
    sys.exit(0)
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/ansible-test-sanity-pylint/ansible_collections/ns/col/plugins/module_utils/unwanted.py",
"license": "GNU General Public License v3.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/sanity/code-smell/codespell.py | """Check for common misspelled words using codespell."""
from __future__ import annotations
import pathlib
import re
import subprocess
import sys
import tempfile
import typing as t
def main() -> None:
    """Run codespell over the given paths and print misspellings in path:line:col lint format."""
    paths = sys.argv[1:] or sys.stdin.read().splitlines()
    ignore_words_lines = (pathlib.Path(__file__).parent / 'codespell' / 'ignore-words.txt').read_text().splitlines()
    # Strip trailing '#'-comments from each ignore-words entry before passing to codespell.
    ignore_words = [re.split(r'\s*#', line, maxsplit=1)[0] for line in ignore_words_lines]
    quiet_level = (
        32  # don't print configuration file
        + 16  # don't print the list of fixed files
        + 4  # omit warnings about automatic fixes that were disabled in the dictionary
        + 2  # disable warnings about binary files
        + 1  # disable warnings about wrong encoding
    )
    # Never spell-check the sanity ignore list itself.
    paths = [path for path in paths if path != 'test/sanity/ignore.txt']
    with tempfile.NamedTemporaryFile(mode='wt') as ignore_words_temp_file:
        ignore_words_temp_file.write('\n'.join(ignore_words))
        ignore_words_temp_file.flush()
        cmd = [
            sys.executable,
            '-m',
            'codespell_lib',
            '--disable-colors',
            '--builtin',
            'clear',  # minimize false positives
            '--quiet-level',
            str(quiet_level),
            '--ignore-words',
            ignore_words_temp_file.name,
        ] + paths
        process = subprocess.run(
            cmd,
            stdin=subprocess.DEVNULL,
            capture_output=True,
            check=False,
            text=True,
        )
    if process.stderr:
        print(process.stderr.strip(), file=sys.stderr)
        sys.exit(1)
    if not (stdout := process.stdout.strip()):
        return  # no output means no misspellings found
    # NOTE(review): 65 is presumably codespell's "misspellings found" exit code (EX_DATAERR) — confirm against codespell docs.
    if process.returncode not in (0, 65):
        print(f'Unexpected return code: {process.returncode}')
        sys.exit(1)
    pattern = re.compile(r'^(?P<path>[^:]*):(?P<line>[0-9]+): (?P<left>.*) ==> (?P<right>.*)$')
    matches = parse_to_list_of_dict(pattern, stdout)
    results: list[str] = []
    for match in matches:
        path, line_num_str, left, right = match['path'], match['line'], match['left'], match['right']
        line_num = int(line_num_str)
        try:
            # Re-read the flagged line to compute a 1-based column for the lint output.
            line = pathlib.Path(path).read_text().splitlines()[line_num - 1]
            col_num = line.index(left) + 1
        except UnicodeDecodeError:
            col_num = 0  # cannot locate the word in undecodable content; report column 0
        if len(left) <= 3 and pathlib.Path(path).suffix == '.py':
            continue  # ignore short words in Python files, as they're likely just short variable names
        code = re.sub('[^a-zA-Z]', '_', left)
        results.append(f"{path}:{line_num}:{col_num}: {code}: {left} ==> {right}")
    if results:
        print('\n'.join(results))
def parse_to_list_of_dict(pattern: re.Pattern, value: str) -> list[dict[str, t.Any]]:
    """Match every line of value against pattern and return each match's named groups.

    Raises an Exception listing all lines that did not match the pattern.
    """
    matched: list[dict[str, t.Any]] = []
    unmatched: list[str] = []

    for line in value.splitlines():
        if match := re.search(pattern, line):
            matched.append(match.groupdict())
        else:
            unmatched.append(line)

    if unmatched:
        raise Exception(f'Pattern {pattern!r} did not match values:\n' + '\n'.join(unmatched))

    return matched


if __name__ == '__main__':
    main()
| {
"repo_id": "ansible/ansible",
"file_path": "test/sanity/code-smell/codespell.py",
"license": "GNU General Public License v3.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/ansible_test/_internal/ci/test_azp.py | from __future__ import annotations
import argparse
import json
import os
import typing as t
import pytest
import pytest_mock
if t.TYPE_CHECKING:
from ansible_test._internal.ci.azp import AzurePipelinesChanges
def create_azure_pipelines_changes(mocker: pytest_mock.MockerFixture) -> AzurePipelinesChanges:
    """Prepare an AzurePipelinesChanges instance for testing."""
    # Imports are deferred so collecting this module does not require ansible-test internals.
    from ansible_test._internal.ci.azp import AzurePipelinesChanges
    from ansible_test._internal.config import CommonConfig
    # Minimal argparse namespace with the CLI attributes CommonConfig expects.
    namespace = argparse.Namespace()
    namespace.color = False
    namespace.explain = False
    namespace.verbosity = False
    namespace.debug = False
    namespace.truncate = False
    namespace.redact = False
    namespace.display_traceback = False
    config = CommonConfig(namespace, 'sanity')
    # Simulate an Azure Pipelines GitHub-triggered build of the devel branch.
    env = dict(
        HOME=os.environ['HOME'],
        SYSTEM_COLLECTIONURI='https://dev.azure.com/ansible/',
        SYSTEM_TEAMPROJECT='ansible',
        BUILD_REPOSITORY_PROVIDER='GitHub',
        BUILD_SOURCEBRANCH='devel',
        BUILD_SOURCEBRANCHNAME='devel',
    )
    # clear=True ensures only the simulated AZP environment is visible.
    mocker.patch.dict(os.environ, env, clear=True)
    return AzurePipelinesChanges(config)
@pytest.mark.parametrize("status_code,response,expected_commits,expected_warning", (
    # valid 200 responses
    (200, dict(value=[]), None, None),
    (200, dict(value=[dict(sourceVersion='abc')]), {'abc'}, None),
    # invalid 200 responses
    (200, 'not-json', None, "Unable to find project due to HTTP 200 Non-JSON result."),
    (200, '"not-a-dict"', None, "Unexpected response format from HTTP 200 JSON result: string indices must be integers, not 'str'"),
    (200, dict(value='not-a-list'), None, "Unexpected response format from HTTP 200 JSON result: string indices must be integers, not 'str'"),
    (200, dict(value=['not-a-dict']), None, "Unexpected response format from HTTP 200 JSON result: string indices must be integers, not 'str'"),
    (200, dict(), None, "Missing 'value' key in response from HTTP 200 JSON result."),
    (200, dict(value=[{}]), None, "Missing 'sourceVersion' key in response from HTTP 200 JSON result."),
    # non-200 responses
    (404, '', None, "Unable to find project due to HTTP 404 Non-JSON result."),
    (404, '""', None, "Unable to find project due to HTTP 404 JSON result."),
    (404, dict(value=[]), None, "Unable to find project due to HTTP 404 JSON result."),
))
def test_get_successful_merge_run_commits(
    status_code: int,
    response: object,
    expected_commits: set[str] | None,
    expected_warning: str | None,
    mocker: pytest_mock.MockerFixture,
) -> None:
    """Verify AZP commit retrieval handles invalid responses gracefully."""
    # Imports are deferred so collecting this module does not require ansible-test internals.
    from ansible_test._internal.ci.azp import AzurePipelinesChanges
    from ansible_test._internal.git import Git
    from ansible_test._internal.http import HttpClient, HttpResponse
    from ansible_test._internal.util import display
    # dict/list parametrize cases are serialized here; str cases are passed through as-is.
    if not isinstance(response, str):
        response = json.dumps(response)
    if expected_warning:
        expected_warning = f'Cannot determine changes. All tests will be executed. Reason: {expected_warning}'
    # Stub out the HTTP layer and capture warnings instead of displaying them.
    patched_get = mocker.patch.object(HttpClient, 'get', return_value=HttpResponse('GET', 'URL', status_code, response))
    patched_warning = mocker.patch.object(display, 'warning')
    mocker.patch.object(Git, 'run_git', return_value='')  # avoid git
    spy_get_successful_merge_run_commits = mocker.spy(AzurePipelinesChanges, 'get_successful_merge_run_commits')
    # Constructing AzurePipelinesChanges triggers the commit lookup under test.
    create_azure_pipelines_changes(mocker)
    assert patched_get.call_count == 1
    if expected_warning:
        patched_warning.assert_called_once_with(expected_warning)
    else:
        patched_warning.assert_not_called()
    # Failures must degrade to an empty commit set rather than raising.
    assert spy_get_successful_merge_run_commits.spy_return == (expected_commits or set())
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/ansible_test/_internal/ci/test_azp.py",
"license": "GNU General Public License v3.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/_internal/_encryption/test_crypt.py | from __future__ import annotations
import errno
import pytest
from pytest_mock import MockerFixture
from ansible._internal._encryption._crypt import _CryptLib, CryptFacade, _FAILURE_TOKENS
class TestCryptFacade:
    """Unit tests for CryptFacade library selection, setup, and error handling."""

    def test_unsupported_platform(self, mocker: MockerFixture) -> None:
        """Test that unsupported platforms are skipped."""
        # include_platforms restricts the lib to a platform that can never match.
        mock_libs = (
            _CryptLib('foo', include_platforms=frozenset({'fake_platform'})),
        )
        mocker.patch('ansible._internal._encryption._crypt._CRYPT_LIBS', mock_libs)
        with pytest.raises(ImportError, match=r'Cannot find crypt implementation'):
            CryptFacade()
    def test_libc_fallback(self, mocker: MockerFixture) -> None:
        """Test that a library name of None will load the libc library."""
        mock_libs = (
            _CryptLib(None),
        )
        mocker.patch('ansible._internal._encryption._crypt._CRYPT_LIBS', mock_libs)
        load_lib_mock = mocker.patch('ctypes.cdll.LoadLibrary')
        crypt_facade = CryptFacade()
        # LoadLibrary(None) is the ctypes idiom for "the current process / libc".
        load_lib_mock.assert_called_once_with(None)
        assert crypt_facade._crypt_name is None
    def test_library_with_no_crypt_methods(self, mocker: MockerFixture) -> None:
        """Test that a library without crypt() and crypt_r() is skipped."""
        mock_libs = (
            _CryptLib(None),
        )
        # An empty stand-in CDLL: attribute access for crypt/crypt_r raises AttributeError.
        class MockCDLL:
            pass
        mocker.patch('ansible._internal._encryption._crypt._CRYPT_LIBS', mock_libs)
        mocker.patch('ctypes.cdll.LoadLibrary', return_value=MockCDLL())
        with pytest.raises(ImportError, match=r'Cannot find crypt implementation'):
            CryptFacade()
    def test_library_with_no_crypt_r_or_crypt_gensalt_rn(self, mocker: MockerFixture) -> None:
        """Test that a library without crypt_r() or crypt_gensalt_rn() is prepped correctly."""
        mock_libs = (
            _CryptLib(None),
        )
        # Stand-in CDLL exposing only the non-reentrant crypt/crypt_gensalt entry points.
        class MockCDLL:
            class MockCrypt:
                def __init__(self):
                    self.argtypes = None
                    self.restype = None
            def __init__(self):
                self.crypt = self.MockCrypt()
                self.crypt_gensalt = self.MockCrypt()
        mocker.patch('ansible._internal._encryption._crypt._CRYPT_LIBS', mock_libs)
        mocker.patch('ctypes.cdll.LoadLibrary', return_value=MockCDLL())
        crypt_facade = CryptFacade()
        # Setup must fill in argtypes/restype and record that the _r variants are absent.
        assert crypt_facade._crypt_impl is not None
        assert crypt_facade._crypt_impl.argtypes is not None
        assert crypt_facade._crypt_impl.restype is not None
        assert crypt_facade._use_crypt_r is False
        assert crypt_facade._crypt_gensalt_impl is not None
        assert crypt_facade._crypt_gensalt_impl.argtypes is not None
        assert crypt_facade._crypt_gensalt_impl.restype is not None
        assert crypt_facade._use_crypt_gensalt_rn is False
        assert crypt_facade.has_crypt_gensalt
    def test_crypt_fail_errno(self, mocker: MockerFixture) -> None:
        """Test crypt() setting failure errno raises OSError."""
        mocker.patch('ctypes.get_errno', return_value=errno.EBADFD)
        crypt_facade = CryptFacade()
        with pytest.raises(OSError, match=r'crypt failed:'):
            crypt_facade.crypt(b"test", b"123")
    def test_crypt_result_none(self, mocker: MockerFixture) -> None:
        """Test crypt() implementation returning None raises ValueError."""
        crypt_facade = CryptFacade()
        mocker.patch.object(crypt_facade, '_crypt_impl', return_value=None)
        with pytest.raises(ValueError, match=r'crypt failed: invalid salt or unsupported algorithm'):
            crypt_facade.crypt(b"test", b"123")
    def test_crypt_result_failure(self, mocker: MockerFixture) -> None:
        """Test crypt() implementation returning failure token raises ValueError."""
        crypt_facade = CryptFacade()
        # _FAILURE_TOKENS holds the sentinel strings crypt() returns on failure.
        mocker.patch.object(crypt_facade, '_crypt_impl', return_value=list(_FAILURE_TOKENS)[0])
        with pytest.raises(ValueError, match=r'crypt failed: invalid salt or unsupported algorithm'):
            crypt_facade.crypt(b"test", b"123")
    def test_crypt_gensalt_called_with_no_impl(self, mocker: MockerFixture) -> None:
        """Calling crypt_gensalt() without impl should raise NotImplementedError."""
        crypt_facade = CryptFacade()
        mock_prop = mocker.patch('ansible._internal._encryption._crypt.CryptFacade.has_crypt_gensalt', new_callable=mocker.PropertyMock)
        mock_prop.return_value = False
        with pytest.raises(NotImplementedError, match=r'crypt_gensalt not available \(requires libxcrypt\)'):
            crypt_facade.crypt_gensalt(b"", 1, b"")
    def test_crypt_gensalt(self, mocker: MockerFixture) -> None:
        """Test the NOT _use_crypt_gensalt_rn code path of crypt_gensalt()."""
        crypt_facade = CryptFacade()
        crypt_facade._use_crypt_gensalt_rn = False
        mock_impl = mocker.patch.object(crypt_facade, '_crypt_gensalt_impl', return_value='')
        crypt_facade.crypt_gensalt(b'', 1, b'')
        # The non-_rn variant is called with (prefix, count, rbytes, nrbytes).
        mock_impl.assert_called_once_with(b'', 1, b'', 0)
    def test_crypt_gensalt_fail_errno(self, mocker: MockerFixture) -> None:
        """Test crypt_gensalt() setting failure errno raises OSError."""
        mocker.patch('ctypes.get_errno', return_value=errno.EBADFD)
        crypt_facade = CryptFacade()
        with pytest.raises(OSError, match=r'crypt_gensalt failed:'):
            crypt_facade.crypt_gensalt(b'', 1, b'')
    def test_crypt_gensalt_result_none(self, mocker: MockerFixture) -> None:
        """Test crypt_gensalt() implementation returning None raises ValueError."""
        crypt_facade = CryptFacade()
        mocker.patch.object(crypt_facade, '_crypt_gensalt_impl', return_value=None)
        with pytest.raises(ValueError, match=r'crypt_gensalt failed: unable to generate salt'):
            crypt_facade.crypt_gensalt(b'', 1, b'')
    def test_crypt_gensalt_result_failure(self, mocker: MockerFixture) -> None:
        """Test crypt_gensalt() implementation returning failure token raises ValueError."""
        crypt_facade = CryptFacade()
        # Skip the _rn version as it modifies impl return value
        crypt_facade._use_crypt_gensalt_rn = False
        mocker.patch.object(crypt_facade, '_crypt_gensalt_impl', return_value=list(_FAILURE_TOKENS)[0])
        with pytest.raises(ValueError, match=r'crypt_gensalt failed: invalid prefix or unsupported algorithm'):
            crypt_facade.crypt_gensalt(b'', 1, b'')
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/_internal/_encryption/test_crypt.py",
"license": "GNU General Public License v3.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/module_utils/basic/test_human_to_bytes.py | # -*- coding: utf-8 -*-
# Copyright: (c) 2025 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import pytest
from ansible.module_utils.basic import AnsibleModule
@pytest.mark.parametrize('value, isbits, expected', [
    ("4KB", False, 4096),
    ("4KB", None, 4096),
    ("4Kb", True, 4096),
])
def test_validator_function(value: str, isbits: bool | None, expected: int) -> None:
    """human_to_bytes converts size strings, honoring an explicit isbits flag."""
    result = AnsibleModule.human_to_bytes(value, isbits=isbits)
    assert result == expected
@pytest.mark.parametrize('value, expected', [
    ("4KB", 4096),
])
def test_validator_function_default_isbits(value: str, expected: int) -> None:
    """When isbits is omitted, human_to_bytes interprets the suffix as bytes."""
    assert expected == AnsibleModule.human_to_bytes(value)
@pytest.mark.parametrize('value, isbits', [
    ("4Kb", False),
    ("4KB", True),
])
def test_validator_functions(value: str, isbits: bool) -> None:
    """A unit suffix that disagrees with the isbits flag must raise ValueError."""
    with pytest.raises(ValueError):
        AnsibleModule.human_to_bytes(value, isbits=isbits)
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/module_utils/basic/test_human_to_bytes.py",
"license": "GNU General Public License v3.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/constants.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import re
# Directory names that validate-modules skips while scanning.
REJECTLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea'))
# Matches an uncommented sys.exit(...) call anywhere on a line.
SYS_EXIT_REGEX = re.compile(r'[^#]*sys.exit\s*\(.*')
# Option names matching this likely hold secrets and should declare no_log.
# (?!ive) keeps words like 'passive' from matching via 'pass'.
NO_LOG_REGEX = re.compile(r'(?:pass(?!ive)|secret|token|key)', re.I)
# Everything that should not be used in a dictionary of a return value,
# since it will make user's life harder.
FORBIDDEN_DICTIONARY_KEYS = frozenset([
    'clear',
    'copy',
    'fromkeys',
    'get',
    'items',
    'keys',
    'pop',
    'popitem',
    'setdefault',
    'update',
    'values',
])
# Imports that modules must not use; 'new_only' limits the check to new modules.
REJECTLIST_IMPORTS = {
    'requests': {
        'new_only': True,
        'error': {
            'code': 'use-module-utils-urls',
            'msg': ('requests import found, should use '
                    'ansible.module_utils.urls instead')
        }
    },
    r'boto(?:\.|$)': {
        'new_only': True,
        'error': {
            'code': 'use-boto3',
            'msg': 'boto import found, new modules should use boto3'
        }
    },
}
# Plugin types that are required to document return values / examples / YAML examples.
PLUGINS_WITH_RETURN_VALUES = ('module', )
PLUGINS_WITH_EXAMPLES = ('module', )
PLUGINS_WITH_YAML_EXAMPLES = ('module', )
| {
"repo_id": "ansible/ansible",
"file_path": "test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/constants.py",
"license": "GNU General Public License v3.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/ansible_test/_util/controller/sanity/validate-modules/validate_modules/test_main.py | """Tests for validate-module's main module."""
from __future__ import annotations
def test_dict_members() -> None:
    """FORBIDDEN_DICTIONARY_KEYS must track every public method name on the builtin dict."""
    from validate_modules.constants import FORBIDDEN_DICTIONARY_KEYS  # type: ignore[import-not-found]

    public_dict_members = frozenset(name for name in dict.__dict__ if not name.startswith("__"))
    assert FORBIDDEN_DICTIONARY_KEYS == public_dict_members
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/ansible_test/_util/controller/sanity/validate-modules/validate_modules/test_main.py",
"license": "GNU General Public License v3.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/cache-plugins/cache_plugins/dummy_file_cache.py | from __future__ import annotations
DOCUMENTATION = """
name: dummy_file_cache
short_description: dummy file cache
description: see short
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the files
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
type: path
_prefix:
description: User defined prefix to use when creating the files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
"""
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
    """Dummy non-persistent file cache that round-trips values via str()/eval()."""

    _persistent = False

    def _load(self, filepath: str) -> object:
        """Read and evaluate the cached value stored at ``filepath``."""
        with open(filepath, 'r') as jfile:
            # Bug fix: read from the opened file handle, not the path string --
            # str has no read() method, so the original raised AttributeError on every load.
            # NOTE: eval() on file contents is acceptable only because this is a test
            # fixture reading files this plugin itself wrote via _dump().
            return eval(jfile.read())

    def _dump(self, value: object, filepath: str) -> None:
        """Write the value's string representation to ``filepath``."""
        with open(filepath, 'w') as afile:
            afile.write(str(value))
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/cache-plugins/cache_plugins/dummy_file_cache.py",
"license": "GNU General Public License v3.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/cache-plugins/cache_plugins/dummy_file_cache_persistent.py | from __future__ import annotations
DOCUMENTATION = """
name: dummy_file_cache
short_description: dummy file cache
description: see short
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the files
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
type: path
_prefix:
description: User defined prefix to use when creating the files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
"""
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
    """Dummy persistent file cache that round-trips values via str()/eval()."""

    def _load(self, filepath: str) -> object:
        """Read and evaluate the cached value stored at ``filepath``."""
        with open(filepath, 'r') as jfile:
            # Bug fix: read from the opened file handle, not the path string --
            # str has no read() method, so the original raised AttributeError on every load.
            # NOTE: eval() on file contents is acceptable only because this is a test
            # fixture reading files this plugin itself wrote via _dump().
            return eval(jfile.read())

    def _dump(self, value: object, filepath: str) -> None:
        """Write the value's string representation to ``filepath``."""
        with open(filepath, 'w') as afile:
            afile.write(str(value))
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/cache-plugins/cache_plugins/dummy_file_cache_persistent.py",
"license": "GNU General Public License v3.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:lib/ansible/_internal/_encryption/_crypt.py | # Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import ctypes
import ctypes.util
import os
import sys
import typing as t
from dataclasses import dataclass
__all__ = ['CryptFacade']
# Sentinel strings that crypt()/crypt_gensalt() return to signal failure.
_FAILURE_TOKENS = frozenset({b'*0', b'*1'})
@dataclass(frozen=True)
class _CryptLib:
    """Describes one candidate crypt library and the platforms it applies to."""

    # Library name for ctypes.util.find_library, an absolute path when is_path is
    # True, or None to load the default libc via LoadLibrary(None).
    name: str | None
    # Platforms (sys.platform values) on which this entry must be skipped.
    exclude_platforms: frozenset[str] = frozenset()
    # If non-empty, the only platforms on which this entry is tried.
    include_platforms: frozenset[str] = frozenset()
    # True when `name` is a filesystem path rather than a library name.
    is_path: bool = False
# Candidate libraries tried in order by CryptFacade._setup().
_CRYPT_LIBS = (
    _CryptLib('crypt'),  # libxcrypt
    _CryptLib(None, exclude_platforms=frozenset({'darwin'})),  # fallback to default libc
    _CryptLib(  # macOS Homebrew (Apple Silicon)
        '/opt/homebrew/opt/libxcrypt/lib/libcrypt.dylib',
        include_platforms=frozenset({'darwin'}),
        is_path=True,
    ),
    _CryptLib(  # macOS Homebrew (Intel)
        '/usr/local/opt/libxcrypt/lib/libcrypt.dylib',
        include_platforms=frozenset({'darwin'}),
        is_path=True,
    ),
)
class CryptFacade:
    """
    Provide an interface for various crypt libraries that might be available.
    """
    def __init__(self) -> None:
        """Locate and bind a usable crypt implementation; raises ImportError if none is found."""
        self._crypt_impl: t.Callable | None = None
        self._crypt_gensalt_impl: t.Callable | None = None
        self._use_crypt_r = False
        self._use_crypt_gensalt_rn = False
        self._crypt_name = ""
        self._setup()
    class _CryptData(ctypes.Structure):
        # Opaque stand-in for struct crypt_data used by crypt_r(); the buffer is
        # presumably sized to cover the largest known library layout -- TODO confirm.
        _fields_ = [('_opaque', ctypes.c_char * 131072)]
    @property
    def has_crypt_gensalt(self) -> bool:
        """True when a crypt_gensalt implementation was found (libxcrypt)."""
        return self._crypt_gensalt_impl is not None
    def _setup(self) -> None:
        """Setup crypt implementation"""
        for lib_config in _CRYPT_LIBS:
            # Honor per-entry platform restrictions.
            if sys.platform in lib_config.exclude_platforms:
                continue
            if lib_config.include_platforms and sys.platform not in lib_config.include_platforms:
                continue
            if lib_config.name is None:
                lib_so = None  # LoadLibrary(None) loads the default libc
            elif lib_config.is_path:
                if os.path.exists(lib_config.name):
                    lib_so = lib_config.name
                else:
                    continue
            else:
                lib_so = ctypes.util.find_library(lib_config.name)
                if not lib_so:
                    continue
            loaded_lib = ctypes.cdll.LoadLibrary(lib_so)
            # Prefer the reentrant crypt_r(); fall back to crypt().
            try:
                self._crypt_impl = loaded_lib.crypt_r
                self._use_crypt_r = True
            except AttributeError:
                try:
                    self._crypt_impl = loaded_lib.crypt
                except AttributeError:
                    continue  # this library exposes neither entry point; try the next one
            if self._use_crypt_r:
                self._crypt_impl.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(self._CryptData)]
                self._crypt_impl.restype = ctypes.c_char_p
            else:
                self._crypt_impl.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
                self._crypt_impl.restype = ctypes.c_char_p
            # Try to load crypt_gensalt (available in libxcrypt)
            try:
                self._crypt_gensalt_impl = loaded_lib.crypt_gensalt_rn
                self._crypt_gensalt_impl.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_int]
                self._crypt_gensalt_impl.restype = ctypes.c_char_p
                self._use_crypt_gensalt_rn = True
            except AttributeError:
                try:
                    self._crypt_gensalt_impl = loaded_lib.crypt_gensalt
                    self._crypt_gensalt_impl.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p, ctypes.c_int]
                    self._crypt_gensalt_impl.restype = ctypes.c_char_p
                except AttributeError:
                    self._crypt_gensalt_impl = None  # salt generation unavailable (non-libxcrypt)
            self._crypt_name = lib_config.name
            break
        else:
            # for-else: no candidate library yielded a usable crypt implementation.
            raise ImportError('Cannot find crypt implementation')
    def crypt(self, word: bytes, salt: bytes) -> bytes:
        """Hash a password using the system's crypt function."""
        # Clear errno first so a nonzero value afterwards is attributable to this call.
        ctypes.set_errno(0)
        if self._use_crypt_r:
            data = self._CryptData()
            ctypes.memset(ctypes.byref(data), 0, ctypes.sizeof(data))
            result = self._crypt_impl(word, salt, ctypes.byref(data))
        else:
            result = self._crypt_impl(word, salt)
        errno = ctypes.get_errno()
        if errno:
            error_msg = os.strerror(errno)
            raise OSError(errno, f'crypt failed: {error_msg}')
        if result is None:
            raise ValueError('crypt failed: invalid salt or unsupported algorithm')
        if result in _FAILURE_TOKENS:
            # Some implementations signal failure via sentinel strings instead of NULL.
            raise ValueError('crypt failed: invalid salt or unsupported algorithm')
        return result
    def crypt_gensalt(self, prefix: bytes, count: int, rbytes: bytes) -> bytes:
        """Generate a salt string for use with crypt."""
        if not self.has_crypt_gensalt:
            raise NotImplementedError('crypt_gensalt not available (requires libxcrypt)')
        ctypes.set_errno(0)
        if self._use_crypt_gensalt_rn:
            # The _rn variant writes into a caller-supplied buffer instead of returning static storage.
            output = ctypes.create_string_buffer(256)
            result = self._crypt_gensalt_impl(prefix, count, rbytes, len(rbytes), output, len(output))
            if result is not None:
                result = output.value
        else:
            result = self._crypt_gensalt_impl(prefix, count, rbytes, len(rbytes))
        errno = ctypes.get_errno()
        if errno:
            error_msg = os.strerror(errno)
            raise OSError(errno, f'crypt_gensalt failed: {error_msg}')
        if result is None:
            raise ValueError('crypt_gensalt failed: unable to generate salt')
        if result in _FAILURE_TOKENS:
            raise ValueError('crypt_gensalt failed: invalid prefix or unsupported algorithm')
        return result
| {
"repo_id": "ansible/ansible",
"file_path": "lib/ansible/_internal/_encryption/_crypt.py",
"license": "GNU General Public License v3.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ansible/ansible:lib/ansible/_internal/_display_utils.py | from __future__ import annotations
import dataclasses
from ansible.module_utils._internal import _ambient_context, _messages
from . import _event_formatting
class DeferredWarningContext(_ambient_context.AmbientContextBase):
    """
    Calls to `Display.warning()` and `Display.deprecated()` within this context will cause the resulting warnings to be captured and not displayed.
    The intended use is for task-initiated warnings to be recorded with the task result, which makes them visible to registered results, callbacks, etc.
    The active display callback is responsible for communicating any warnings to the user.
    """
    # DTFIX-FUTURE: once we start implementing nested scoped contexts for our own bookkeeping, this should be an interface facade that forwards to the nearest
    # context that actually implements the warnings collection capability
    def __init__(self, *, variables: dict[str, object]) -> None:
        """Create a capture context; `variables` is the task's variable mapping."""
        self._variables = variables  # DTFIX-FUTURE: move this to an AmbientContext-derived TaskContext (once it exists)
        # Deprecations and plain warnings are accumulated separately, in arrival order.
        self._deprecation_warnings: list[_messages.DeprecationSummary] = []
        self._warnings: list[_messages.WarningSummary] = []
        # Set of all summaries seen by this context, used for de-duplication in capture().
        self._seen: set[_messages.WarningSummary] = set()
    def capture(self, warning: _messages.WarningSummary) -> None:
        """Add the warning/deprecation to the context if it has not already been seen by this context."""
        if warning in self._seen:
            return
        self._seen.add(warning)
        # DeprecationSummary is a WarningSummary subtype; route it to its own list.
        if isinstance(warning, _messages.DeprecationSummary):
            self._deprecation_warnings.append(warning)
        else:
            self._warnings.append(warning)
    def get_warnings(self) -> list[_messages.WarningSummary]:
        """Return a list of the captured non-deprecation warnings."""
        # DTFIX-FUTURE: return a read-only list proxy instead
        return self._warnings
    def get_deprecation_warnings(self) -> list[_messages.DeprecationSummary]:
        """Return a list of the captured deprecation warnings."""
        # DTFIX-FUTURE: return a read-only list proxy instead
        return self._deprecation_warnings
def format_message(summary: _messages.SummaryBase, include_traceback: bool) -> str:
    """Render the given summary's event as display text, augmenting deprecation summaries with plugin/version context first."""
    event = summary.event

    if isinstance(summary, _messages.DeprecationSummary):
        # rewrite the event message to include the deprecation's origin and removal schedule
        augmented_msg = get_deprecation_message_with_plugin_info(
            msg=event.msg,
            version=summary.version,
            date=summary.date,
            deprecator=summary.deprecator,
        )

        event = dataclasses.replace(event, msg=augmented_msg)

    return _event_formatting.format_event(event, include_traceback)
def get_deprecation_message_with_plugin_info(
    *,
    msg: str,
    version: str | None,
    removed: bool = False,
    date: str | None,
    deprecator: _messages.PluginInfo | None,
) -> str:
    """Internal use only. Return a deprecation message and help text for display."""
    # DTFIX-FUTURE: the logic for omitting date/version doesn't apply to the payload, so it shows up in vars in some cases when it should not
    removal_fragment = 'This feature was removed' if removed else 'This feature will be removed'

    if not deprecator or not deprecator.type:
        # indeterminate has no resolved_name or type
        # collections have a resolved_name but no type
        collection = deprecator.resolved_name if deprecator else None
        plugin_fragment = ''
    elif deprecator.resolved_name == 'ansible.builtin':
        # core deprecations from base classes (the API) have no plugin name, only 'ansible.builtin'
        type_label = str(deprecator.type) if deprecator.type is _messages.PluginType.MODULE else f'{deprecator.type} plugin'
        collection = deprecator.resolved_name
        plugin_fragment = f'the {type_label} API'
    else:
        name_parts = deprecator.resolved_name.split('.')
        type_label = str(deprecator.type) if deprecator.type is _messages.PluginType.MODULE else f'{deprecator.type} plugin'
        collection = '.'.join(name_parts[:2]) if len(name_parts) > 2 else None
        plugin_fragment = f'{type_label} {name_parts[-1]!r}'

    if collection and plugin_fragment:
        plugin_fragment += ' in'

    if collection == 'ansible.builtin':
        collection_fragment = 'ansible-core'
    elif collection:
        collection_fragment = f'collection {collection!r}'
    else:
        collection_fragment = ''

    # pick the removal-schedule phrasing; date takes precedence over version
    if not collection:
        when_fragment = '' if removed else 'in the future'
    elif date:
        when_fragment = f'in a release after {date}'
    elif version:
        when_fragment = f'version {version}'
    else:
        when_fragment = '' if removed else 'in a future release'

    from_fragment = 'from' if (plugin_fragment or collection_fragment) else ''

    fragments = (removal_fragment, from_fragment, plugin_fragment, collection_fragment, when_fragment)
    deprecation_msg = ' '.join(fragment for fragment in fragments if fragment) + '.'

    return join_sentences(msg, deprecation_msg)
def join_sentences(first: str | None, second: str | None) -> str:
"""Join two sentences together."""
first = (first or '').strip()
second = (second or '').strip()
if first and first[-1] not in ('!', '?', '.'):
first += '.'
if second and second[-1] not in ('!', '?', '.'):
second += '.'
if first and not second:
return first
if not first and second:
return second
return ' '.join((first, second))
| {
"repo_id": "ansible/ansible",
"file_path": "lib/ansible/_internal/_display_utils.py",
"license": "GNU General Public License v3.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
ansible/ansible:test/integration/targets/connection_persistent/connection_plugins/persistent.py | from __future__ import annotations
DOCUMENTATION = """
options:
persistent_connect_timeout:
type: int
default: 30
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
default: 30
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
persistent_log_messages:
type: boolean
ini:
- section: persistent_connection
key: log_messages
env:
- name: ANSIBLE_PERSISTENT_LOG_MESSAGES
vars:
- name: ansible_persistent_log_messages
"""
import json
import os
import pickle
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import NetworkConnectionBase
class Connection(NetworkConnectionBase):
    """Minimal persistent connection plugin used by the connection_persistent integration test."""

    transport = 'persistent'
    supports_persistence = True

    def _connect(self):
        # no transport to establish; simply flag the connection as up
        self._connected = True

    def update_play_context(self, pc_data):
        """
        This is to ensure that the PlayContext.deserialize method remains functional,
        preventing it from breaking the network connection plugins that rely on it.
        See:
        https://github.com/ansible-collections/ansible.netcommon/blob/50fafb6875bb2f57e932a7a50123513b48bd4fd5/plugins/connection/httpapi.py#L258
        """
        play_context = self._play_context = PlayContext()
        serialized = pc_data.encode(errors='surrogateescape')
        play_context.deserialize(pickle.loads(serialized))

    def get_capabilities(self, *args, **kwargs):
        """Return a JSON document with this process's pid/ppid plus the play context attributes."""
        capabilities = {'pid': os.getpid(), 'ppid': os.getppid()}
        capabilities.update(self._play_context.dump_attrs())

        return json.dumps(capabilities)
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/connection_persistent/connection_plugins/persistent.py",
"license": "GNU General Public License v3.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/connection_persistent/library/persistent.py | from __future__ import annotations
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
def main():
    """Query the persistent connection for its capabilities and report them as the module result."""
    module = AnsibleModule({})
    connection = Connection(module._socket_path)

    module.exit_json(**module.from_json(connection.get_capabilities()))


if __name__ == '__main__':
    main()
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/connection_persistent/library/persistent.py",
"license": "GNU General Public License v3.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/doc_fragments/return_doc_fragment.py | # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
class ModuleDocFragment:
    # Doc fragment fixture for the validate-modules sanity test: an empty options stub
    # plus a RETURN fragment containing an intentionally invalid P() reference.
    DOCUMENTATION = r"""
options: {}
"""

    RETURN = r"""
bar:
  description:
    - Some foo bar.
    - P(a.b.asfd#dfsa) this is an error.
  returned: success
  type: int
  sample: 42
"""
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/doc_fragments/return_doc_fragment.py",
"license": "GNU General Public License v3.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/doc_fragments_not_exist.py | #!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
module: doc_fragments_not_exist
short_description: Non-existing doc fragment
description: A module with a non-existing doc fragment
author:
- Ansible Core Team
extends_documentation_fragment:
- does.not.exist
"""
EXAMPLES = """#"""
RETURN = """"""
from ansible.module_utils.basic import AnsibleModule
def main():
    """Exit successfully with an empty result."""
    module = AnsibleModule()
    module.exit_json()


if __name__ == '__main__':
    main()
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/doc_fragments_not_exist.py",
"license": "GNU General Public License v3.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/return_fragments.py | #!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
module: return_fragments
short_description: Uses return fragments
description: A module with a return doc fragment.
author:
- Ansible Core Team
"""
EXAMPLES = """#"""
RETURN = """
extends_documentation_fragment:
- ns.col.return_doc_fragment
"""
from ansible.module_utils.basic import AnsibleModule
def main():
    """Exit successfully, returning the documented `bar` value."""
    module = AnsibleModule()
    module.exit_json(bar=42)


if __name__ == '__main__':
    main()
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/return_fragments.py",
"license": "GNU General Public License v3.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/return_fragments_not_exist.py | #!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
module: return_fragments_not_exist
short_description: Non-existing return doc fragment
description: A module with a non-existing return doc fragment.
author:
- Ansible Core Team
"""
EXAMPLES = """#"""
RETURN = """
extends_documentation_fragment:
- does.not.exist
"""
from ansible.module_utils.basic import AnsibleModule
def main():
    """Exit successfully, returning the documented `bar` value."""
    module = AnsibleModule()
    module.exit_json(bar=42)


if __name__ == '__main__':
    main()
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/ansible-test-sanity-validate-modules/ansible_collections/ns/col/plugins/modules/return_fragments_not_exist.py",
"license": "GNU General Public License v3.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/plugins/lookup/test_template.py | from __future__ import annotations
import pathlib
from ansible._internal._templating._utils import Omit
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar, trust_as_template
def test_no_finalize_marker_passthru(tmp_path: pathlib.Path) -> None:
    """Return an Undefined marker from a template lookup to ensure that the internal templating operation does not finalize its result."""
    template_file = tmp_path / 'template.txt'
    template_file.write_text("{{ bogusvar }}")

    templar = Templar(loader=DataLoader(), variables=dict(template_path=str(template_file)))
    rendered = templar.template(trust_as_template('{{ lookup("template", template_path) | default("pass") }}'))

    # the undefined marker must survive the lookup so that `default` can still catch it
    assert rendered == "pass"
def test_no_finalize_omit_passthru(tmp_path: pathlib.Path) -> None:
    """Return an Omit scalar from a template lookup to ensure that the internal templating operation does not finalize its result."""
    template_file = tmp_path / 'template.txt'
    template_file.write_text("{{ omitted }}")

    variables = dict(omitted=trust_as_template("{{ omit }}"), template_path=str(template_file))

    # The result from the lookup should be an Omit value, since the result of the template lookup's internal templating call should not be finalized.
    # If it were, finalize would trip the Omit and raise an error about a top-level template result resolving to an Omit scalar.
    templar = Templar(loader=DataLoader(), variables=variables)
    result = templar.template(trust_as_template("{{ lookup('template', template_path) | type_debug }}"))

    assert result == type(Omit).__name__
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/plugins/lookup/test_template.py",
"license": "GNU General Public License v3.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:lib/ansible/module_utils/_internal/_no_six.py | from __future__ import annotations
import sys
import types
from ansible.module_utils.common import warnings
# INLINED FROM THE SIX LIBRARY, see lib/ansible/module_utils/six/__init__.py
# Copyright (c) 2010-2024 Benjamin Peterson
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(type):
        def __new__(cls, name, this_bases, d):
            if sys.version_info[:2] < (3, 7):
                resolved_bases = bases
            else:
                # PEP 560 (3.7+) requires a bit of extra care; mimic what __build_class__ does
                resolved_bases = types.resolve_bases(bases)

                if resolved_bases is not bases:
                    d['__orig_bases__'] = bases

            return meta(name, resolved_bases, d)

        @classmethod
        def __prepare__(cls, name, this_bases):
            return meta.__prepare__(name, bases)

    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        body = dict(cls.__dict__)
        slots = body.get('__slots__')

        if slots is not None:
            # slot descriptors must not be carried into the rebuilt class body
            for slot_name in ([slots] if isinstance(slots, str) else slots):
                body.pop(slot_name)

        # drop the auto-created descriptors; the new class will regenerate them
        body.pop('__dict__', None)
        body.pop('__weakref__', None)

        if hasattr(cls, '__qualname__'):
            body['__qualname__'] = cls.__qualname__

        return metaclass(cls.__name__, cls.__bases__, body)

    return wrapper
def iteritems(d, **kw):
    """Return an iterator over the mapping's (key, value) pairs (six compatibility shim)."""
    return iter(d.items(**kw))
# Mapping of six attribute names to their modern Python 3 equivalents; consulted
# by deprecate() below to service legacy `six` imports with a deprecation warning.
_mini_six = {
    "PY2": False,
    "PY3": True,
    "text_type": str,
    "binary_type": bytes,
    "string_types": (str,),
    "integer_types": (int,),
    "iteritems": iteritems,
    "add_metaclass": add_metaclass,
    "with_metaclass": with_metaclass,
}
# INLINED SIX END
def deprecate(importable_name: str, module_name: str, *deprecated_args) -> object:
    """Inject import-time deprecation warnings.

    Returns the requested six shim attribute after emitting a deprecation warning,
    or raises AttributeError when the name is not a known deprecated importable.
    """
    # NOTE: the parentheses around the walrus assignment are required. Without them,
    # `importable` binds to the boolean result of the `is not ...` comparison instead
    # of the looked-up attribute, and the function would return True/False.
    if not (importable_name in deprecated_args and (importable := _mini_six.get(importable_name, ...)) is not ...):
        raise AttributeError(f"module {module_name!r} has no attribute {importable_name!r}")

    # TODO Inspect and remove all calls to this function in 2.24
    warnings.deprecate(
        msg=f"Importing {importable_name!r} from {module_name!r} is deprecated.",
        version="2.24",
    )

    return importable
| {
"repo_id": "ansible/ansible",
"file_path": "lib/ansible/module_utils/_internal/_no_six.py",
"license": "GNU General Public License v3.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ansible/ansible:test/integration/targets/ansible-doc/doc_fragments/test_return_frag.py | from __future__ import annotations
class ModuleDocFragment(object):
    # Standard documentation fragment used by the ansible-doc RETURN fragment tests.
    RETURN = r'''
y_notlast:
  description: A return from fragment
  type: str
  returned: it depends TM
z_last:
  description: A a return from fragment with merge.
  type: str
  returned: success
'''
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/ansible-doc/doc_fragments/test_return_frag.py",
"license": "GNU General Public License v3.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/ansible-doc/library/test_docs_return_fragments.py | #!/usr/bin/python
from __future__ import annotations
DOCUMENTATION = '''
---
module: test_docs_returns
short_description: Test module
description:
- Test module
author:
- Ansible Core Team
'''
EXAMPLES = '''
'''
RETURN = '''
m_middle:
description:
- This should be in the middle.
- Has some more data
type: dict
returned: success and 1st of month
contains:
suboption:
description: A suboption.
type: str
choices: [ARF, BARN, c_without_capital_first_letter]
a_first:
description: A first result.
type: str
returned: success
z_last:
example: this is a merge
extends_documentation_fragment:
- test_return_frag
'''
from ansible.module_utils.basic import AnsibleModule
def main():
    """No-op module used to exercise ansible-doc RETURN fragment handling."""
    module = AnsibleModule(argument_spec=dict())
    module.exit_json()


if __name__ == '__main__':
    main()
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/ansible-doc/library/test_docs_return_fragments.py",
"license": "GNU General Public License v3.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/config/lookup_plugins/broken.py | # -*- coding: utf-8 -*-
# Copyright (c) 2025, Felix Fontein <felix@fontein.de>, The Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
DOCUMENTATION = r"""
name: broken
short_description: Test input precedence
author: Felix Fontein (@felixfontein)
description:
- Test input precedence.
options:
_terms:
description:
- Ignored.
type: list
elements: str
required: true
some_option:
description:
- The interesting part.
type: str
default: default value
env:
- name: PLAYGROUND_TEST_1
- name: PLAYGROUND_TEST_2
vars:
- name: playground_test_1
- name: playground_test_2
ini:
- key: playground_test_1
section: playground
- key: playground_test_2
section: playground
"""
EXAMPLES = r"""#"""
RETURN = r"""
_list:
description:
- The value of O(some_option).
type: list
elements: str
"""
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
        """Generate list."""
        self.set_options(var_options=variables, direct=kwargs)

        value = self.get_option("some_option")
        value_with_origin = self.get_option_and_origin("some_option")

        return [value, *value_with_origin]
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/config/lookup_plugins/broken.py",
"license": "GNU General Public License v3.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:lib/ansible/module_utils/_internal/_ansiballz/_extensions/_debugpy.py | """
Remote debugging support for AnsiballZ modules with debugpy.
To use with VS Code:
1) Choose an available port for VS Code to listen on (e.g. 5678).
2) Ensure `debugpy` is installed for the interpreter(s) which will run the code being debugged.
3) Create the following launch.json configuration
{
"version": "0.2.0",
"configurations": [
{
"name": "Python Debug Server",
"type": "debugpy",
"request": "attach",
"listen": {
"host": "localhost",
"port": 5678,
},
},
{
"name": "ansible-playbook main.yml",
"type": "debugpy",
"request": "launch",
"module": "ansible",
"args": [
"playbook",
"main.yml"
],
"env": {
"_ANSIBLE_ANSIBALLZ_DEBUGPY_CONFIG": "{\"host\": \"localhost\", \"port\": 5678}"
},
"console": "integratedTerminal",
}
],
"compounds": [
{
"name": "Test Module Debugging",
"configurations": [
"Python Debug Server",
"ansible-playbook main.yml"
],
"stopAll": true
}
]
}
4) Set any desired breakpoints.
5) Configure the Run and Debug view to use the "Test Module Debugging" compound configuration.
6) Press F5 to start debugging.
"""
from __future__ import annotations
import dataclasses
import json
import os
import pathlib
import typing as t
@dataclasses.dataclass(frozen=True)
class Options:
    """Debugger options for debugpy."""

    host: str = 'localhost'
    """The host to connect to for remote debugging."""

    port: int = 5678
    """The port to connect to for remote debugging."""

    connect: dict[str, object] = dataclasses.field(default_factory=dict)
    """The options to pass to the `debugpy.connect` method."""

    source_mapping: dict[str, str] = dataclasses.field(default_factory=dict)
    """
    A mapping of source paths to provide to debugpy.
    This setting is used internally by AnsiballZ and is not required unless Ansible CLI commands are run from a different system than your IDE.
    In that scenario, use this setting instead of configuring source mapping in your IDE.
    The key is a path known to the IDE.
    The value is the same path as known to the Ansible CLI.
    Both file paths and directories are supported.
    """
def run(args: dict[str, t.Any]) -> None:  # pragma: nocover
    """Enable remote debugging."""
    import debugpy

    options = Options(**args)

    # six levels up from this module -- presumably the AnsiballZ payload root; TODO confirm
    temp_dir = pathlib.Path(__file__).parents[5]
    path_mapping = [[ide_path, str(temp_dir / local_path)] for ide_path, local_path in options.source_mapping.items()]

    os.environ['PATHS_FROM_ECLIPSE_TO_PYTHON'] = json.dumps(path_mapping)

    debugpy.connect((options.host, options.port), **options.connect)

    pass  # A convenient place to put a breakpoint
| {
"repo_id": "ansible/ansible",
"file_path": "lib/ansible/module_utils/_internal/_ansiballz/_extensions/_debugpy.py",
"license": "GNU General Public License v3.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
ansible/ansible:test/units/module_utils/_internal/test_traceback.py | from __future__ import annotations
import pytest
import pytest_mock
from ansible.module_utils._internal import _traceback
@pytest.mark.parametrize("patched_parsed_args, event, expected", (
    (dict(_ansible_tracebacks_for=["error", "warning"]), _traceback.TracebackEvent.ERROR, True),  # included value
    (dict(_ansible_tracebacks_for=["error", "warning"]), _traceback.TracebackEvent.WARNING, True),  # included value
    (dict(_ansible_tracebacks_for=["error", "warning"]), _traceback.TracebackEvent.DEPRECATED, False),  # excluded value
    ({}, _traceback.TracebackEvent.ERROR, False),  # unspecified defaults to no tracebacks
    (dict(_ansible_tracebacks_for="bogus,values"), _traceback.TracebackEvent.ERROR, True),  # parse failure defaults to always enabled
    (None, _traceback.TracebackEvent.ERROR, True),  # fetch failure defaults to always enabled
), ids=str)
def test_default_module_traceback_config(
    patched_parsed_args: dict | None,
    event: _traceback.TracebackEvent,
    expected: bool,
    mocker: pytest_mock.MockerFixture,
) -> None:
    """Validate MU traceback config behavior (including unconfigured/broken config fallbacks)."""
    from ansible.module_utils import basic

    mocker.patch.object(basic, '_PARSED_MODULE_ARGS', patched_parsed_args)
    # this should just be an importlib.reload() on _traceback, but that redeclares the enum type and breaks the world
    mocker.patch.object(_traceback, '_module_tracebacks_enabled_events', None)

    enabled = _traceback._is_module_traceback_enabled(event=event)

    assert enabled is expected
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/module_utils/_internal/test_traceback.py",
"license": "GNU General Public License v3.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/_internal/_json/test_json.py | from __future__ import annotations
import typing as t
import pytest
from ansible._internal._json import AnsibleVariableVisitor, EncryptedStringBehavior
from ansible.errors import AnsibleVariableTypeError
from ansible.parsing.vault import EncryptedString, AnsibleVaultError
from units.mock.vault_helper import VaultTestHelper
@pytest.mark.parametrize("behavior, decryptable, expected", (
    (EncryptedStringBehavior.PRESERVE, True, None),
    (EncryptedStringBehavior.PRESERVE, False, None),
    (EncryptedStringBehavior.DECRYPT, True, "plaintext"),
    (EncryptedStringBehavior.DECRYPT, False, AnsibleVaultError("no vault secrets")),
    (EncryptedStringBehavior.REDACT, True, "<redacted>"),
    (EncryptedStringBehavior.REDACT, False, "<redacted>"),
    (EncryptedStringBehavior.FAIL, True, AnsibleVariableTypeError("unsupported for variable storage")),
    (EncryptedStringBehavior.FAIL, False, AnsibleVariableTypeError("unsupported for variable storage")),
), ids=str)
def test_encrypted_string_behavior(
    behavior: EncryptedStringBehavior,
    decryptable: bool,
    expected: t.Any,
    _vault_secrets_context: None,
) -> None:
    """Verify AnsibleVariableVisitor handling of EncryptedString values for each EncryptedStringBehavior."""
    if decryptable:
        value = VaultTestHelper.make_encrypted_string('plaintext')
    else:
        # valid ciphertext with intentionally unavailable secret
        value = EncryptedString(ciphertext=(
            '$ANSIBLE_VAULT;1.1;AES256\n'
            '333665623864636331356364306535613231613833616662656130613665336561316435393736366636663864396636326330626530643238653462333562350a396162623230643'
            '037396430383335386663363534353733386430643764303062633738613533336135653563313139373038333964316264633265376435370a326137363231646261303036356636'
            '37346430303361316436306130663461393832656134346639326365633830373361376236343961386164323538353962'
        ))

    visitor = AnsibleVariableVisitor(encrypted_string_behavior=behavior)

    if isinstance(expected, Exception):
        with pytest.raises(type(expected), match=expected.args[0]):
            visitor.visit(value)

        return

    result = visitor.visit(value)

    if expected is None:
        # PRESERVE must return the exact same object, not a copy
        assert result is value
    else:
        assert result == expected
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/_internal/_json/test_json.py",
"license": "GNU General Public License v3.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/units/_internal/_ansiballz/test_builder.py | from __future__ import annotations
from ansible._internal._ansiballz._builder import ExtensionManager
from ansible.module_utils._internal._ansiballz._extensions import _pydevd
def test_debugger_source_mapping() -> None:
    """Synthetic coverage for builder source mapping."""
    ide_to_controller = {
        "ide/path.py": "controller/path.py",
        "ide/something.py": "controller/not_match.py",
    }
    controller_to_zip = {
        "controller/path.py": "zip/path.py",
        "controller/other.py": "not_match.py",
    }

    manager = ExtensionManager(_pydevd.Options(source_mapping=ide_to_controller))
    manager.source_mapping.update(controller_to_zip)

    extensions = manager.get_extensions()

    # only mappings that chain through the controller path (plus unchained controller entries) survive
    assert extensions['_pydevd']['source_mapping'] == {'controller/other.py': 'not_match.py', 'ide/path.py': 'zip/path.py'}
| {
"repo_id": "ansible/ansible",
"file_path": "test/units/_internal/_ansiballz/test_builder.py",
"license": "GNU General Public License v3.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/task-esoterica/action_plugins/echo.py | from __future__ import annotations
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Echo action: returns the raw task args so tests can assert on them."""

    def run(self, tmp=None, task_vars=None):
        return {'action_args': self._task.args}
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/task-esoterica/action_plugins/echo.py",
"license": "GNU General Public License v3.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/callback-dispatch/callback_plugins/oops_always_enabled.py | from __future__ import annotations
import os
import typing as t
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
    """Always-enabled test callback used to detect duplicate event dispatch."""

    # class-level counter so all dispatches increment the same tally
    call_count: t.ClassVar[int] = 0

    def v2_runner_on_ok(self, *args, **kwargs) -> None:
        print(f"hello from ALWAYS ENABLED v2_runner_on_ok {args=} {kwargs=}")
        CallbackModule.call_count += 1

    def v2_playbook_on_stats(self, stats):
        print('hello from ALWAYS ENABLED v2_playbook_on_stats')

        if not os.environ.get('_ASSERT_OOPS'):
            return

        # NOTE(review): the assertion message below appears truncated in the original source
        assert CallbackModule.call_count < 2, "always enabled callback should not "
        print("no double callbacks test PASS")
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/callback-dispatch/callback_plugins/oops_always_enabled.py",
"license": "GNU General Public License v3.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/integration/targets/callback-dispatch/callback_plugins/v1_only_methods.py | from __future__ import annotations
import functools
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
    """Test callback that implements exclusively deprecated v1 callback methods."""

    CALLBACK_NEEDS_ENABLED = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # names of v1 callback methods that have been invoked at least once
        self.called_v1_method_names: set[str] = set()

    def callback_impl(self, *args, name: str, **kwargs) -> None:
        # shared implementation behind every synthesized v1 method; records which method fired
        print(f"hi from callback {name!r} with {args=!r} {kwargs=!r}")
        self.called_v1_method_names.add(name)

    # synthesize each known v1 callback method at class-creation time by injecting a
    # partialmethod (bound to that method's own name) into the class namespace via locals()
    for v1_method in CallbackBase._v2_v1_method_map.values():
        if not v1_method:
            continue

        locals()[v1_method.__name__] = functools.partialmethod(callback_impl, name=v1_method.__name__)

    def playbook_on_stats(self, stats, *args, **kwargs):
        # verify a representative sample of v1 methods was dispatched during the play
        if missed_v1_method_calls := (
            {'on_any',
             'runner_on_ok',
             'playbook_on_task_start',
             'runner_on_async_ok',
             } - self.called_v1_method_names):
            assert False, f"The following v1 callback methods were not invoked as expected: {', '.join(missed_v1_method_calls)}"

        print("v1 callback test PASS")
| {
"repo_id": "ansible/ansible",
"file_path": "test/integration/targets/callback-dispatch/callback_plugins/v1_only_methods.py",
"license": "GNU General Public License v3.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ansible/ansible:test/lib/ansible_test/_internal/debugging.py | """Setup and configure remote debugging."""
from __future__ import annotations
import abc
import dataclasses
import importlib
import json
import os
import re
import sys
import typing as t
from .util import (
cache,
display,
raw_command,
ApplicationError,
get_subclasses,
)
from .util_common import (
CommonConfig,
)
from .processes import (
Process,
get_current_process,
)
from .config import (
EnvironmentConfig,
)
from .metadata import (
DebuggerFlags,
)
from .data import (
data_context,
)
class DebuggerProfile(t.Protocol):
    """Protocol for debugger profiles."""

    @property
    def debugger_host(self) -> str:
        """The hostname to expose to the debugger."""

    @property
    def debugger_port(self) -> int:
        """The port to expose to the debugger."""

    def get_source_mapping(self) -> dict[str, str]:
        """The source mapping to expose to the debugger."""
@dataclasses.dataclass(frozen=True, kw_only=True)
class DebuggerSettings(metaclass=abc.ABCMeta):
"""Common debugger settings."""
port: int = 5678
"""
The port on the origin host which is listening for incoming connections from the debugger.
SSH port forwarding will be automatically configured for non-local hosts to connect to this port as needed.
"""
def as_dict(self) -> dict[str, object]:
"""Convert this instance to a dict."""
data = dataclasses.asdict(self)
data.update(__type__=self.__class__.__name__)
return data
@classmethod
def from_dict(cls, value: dict[str, t.Any]) -> t.Self:
"""Load an instance from a dict."""
debug_cls = globals()[value.pop('__type__')]
return debug_cls(**value)
@classmethod
def get_debug_type(cls) -> str:
"""Return the name for this debugger."""
return cls.__name__.removesuffix('Settings').lower()
@classmethod
def get_config_env_var_name(cls) -> str:
"""Return the name of the environment variable used to customize settings for this debugger."""
return f'ANSIBLE_TEST_REMOTE_DEBUGGER_{cls.get_debug_type().upper()}'
@classmethod
def parse(cls, value: str) -> t.Self:
"""Parse debugger settings from the given JSON and apply defaults."""
try:
settings = cls(**json.loads(value))
except Exception as ex:
raise ApplicationError(f"Invalid {cls.get_debug_type()} settings: {ex}") from ex
return cls.apply_defaults(settings)
@classmethod
@abc.abstractmethod
def is_active(cls) -> bool:
"""Detect if the debugger is active."""
@classmethod
@abc.abstractmethod
def apply_defaults(cls, settings: t.Self) -> t.Self:
"""Apply defaults to the given settings."""
@abc.abstractmethod
def get_python_package(self) -> str:
"""The Python package to install for debugging."""
@abc.abstractmethod
def activate_debugger(self, profile: DebuggerProfile) -> None:
"""Activate the debugger in ansible-test after delegation."""
@abc.abstractmethod
def get_ansiballz_config(self, profile: DebuggerProfile) -> dict[str, object]:
"""Gets the extra configuration data for the AnsiballZ extension module."""
@abc.abstractmethod
def get_cli_arguments(self, profile: DebuggerProfile) -> list[str]:
"""Get command line arguments for the debugger when running Ansible CLI programs."""
@abc.abstractmethod
def get_environment_variables(self, profile: DebuggerProfile) -> dict[str, str]:
"""Get environment variables needed to configure the debugger for debugging."""
@dataclasses.dataclass(frozen=True, kw_only=True)
class PydevdSettings(DebuggerSettings):
    """Settings for the pydevd debugger."""
    package: str | None = None
    """
    The Python package to install for debugging.
    If `None` then the package will be auto-detected.
    If an empty string, then no package will be installed.
    """
    module: str | None = None
    """
    The Python module to import for debugging.
    This should be pydevd or a derivative.
    If not provided it will be auto-detected.
    """
    settrace: dict[str, object] = dataclasses.field(default_factory=dict)
    """
    Options to pass to the `{module}.settrace` method.
    Used for running AnsiballZ modules only.
    The `host` and `port` options will be provided by ansible-test.
    The `suspend` option defaults to `False`.
    """
    args: list[str] = dataclasses.field(default_factory=list)
    """
    Arguments to pass to `pydevd` on the command line.
    Used for running Ansible CLI programs only.
    The `--client` and `--port` options will be provided by ansible-test.
    """
    @classmethod
    def is_active(cls) -> bool:
        """Report whether this process is already being hosted by a pydevd debugger."""
        return detect_pydevd_port() is not None
    @classmethod
    def apply_defaults(cls, settings: t.Self) -> t.Self:
        """
        Fill in unspecified settings, preferring values detected from the running environment.

        The steps are order-dependent: the module default influences the package default,
        which is why module resolution happens first.
        """
        if not settings.module:
            # Default the module from the package name; pydevd-pycharm ships the `pydevd_pycharm` module.
            # An unset/empty package also defaults to `pydevd_pycharm` here.
            if not settings.package or 'pydevd-pycharm' in settings.package:
                module = 'pydevd_pycharm'
            else:
                module = 'pydevd'
            settings = dataclasses.replace(settings, module=module)
        if settings.package is None:
            if settings.module == 'pydevd_pycharm':
                # Pin pydevd-pycharm to the running PyCharm version when it can be detected;
                # otherwise leave the package unset (no installation).
                if pycharm_version := detect_pycharm_version():
                    package = f'pydevd-pycharm~={pycharm_version}'
                else:
                    package = None
            else:
                package = 'pydevd'
            settings = dataclasses.replace(settings, package=package)
        # The dataclass is frozen, but the `settrace` dict it holds is mutable and updated in place.
        settings.settrace.setdefault('suspend', False)
        if port := detect_pydevd_port():
            # Prefer the port of the debugger actually hosting this process.
            settings = dataclasses.replace(settings, port=port)
        if detect_pycharm_process():
            # This only works with the default PyCharm debugger.
            # Using it with PyCharm's "Python Debug Server" results in hangs in Ansible workers.
            # Further investigation is required to understand the cause.
            settings = dataclasses.replace(settings, args=settings.args + ['--multiprocess'])
        return settings
    def get_python_package(self) -> str:
        """
        Return the Python package to install for debugging.

        NOTE(review): can return `None` (skip installation) despite the `str` annotation
        inherited from the base class — callers appear to tolerate this; confirm.
        """
        if self.package is None and self.module == 'pydevd_pycharm':
            display.warning('Skipping installation of `pydevd-pycharm` since the running PyCharm version was not detected.')
        return self.package
    def activate_debugger(self, profile: DebuggerProfile) -> None:
        """Import the configured pydevd module and connect it to the debugger via `settrace`."""
        debugging_module = importlib.import_module(self.module)
        debugging_module.settrace(**self._get_settrace_arguments(profile))
    def get_ansiballz_config(self, profile: DebuggerProfile) -> dict[str, object]:
        """Get the extra configuration data for the AnsiballZ extension module."""
        return dict(
            module=self.module,
            settrace=self._get_settrace_arguments(profile),
            source_mapping=profile.get_source_mapping(),
        )
    def get_cli_arguments(self, profile: DebuggerProfile) -> list[str]:
        """Get command line arguments for running Ansible CLI programs under pydevd."""
        # Although `pydevd_pycharm` can be used to invoke `settrace`, it cannot be used to run the debugger on the command line.
        return ['-m', 'pydevd', '--client', profile.debugger_host, '--port', str(profile.debugger_port)] + self.args + ['--file']
    def get_environment_variables(self, profile: DebuggerProfile) -> dict[str, str]:
        """Get environment variables needed to configure pydevd for debugging."""
        return dict(
            PATHS_FROM_ECLIPSE_TO_PYTHON=json.dumps(list(profile.get_source_mapping().items())),
            PYDEVD_DISABLE_FILE_VALIDATION="1",
        )
    def _get_settrace_arguments(self, profile: DebuggerProfile) -> dict[str, object]:
        """Get settrace arguments for pydevd. The profile's endpoint overrides any user-supplied host/port."""
        return self.settrace | dict(
            host=profile.debugger_host,
            port=profile.debugger_port,
        )
@dataclasses.dataclass(frozen=True, kw_only=True)
class DebugpySettings(DebuggerSettings):
    """Settings for the debugpy debugger."""

    connect: dict[str, object] = dataclasses.field(default_factory=dict)
    """
    Options to pass to the `debugpy.connect` method.
    Used for running AnsiballZ modules and ansible-test after delegation.
    The endpoint addr, `access_token`, and `parent_session_pid` options will be provided by ansible-test.
    """

    args: list[str] = dataclasses.field(default_factory=list)
    """
    Arguments to pass to `debugpy` on the command line.
    Used for running Ansible CLI programs only.
    The `--connect`, `--adapter-access-token`, and `--parent-session-pid` options will be provided by ansible-test.
    """

    @classmethod
    def is_active(cls) -> bool:
        """Report whether this process is already being hosted by a debugpy debugger."""
        detected = detect_debugpy_options()
        return detected is not None

    @classmethod
    def apply_defaults(cls, settings: t.Self) -> t.Self:
        """Fill in settings from the debugpy instance hosting ansible-test, when one is present."""
        detected = detect_debugpy_options()

        if detected is None:
            display.warning('Debugging will be limited to the first connection. Run ansible-test under debugpy to support multiple connections.')
            return settings

        settings = dataclasses.replace(settings, port=detected.port)
        # The dataclass is frozen, but the held `connect` dict is mutable and updated in place.
        settings.connect.update(
            access_token=detected.adapter_access_token,
            parent_session_pid=os.getpid(),
        )

        return settings

    def get_python_package(self) -> str:
        """Return the Python package to install for debugging."""
        return 'debugpy'

    def activate_debugger(self, profile: DebuggerProfile) -> None:
        """Connect this process to the debugger using `debugpy.connect`."""
        import debugpy  # pylint: disable=import-error

        endpoint = (profile.debugger_host, profile.debugger_port)
        debugpy.connect(endpoint, **self.connect)

    def get_ansiballz_config(self, profile: DebuggerProfile) -> dict[str, object]:
        """Get the extra configuration data for the AnsiballZ extension module."""
        return {
            'host': profile.debugger_host,
            'port': profile.debugger_port,
            'connect': self.connect,
            'source_mapping': profile.get_source_mapping(),
        }

    def get_cli_arguments(self, profile: DebuggerProfile) -> list[str]:
        """Get command line arguments for running Ansible CLI programs under debugpy."""
        arguments = ['-m', 'debugpy', '--connect', f"{profile.debugger_host}:{profile.debugger_port}"]

        access_token = self.connect.get('access_token')
        if access_token:
            arguments.extend(['--adapter-access-token', str(access_token)])

        session_pid = self.connect.get('parent_session_pid')
        if session_pid:
            arguments.extend(['--parent-session-pid', str(session_pid)])

        arguments.extend(self.args)

        return arguments

    def get_environment_variables(self, profile: DebuggerProfile) -> dict[str, str]:
        """Get environment variables needed to configure the debugger for debugging."""
        return {
            'PATHS_FROM_ECLIPSE_TO_PYTHON': json.dumps(list(profile.get_source_mapping().items())),
            'PYDEVD_DISABLE_FILE_VALIDATION': "1",
        }
def initialize_debugger(args: CommonConfig) -> None:
    """Initialize the debugger settings before delegation."""
    # Only environment-based configs participate, and only before delegation
    # (metadata is already loaded after delegation).
    if not isinstance(args, EnvironmentConfig) or args.metadata.loaded:
        return

    collection = data_context().content.collection

    if collection:
        args.metadata.collection_root = collection.root

    load_debugger_settings(args)
def load_debugger_settings(args: EnvironmentConfig) -> None:
    """
    Load the remote debugger settings, storing them on `args.metadata.debugger_settings`.

    When no debugger (or configuration) is found, debugging is disabled by clearing
    `args.metadata.debugger_flags` and returning early.
    """
    use_debugger: type[DebuggerSettings] | None = None
    if args.metadata.debugger_flags.on_demand:
        # On-demand debugging only enables debugging if we're running under a debugger, otherwise it's a no-op.
        for candidate_debugger in get_subclasses(DebuggerSettings):
            if candidate_debugger.is_active():
                use_debugger = candidate_debugger
                break
        else:
            # for/else: no active debugger found — disable debugging entirely.
            display.info('Debugging disabled because no debugger was detected.', verbosity=1)
            args.metadata.debugger_flags = DebuggerFlags.all(False)
            return
        display.info('Enabling on-demand debugging.', verbosity=1)
        if not args.metadata.debugger_flags.enable:
            # Assume the user wants all debugging features enabled, since on-demand debugging with no features is pointless.
            args.metadata.debugger_flags = DebuggerFlags.all(True)
    if not args.metadata.debugger_flags.enable:
        return
    if not use_debugger:  # detect debug type based on env var
        for candidate_debugger in get_subclasses(DebuggerSettings):
            if candidate_debugger.get_config_env_var_name() in os.environ:
                use_debugger = candidate_debugger
                break
        else:
            # for/else: no per-debugger env var set — disable debugging entirely.
            display.info('Debugging disabled because no debugger configuration was provided.', verbosity=1)
            args.metadata.debugger_flags = DebuggerFlags.all(False)
            return
    # An empty/missing env var value means "use defaults" ('{}' parses to no overrides).
    config = os.environ.get(use_debugger.get_config_env_var_name()) or '{}'
    settings = use_debugger.parse(config)
    args.metadata.debugger_settings = settings
    display.info(f'>>> Debugger Settings ({use_debugger.get_debug_type()})\n{json.dumps(dataclasses.asdict(settings), indent=4)}', verbosity=3)
@cache
def detect_pydevd_port() -> int | None:
    """Return the port for the pydevd instance hosting this process, or `None` if not detected. The result is cached."""
    current_process = get_current_process_cached()
    args = current_process.args

    # A pydevd-hosted process has `/pydevd.py` in its command line along with a `--port` option.
    # Guarding on membership (rather than a truthy `args.index('--port')`) avoids raising
    # ValueError when `--port` is absent, and a silent false negative if it sits at index 0.
    if any('/pydevd.py' in arg for arg in args) and '--port' in args:
        port = int(args[args.index('--port') + 1])
        display.info(f'Detected pydevd debugger port {port}.', verbosity=1)
        return port

    return None
@cache
def detect_pycharm_version() -> str | None:
    """Return the version of PyCharm running ansible-test, or `None` if PyCharm was not detected. The result is cached."""
    pycharm = detect_pycharm_process()

    if not pycharm:
        return None

    # Ask the PyCharm binary itself for its version and pull it from the build line.
    output = raw_command([pycharm.args[0], '--version'], capture=True)[0]
    match = re.search('^Build #PY-(?P<version>[0-9.]+)$', output, flags=re.MULTILINE)

    if not match:
        return None

    version = match.group('version')
    display.info(f'Detected PyCharm version {version}.', verbosity=1)

    return version
@cache
def detect_pycharm_process() -> Process | None:
    """Return the PyCharm process running ansible-test, or `None` if PyCharm was not detected. The result is cached."""
    # Walk up the process ancestry looking for a `pycharm` executable.
    candidate = get_current_process_cached().parent

    while candidate:
        if candidate.path.name == 'pycharm':
            return candidate

        candidate = candidate.parent

    return None
@cache
def get_current_process_cached() -> Process:
    """Return the current process. The result is cached, so repeated detection calls share one process-tree snapshot."""
    return get_current_process()
@dataclasses.dataclass(frozen=True, kw_only=True)
class DebugpyOptions:
    """Options detected from the debugpy instance hosting this process."""
    port: int  # port the hosting debugpy instance is using
    adapter_access_token: str | None  # token for connecting additional sessions to the adapter, if one was set
@cache
def detect_debugpy_options() -> DebugpyOptions | None:
    """Return the options for the debugpy instance hosting this process, or `None` if not detected. The result is cached."""
    if "debugpy" not in sys.modules:
        return None

    import debugpy  # pylint: disable=import-error

    # Prefer `get_cli_options`, the public API introduced after debugpy 1.8.15.
    # The private `debugpy.server.cli` fallback can be dropped once the new version is released.
    if hasattr(debugpy, 'get_cli_options'):
        cli_options = debugpy.get_cli_options()
    else:
        from debugpy.server import cli  # pylint: disable=import-error

        cli_options = cli.options

    # The address is unset when the debugger was not configured through the CLI as expected.
    if not cli_options.address:
        return None

    port = cli_options.address[1]
    display.info(f'Detected debugpy debugger port {port}.', verbosity=1)

    return DebugpyOptions(
        port=port,
        adapter_access_token=cli_options.adapter_access_token,
    )
| {
"repo_id": "ansible/ansible",
"file_path": "test/lib/ansible_test/_internal/debugging.py",
"license": "GNU General Public License v3.0",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.