"""
Integration tests for the TodoAgent functionality.
These tests verify that the AI agent integrates properly with:
- Database operations
- Conversation management
- Task service
- API endpoints
- MCP server
"""
import pytest
from fastapi.testclient import TestClient
from unittest.mock import AsyncMock, MagicMock, patch
import asyncio
from main import app # Adjust import based on your main app location
from ai.agents.todo_agent import TodoAgent
from models.conversation import Conversation
from models.message import Message
from uuid import UUID, uuid4
@pytest.fixture
def client():
    """Provide a FastAPI ``TestClient`` bound to the application under test."""
    return TestClient(app)
@pytest.fixture
def todo_agent():
    """Build a TodoAgent whose external collaborators are stubbed out.

    The client, config, and wrapped agent are replaced with mocks so the
    tests never perform real API calls.
    """
    agent = TodoAgent()
    agent.client = MagicMock()  # no real API client
    agent.config = MagicMock()
    agent._agent = MagicMock()
    return agent
@pytest.mark.asyncio
async def test_full_chat_flow_integration(todo_agent):
    """Exercise the full chat path: message in, structured response out."""
    user_id = "test-user-123"
    message = "Add a task: Buy groceries"

    conversation = MagicMock()
    conversation.id = uuid4()

    # Stub the runner result so no model call is made.
    agent_result = AsyncMock()
    agent_result.final_output = "Task 'Buy groceries' added successfully"

    with patch('ai.agents.todo_agent.Runner') as runner_cls:
        runner_cls.run = AsyncMock(return_value=agent_result)
        result = await todo_agent.process_message(user_id, message, conversation)

        # process_message must return a dict with this exact contract.
        assert isinstance(result, dict)
        for key in ("response", "conversation_id", "tool_calls", "requires_action"):
            assert key in result
        assert result["response"] == "Task 'Buy groceries' added successfully"
        assert result["conversation_id"] == str(conversation.id)
@pytest.mark.asyncio
async def test_chat_endpoint_integration(client):
    """Hit the chat endpoint with mocked agent and conversation manager."""
    with patch('ai.agents.conversation_manager.ConversationManager') as conv_mgr_cls, \
            patch('ai.agents.todo_agent.TodoAgent') as agent_cls:
        # Conversation manager stub: creation and message persistence succeed.
        conv_mgr = MagicMock()
        conv_mgr.create_conversation = AsyncMock(return_value=MagicMock(id=uuid4()))
        conv_mgr.add_message = AsyncMock()
        conv_mgr_cls.return_value = conv_mgr

        # Agent stub: always answers with a canned success payload.
        agent = MagicMock()
        agent.process_message = AsyncMock(return_value={
            "response": "Task added successfully",
            "conversation_id": str(uuid4()),
            "tool_calls": [],
            "requires_action": False,
        })
        agent_cls.return_value = agent

        response = client.post(
            "/api/test-user-123/chat",
            json={"message": "Add a task: Buy groceries"},
            headers={"Content-Type": "application/json"},
        )

        assert response.status_code == 200
        payload = response.json()
        assert "response" in payload
        assert "conversation_id" in payload
@pytest.mark.asyncio
async def test_conversation_creation_integration(todo_agent):
    """Create a conversation through ConversationManager against a mock DB."""
    from ai.agents.conversation_manager import ConversationManager
    from sqlmodel.ext.asyncio.session import AsyncSession

    session = MagicMock(spec=AsyncSession)
    manager = ConversationManager(session)
    user_id = "test-user-123"

    # DB writes are patched out; only the manager's own logic runs.
    with patch.object(session, 'add'), \
            patch.object(session, 'commit', new_callable=AsyncMock), \
            patch.object(session, 'refresh', new_callable=AsyncMock):
        conversation = await manager.create_conversation(user_id)

        assert conversation.user_id == user_id
        assert hasattr(conversation, 'expires_at')
@pytest.mark.asyncio
async def test_tool_execution_integration(todo_agent):
    """Verify the agent surfaces tool-call information in its response."""
    user_id = "test-user-123"
    conversation = MagicMock()
    conversation.id = uuid4()

    # Runner result with an (empty) set of tool calls attached.
    run_result = AsyncMock()
    run_result.final_output = "Processing your request..."
    run_result.tool_calls = []

    with patch('ai.agents.todo_agent.Runner') as runner_cls:
        runner_cls.run = AsyncMock(return_value=run_result)
        result = await todo_agent.process_message(
            user_id, "Add a task: Buy groceries", conversation
        )

        assert "response" in result
        assert "tool_calls" in result
        assert isinstance(result["tool_calls"], list)
@pytest.mark.asyncio
async def test_command_recognition_integration(todo_agent):
    """Map natural-language phrasings onto the expected command names."""
    cases = [
        ("Add a task: Buy groceries", "add_task"),
        ("Create task: Clean the house", "add_task"),
        ("Show me my tasks", "list_tasks"),
        ("List all my tasks", "list_tasks"),
        ("Complete task 1", "complete_task"),
        ("Mark task as done", "complete_task"),
        ("Delete task 3", "delete_task"),
        ("Remove this task", "delete_task"),
        ("Update task 2", "update_task"),
        ("Change task details", "update_task"),
        ("Hello world", None),  # no command should match
    ]

    for text, expected in cases:
        recognized = await todo_agent.recognize_command(text)
        assert recognized == expected, f"Failed for message: {text}"
def test_task_extraction_integration(todo_agent):
    """Test task detail extraction from various message formats.

    Note: ``extract_task_details`` is a synchronous call, so this test is a
    plain function — the previous ``@pytest.mark.asyncio``/``async def``
    wrapper contained no ``await`` and was unnecessary.
    """
    test_cases = [
        ("Add task: Buy groceries", {"title": "Buy groceries"}),
        ("Create: Clean the house", {"title": "Clean the house"}),
        ("New task - Walk the dog", {"title": "Walk the dog"}),
        ("Task: Prepare dinner", {"title": "Prepare dinner"}),
        ("Add: Simple task", {"title": "Simple task"}),
    ]

    for message, expected in test_cases:
        result = todo_agent.extract_task_details(message)
        assert "title" in result
        # Substring match tolerates extra normalization by the extractor.
        assert expected["title"] in result["title"]
@pytest.mark.asyncio
async def test_multiple_conversation_integration(todo_agent):
    """Run several users' chats concurrently and check each response."""
    user_ids = ["user-1", "user-2", "user-3"]
    messages = [
        "Add a task: User 1 task",
        "Add a task: User 2 task",
        "Add a task: User 3 task",
    ]

    # Shared stubbed runner result for every user.
    run_result = AsyncMock()
    run_result.final_output = "Task added successfully"

    async def chat_as(user_id, message):
        conversation = MagicMock()
        conversation.id = uuid4()
        with patch('ai.agents.todo_agent.Runner') as runner_cls:
            runner_cls.run = AsyncMock(return_value=run_result)
            return await todo_agent.process_message(user_id, message, conversation)

    pending = [chat_as(uid, msg) for uid, msg in zip(user_ids, messages)]
    results = await asyncio.gather(*pending)

    assert len(results) == len(user_ids)
    for result in results:
        assert "response" in result
        assert "conversation_id" in result
@pytest.mark.asyncio
async def test_error_recovery_integration(todo_agent):
    """A failure on one request must not break subsequent processing."""
    user_id = "test-user-123"
    conversation = MagicMock()
    conversation.id = uuid4()

    ok_result = AsyncMock()
    ok_result.final_output = "Task added successfully"

    with patch('ai.agents.todo_agent.Runner') as runner_cls:
        # First call succeeds; second raises, exactly as the counter-based
        # side_effect did before (only two calls are ever made here).
        runner_cls.run = AsyncMock(
            side_effect=[ok_result, Exception("API Error")]
        )

        # Call 1: normal success path.
        first = await todo_agent.process_message(
            user_id, "Add task: First task", conversation
        )
        assert "response" in first

        # Call 2: the agent may either return an error payload or raise —
        # both count as graceful handling for this test.
        try:
            second = await todo_agent.process_message(
                user_id, "Add task: Second task", conversation
            )
            assert "response" in second
        except Exception:
            pass
@pytest.mark.asyncio
async def test_mcp_server_connection_integration(todo_agent):
    """Check the agent exposes MCP-related attributes and still processes."""
    # The fixture guarantees these exist; assert the MCP wiring points.
    assert hasattr(todo_agent, 'client')
    assert hasattr(todo_agent, 'config')

    user_id = "test-user-123"
    conversation = MagicMock()
    conversation.id = uuid4()

    run_result = AsyncMock()
    run_result.final_output = "Processed successfully"

    # With the runner stubbed, message processing should complete cleanly.
    with patch('ai.agents.todo_agent.Runner') as runner_cls:
        runner_cls.run = AsyncMock(return_value=run_result)
        result = await todo_agent.process_message(
            user_id, "Add a task: Test", conversation
        )
        assert result["response"] == "Processed successfully"
# Allow running this file directly; removed the stray trailing "|" artifact
# that made the original line a syntax error.
if __name__ == "__main__":
    pytest.main([__file__])