File size: 9,168 Bytes
6a3de9e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
"""
Performance tests for the TodoAgent functionality.

These tests verify the performance characteristics of the AI agent including:
- Response time under various conditions
- Memory usage patterns
- Concurrency handling
- Resource utilization
"""
import asyncio
import time
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from ai.agents.todo_agent import TodoAgent
from models.conversation import Conversation
from uuid import UUID


@pytest.fixture
def todo_agent():
    """Provide a TodoAgent whose API-facing internals are stubbed out."""
    instance = TodoAgent()
    # Swap the real client/config/agent for mocks so no test in this
    # module ever performs an actual model or network call.
    instance.client = MagicMock()
    instance.config = MagicMock()
    instance._agent = MagicMock()
    return instance


@pytest.mark.asyncio
async def test_response_time_single_request(todo_agent):
    """Test response time for a single request.

    Timing uses time.perf_counter() — a monotonic, high-resolution clock —
    instead of time.time(), whose wall-clock value can jump (e.g. NTP
    adjustments) and yield negative or skewed durations.
    """
    user_id = "test-user-123"
    message = "Add a task: Buy groceries"
    conversation = MagicMock()
    conversation.id = UUID("12345678-1234-5678-1234-567812345678")

    # Stub the runner result so no real agent execution happens.
    mock_result = AsyncMock()
    mock_result.final_output = "Task 'Buy groceries' added successfully"

    with patch('ai.agents.todo_agent.Runner') as mock_runner:
        mock_runner.run = AsyncMock(return_value=mock_result)

        start_time = time.perf_counter()
        result = await todo_agent.process_message(user_id, message, conversation)
        response_time = time.perf_counter() - start_time

        # Verify response time is reasonable (should be under 5 seconds even with mocked API delay)
        assert response_time < 5.0
        assert result["response"] == "Task 'Buy groceries' added successfully"


@pytest.mark.asyncio
async def test_response_time_multiple_requests_sequential(todo_agent):
    """Test response time for multiple sequential requests.

    Uses time.perf_counter() (monotonic) rather than time.time() so that
    measured durations cannot be distorted by wall-clock adjustments.
    """
    messages = [
        "Add a task: Buy groceries",
        "Add a task: Clean the house",
        "List my tasks",
        "Complete task 1",
        "Update task 2: Clean the entire house"
    ]

    user_id = "test-user-123"
    conversation = MagicMock()
    conversation.id = UUID("12345678-1234-5678-1234-567812345678")

    # Stub the runner result so no real agent execution happens.
    mock_result = AsyncMock()
    mock_result.final_output = "Processed successfully"

    with patch('ai.agents.todo_agent.Runner') as mock_runner:
        mock_runner.run = AsyncMock(return_value=mock_result)

        total_start_time = time.perf_counter()
        # Iterate the messages directly — the index was never used.
        for message in messages:
            start_time = time.perf_counter()
            result = await todo_agent.process_message(user_id, message, conversation)
            response_time = time.perf_counter() - start_time

            # Each individual request should be fast
            assert response_time < 5.0
            assert "response" in result

        total_time = time.perf_counter() - total_start_time

        # Total time for 5 requests should be reasonable
        assert total_time < 25.0  # 5 requests * 5 seconds max each


@pytest.mark.asyncio
async def test_concurrent_request_handling(todo_agent):
    """Test how the agent handles concurrent requests.

    The patch of ``ai.agents.todo_agent.Runner`` is applied ONCE around the
    whole ``gather`` instead of once per coroutine: entering/exiting
    overlapping ``patch()`` contexts for the same target from interleaved
    coroutines can restore the wrong "original" attribute (a mock can leak
    past the test). Timing uses the monotonic ``time.perf_counter()``.
    """
    user_id = "test-user-123"
    conversation = MagicMock()
    conversation.id = UUID("12345678-1234-5678-1234-567812345678")

    # Stub the runner result so no real agent execution happens.
    mock_result = AsyncMock()
    mock_result.final_output = "Processed successfully"

    messages = [
        "Add a task: Task 1",
        "Add a task: Task 2",
        "Add a task: Task 3",
        "Add a task: Task 4",
        "Add a task: Task 5"
    ]

    with patch('ai.agents.todo_agent.Runner') as mock_runner:
        mock_runner.run = AsyncMock(return_value=mock_result)

        start_time = time.perf_counter()
        # Execute all requests concurrently under the single shared patch.
        tasks = [
            todo_agent.process_message(user_id, msg, conversation)
            for msg in messages
        ]
        results = await asyncio.gather(*tasks)
        total_time = time.perf_counter() - start_time

    # Verify all requests completed successfully
    assert len(results) == len(messages)
    for result in results:
        assert "response" in result

    # Total time should be reasonable considering concurrency
    # This should ideally be faster than sequential processing
    assert total_time < 25.0  # Should be faster than 5 * 5 seconds sequential


@pytest.mark.asyncio
async def test_memory_usage_consistency(todo_agent):
    """Run a batch of requests to check the agent stays stable across calls."""
    user_id = "test-user-123"
    conversation = MagicMock()
    conversation.id = UUID("12345678-1234-5678-1234-567812345678")

    # Canned runner result — keeps the loop free of real API traffic.
    mock_result = AsyncMock()
    mock_result.final_output = "Processed successfully"

    with patch('ai.agents.todo_agent.Runner') as mock_runner:
        mock_runner.run = AsyncMock(return_value=mock_result)

        # Ten back-to-back requests; each must come back as a dict with
        # a "response" key, with no degradation between iterations.
        for request_number in range(10):
            result = await todo_agent.process_message(
                user_id,
                f"Add a task: Test task {request_number}",
                conversation,
            )
            assert isinstance(result, dict)
            assert "response" in result

    # Completing the loop without failures is the pass condition.


@pytest.mark.asyncio
async def test_large_message_handling(todo_agent):
    """Test handling of large messages.

    Uses time.perf_counter() (monotonic) instead of time.time() so the
    measured duration cannot be skewed by wall-clock adjustments.
    """
    user_id = "test-user-123"
    conversation = MagicMock()
    conversation.id = UUID("12345678-1234-5678-1234-567812345678")

    # Create a large message (~22 KB of repeated text).
    large_message = "Add a task: " + "very long description " * 1000

    # Stub the runner result so no real agent execution happens.
    mock_result = AsyncMock()
    mock_result.final_output = "Task added successfully"

    with patch('ai.agents.todo_agent.Runner') as mock_runner:
        mock_runner.run = AsyncMock(return_value=mock_result)

        start_time = time.perf_counter()
        result = await todo_agent.process_message(user_id, large_message, conversation)
        response_time = time.perf_counter() - start_time

        # Should handle large messages within reasonable time
        assert response_time < 10.0  # Allow more time for large messages
        assert "response" in result


@pytest.mark.asyncio
async def test_command_recognition_performance(todo_agent):
    """Test performance of command recognition function.

    Uses the monotonic time.perf_counter() for timing; time.time() can
    jump with wall-clock adjustments and distort the average.
    """
    test_messages = [
        "Add a task: Buy groceries",
        "Show me my tasks",
        "Complete task 1",
        "Delete task 2",
        "Update task 3 with new details",
        "Random message that doesn't match anything",
        "Another random message",
        "Yet another test message",
        "More tasks to add",
        "Tasks to list"
    ]

    start_time = time.perf_counter()
    for msg in test_messages:
        command = await todo_agent.recognize_command(msg)
        # Verify command recognition doesn't throw errors
        assert command is None or isinstance(command, str)
    total_time = time.perf_counter() - start_time

    avg_time_per_message = total_time / len(test_messages)

    # Average time per message should be very fast (under 100ms per message)
    assert avg_time_per_message < 0.1


@pytest.mark.asyncio
async def test_task_extraction_performance(todo_agent):
    """Test performance of task extraction function.

    Uses the monotonic time.perf_counter() for timing; time.time() can
    jump with wall-clock adjustments and distort the average.
    """
    test_messages = [
        "Add task: Buy groceries",
        "Create: Clean the house",
        "New task - Walk the dog",
        "Task: Prepare dinner with ingredients: chicken, vegetables, rice",
        "Simple task: Read a book"
    ]

    start_time = time.perf_counter()
    for msg in test_messages:
        details = todo_agent.extract_task_details(msg)
        # Verify extraction doesn't throw errors
        assert isinstance(details, dict)
        assert "title" in details
    total_time = time.perf_counter() - start_time

    avg_time_per_message = total_time / len(test_messages)

    # Average time per message should be very fast (under 10ms per message)
    assert avg_time_per_message < 0.01


@pytest.mark.asyncio
async def test_error_handling_performance(todo_agent):
    """Test performance when handling errors.

    Uses time.perf_counter() (monotonic) instead of time.time() so the
    measured error-path duration cannot be skewed by clock adjustments.
    """
    user_id = "test-user-123"
    message = "Add a task: Buy groceries"
    conversation = MagicMock()
    conversation.id = UUID("12345678-1234-5678-1234-567812345678")

    # Force the runner to fail so the agent's error path is exercised.
    with patch('ai.agents.todo_agent.Runner') as mock_runner:
        mock_runner.run = AsyncMock(side_effect=Exception("API Error"))

        start_time = time.perf_counter()
        result = await todo_agent.process_message(user_id, message, conversation)
        response_time = time.perf_counter() - start_time

        # Error handling should be fast
        assert response_time < 2.0
        assert "response" in result
        assert "error" in result["response"]


if __name__ == "__main__":
    # Allow running this performance suite directly via `python <file>`
    # instead of invoking pytest on the command line.
    pytest.main([__file__])