# insightfy-bloom-ms-mpms / test_login_performance.py
# Commit 5836efd by MukeshKapoor25 — feat(auth): Enhance Login API with Access Menu Support
import pytest
import asyncio
import time
import statistics
from concurrent.futures import ThreadPoolExecutor, as_completed
from fastapi.testclient import TestClient
from unittest.mock import patch, AsyncMock
import threading
# Import the FastAPI app (adjust import path as needed)
from main import app # Assuming main.py contains the FastAPI app
from app.services.access_menu_service import AccessMenuService
from app.schemas.access_menu_schema import AccessMenuData, RoleInfo, WidgetInfo, MenuItem
class TestLoginPerformance:
    """Performance tests for the enhanced login endpoint."""

    @pytest.fixture
    def client(self):
        """HTTP test client bound to the FastAPI app."""
        return TestClient(app)

    @pytest.fixture
    def valid_login_data(self):
        """Credentials accepted by the auth backend under test."""
        return {
            "username": "admin@example.com",
            "password": "admin123"
        }
@pytest.fixture
def mock_access_menu_data(self):
"""Mock access menu data for testing."""
return AccessMenuData(
permissions=[
"dashboard.VIEW_DASHBOARD",
"settings.VIEW_MERCHANT_SETTING",
"users.VIEW_USERS",
"reports.VIEW_REPORTS"
],
dashboard_layout={
"layout_id": "admin_layout",
"widgets": [
{"widget_id": "sales_overview", "position": {"x": 0, "y": 0}},
{"widget_id": "orders_summary", "position": {"x": 3, "y": 0}},
{"widget_id": "revenue_chart", "position": {"x": 0, "y": 2}}
]
},
accessible_widgets=[
WidgetInfo(
widget_id=f"wid_widget_{i:03d}",
widget_type="kpi" if i % 2 == 0 else "chart",
title=f"Widget {i}",
accessible=True
) for i in range(10) # 10 widgets
],
menu_items=[
MenuItem(
id="dashboard",
label="Dashboard",
route="/dashboard",
icon="dashboard",
permissions_required=["VIEW_DASHBOARD"]
),
MenuItem(
id="users",
label="Users",
route="/users",
icon="people",
permissions_required=["VIEW_USERS"]
),
MenuItem(
id="settings",
label="Settings",
route="/settings",
icon="settings",
permissions_required=["VIEW_MERCHANT_SETTING"]
),
MenuItem(
id="reports",
label="Reports",
route="/reports",
icon="analytics",
permissions_required=["VIEW_REPORTS"]
),
MenuItem(
id="profile",
label="Profile",
route="/profile",
icon="account_circle",
permissions_required=[]
)
],
role_info=RoleInfo(
role_id="admin",
role_name="Administrator",
merchant_id="MERCHANT001",
permissions=[
"dashboard.VIEW_DASHBOARD",
"settings.VIEW_MERCHANT_SETTING",
"users.VIEW_USERS",
"reports.VIEW_REPORTS"
]
)
)
def test_login_response_time_requirement(self, client, valid_login_data, mock_access_menu_data):
"""Test that login completes within 2 seconds including access menu preparation."""
with patch.object(AccessMenuService, 'prepare_access_menu',
return_value=mock_access_menu_data) as mock_prepare:
# Measure multiple login attempts
response_times = []
for _ in range(5):
start_time = time.time()
response = client.post("/auth/login", data=valid_login_data)
end_time = time.time()
response_time = end_time - start_time
response_times.append(response_time)
# Verify successful response
assert response.status_code == 200
response_data = response.json()
assert response_data["success"] is True
assert "access_menu" in response_data["data"]
# Calculate statistics
avg_response_time = statistics.mean(response_times)
max_response_time = max(response_times)
min_response_time = min(response_times)
print(f"\nLogin Performance Statistics:")
print(f"Average response time: {avg_response_time:.3f}s")
print(f"Maximum response time: {max_response_time:.3f}s")
print(f"Minimum response time: {min_response_time:.3f}s")
# Performance requirements
assert avg_response_time < 2.0, f"Average response time {avg_response_time:.3f}s exceeds 2.0s requirement"
assert max_response_time < 3.0, f"Maximum response time {max_response_time:.3f}s exceeds 3.0s tolerance"
def test_access_menu_preparation_performance(self, mock_access_menu_data):
"""Test access menu preparation performance in isolation."""
with patch('app.services.role_service.RoleService.get_role') as mock_get_role, \
patch('app.services.role_service.RoleService.get_role_widget_access') as mock_get_widgets, \
patch('app.services.dashboard_service.DashboardService.get_dashboard_layout') as mock_get_layout:
# Setup mocks
mock_get_role.return_value = {
"role_id": "admin",
"name": "Administrator",
"permissions": {
"dashboard": ["VIEW_DASHBOARD"],
"settings": ["VIEW_MERCHANT_SETTING"],
"users": ["VIEW_USERS"]
}
}
mock_get_widgets.return_value = {
"role_id": "admin",
"widgets": [
{"widget_id": f"wid_{i}", "title": f"Widget {i}", "type": "kpi", "accessible": True}
for i in range(10)
]
}
mock_get_layout.return_value = {"layout": "test"}
# Clear cache to ensure fresh measurements
AccessMenuService.invalidate_cache()
# Measure access menu preparation times
preparation_times = []
for _ in range(10):
start_time = time.time()
# Run access menu preparation
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
access_menu = loop.run_until_complete(
AccessMenuService.prepare_access_menu("MERCHANT001", "admin", "admin_user")
)
finally:
loop.close()
end_time = time.time()
preparation_time = end_time - start_time
preparation_times.append(preparation_time)
# Verify successful preparation
assert isinstance(access_menu, AccessMenuData)
assert len(access_menu.permissions) > 0
# Calculate statistics
avg_preparation_time = statistics.mean(preparation_times)
max_preparation_time = max(preparation_times)
print(f"\nAccess Menu Preparation Performance:")
print(f"Average preparation time: {avg_preparation_time:.3f}s")
print(f"Maximum preparation time: {max_preparation_time:.3f}s")
# Performance requirements (should be much faster than total login time)
assert avg_preparation_time < 1.0, f"Average preparation time {avg_preparation_time:.3f}s exceeds 1.0s"
assert max_preparation_time < 1.5, f"Maximum preparation time {max_preparation_time:.3f}s exceeds 1.5s"
def test_concurrent_login_performance(self, client, valid_login_data, mock_access_menu_data):
"""Test performance under concurrent login requests."""
with patch.object(AccessMenuService, 'prepare_access_menu',
return_value=mock_access_menu_data) as mock_prepare:
def perform_login():
"""Perform a single login request."""
start_time = time.time()
response = client.post("/auth/login", data=valid_login_data)
end_time = time.time()
return {
'response': response,
'duration': end_time - start_time,
'success': response.status_code == 200 and response.json().get("success", False)
}
# Test with different concurrency levels
concurrency_levels = [1, 5, 10, 20]
for concurrency in concurrency_levels:
print(f"\nTesting with {concurrency} concurrent requests:")
start_time = time.time()
# Execute concurrent requests
with ThreadPoolExecutor(max_workers=concurrency) as executor:
futures = [executor.submit(perform_login) for _ in range(concurrency)]
results = [future.result() for future in as_completed(futures)]
total_time = time.time() - start_time
# Analyze results
successful_requests = sum(1 for r in results if r['success'])
response_times = [r['duration'] for r in results if r['success']]
if response_times:
avg_response_time = statistics.mean(response_times)
max_response_time = max(response_times)
throughput = successful_requests / total_time
print(f" Successful requests: {successful_requests}/{concurrency}")
print(f" Average response time: {avg_response_time:.3f}s")
print(f" Maximum response time: {max_response_time:.3f}s")
print(f" Throughput: {throughput:.2f} requests/second")
# Performance assertions
assert successful_requests == concurrency, f"Not all requests succeeded: {successful_requests}/{concurrency}"
assert avg_response_time < 3.0, f"Average response time {avg_response_time:.3f}s too high under concurrency"
assert max_response_time < 5.0, f"Maximum response time {max_response_time:.3f}s too high under concurrency"
def test_cache_performance_impact(self, client, valid_login_data, mock_access_menu_data):
"""Test performance impact of caching on login requests."""
with patch.object(AccessMenuService, 'prepare_access_menu',
return_value=mock_access_menu_data) as mock_prepare:
# Test without cache (first request)
AccessMenuService.invalidate_cache()
start_time = time.time()
response1 = client.post("/auth/login", data=valid_login_data)
uncached_time = time.time() - start_time
assert response1.status_code == 200
# Test with cache (subsequent requests)
cached_times = []
for _ in range(5):
start_time = time.time()
response = client.post("/auth/login", data=valid_login_data)
cached_time = time.time() - start_time
cached_times.append(cached_time)
assert response.status_code == 200
avg_cached_time = statistics.mean(cached_times)
print(f"\nCache Performance Impact:")
print(f"Uncached request time: {uncached_time:.3f}s")
print(f"Average cached request time: {avg_cached_time:.3f}s")
print(f"Performance improvement: {((uncached_time - avg_cached_time) / uncached_time * 100):.1f}%")
# Cache should provide some performance benefit
# Note: In this test setup, the improvement might be minimal due to mocking
assert avg_cached_time <= uncached_time * 1.1, "Cache should not significantly degrade performance"
def test_memory_usage_under_load(self, client, valid_login_data, mock_access_menu_data):
"""Test memory usage during sustained login requests."""
import psutil
import os
process = psutil.Process(os.getpid())
with patch.object(AccessMenuService, 'prepare_access_menu',
return_value=mock_access_menu_data) as mock_prepare:
# Measure initial memory usage
initial_memory = process.memory_info().rss / 1024 / 1024 # MB
# Perform many login requests
num_requests = 100
for i in range(num_requests):
response = client.post("/auth/login", data=valid_login_data)
assert response.status_code == 200
# Check memory every 20 requests
if i % 20 == 0:
current_memory = process.memory_info().rss / 1024 / 1024 # MB
memory_increase = current_memory - initial_memory
print(f"Request {i}: Memory usage: {current_memory:.1f}MB (+{memory_increase:.1f}MB)")
# Memory should not grow excessively
assert memory_increase < 100, f"Memory usage increased by {memory_increase:.1f}MB, possible memory leak"
# Final memory check
final_memory = process.memory_info().rss / 1024 / 1024 # MB
total_increase = final_memory - initial_memory
print(f"\nMemory Usage Summary:")
print(f"Initial memory: {initial_memory:.1f}MB")
print(f"Final memory: {final_memory:.1f}MB")
print(f"Total increase: {total_increase:.1f}MB")
# Memory increase should be reasonable
assert total_increase < 50, f"Memory increased by {total_increase:.1f}MB, check for memory leaks"
def test_cache_effectiveness_metrics(self):
"""Test cache effectiveness and statistics."""
# Clear cache
AccessMenuService.invalidate_cache()
# Initial cache stats
initial_stats = AccessMenuService.get_cache_stats()
assert initial_stats["cache_size"] == 0
assert initial_stats["active_entries"] == 0
# Simulate cache usage
with patch('app.services.role_service.RoleService.get_role') as mock_get_role:
mock_get_role.return_value = {
"role_id": "admin",
"name": "Administrator",
"permissions": {"dashboard": ["VIEW_DASHBOARD"]}
}
# Make requests to populate cache
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
# First request - should populate cache
loop.run_until_complete(
AccessMenuService.get_user_permissions("admin", "MERCHANT001")
)
# Check cache stats after population
populated_stats = AccessMenuService.get_cache_stats()
assert populated_stats["cache_size"] > 0
assert populated_stats["active_entries"] > 0
# Second request - should use cache
start_time = time.time()
loop.run_until_complete(
AccessMenuService.get_user_permissions("admin", "MERCHANT001")
)
cached_request_time = time.time() - start_time
print(f"\nCache Effectiveness:")
print(f"Cache size: {populated_stats['cache_size']}")
print(f"Active entries: {populated_stats['active_entries']}")
print(f"Cached request time: {cached_request_time:.4f}s")
# Cached request should be very fast
assert cached_request_time < 0.1, f"Cached request took {cached_request_time:.4f}s, should be much faster"
finally:
loop.close()
def test_timeout_handling_performance(self, client, valid_login_data):
"""Test performance when access menu preparation times out."""
async def slow_prepare_access_menu(*args, **kwargs):
await asyncio.sleep(6) # Longer than timeout
return None
with patch.object(AccessMenuService, 'prepare_access_menu',
side_effect=slow_prepare_access_menu) as mock_prepare:
start_time = time.time()
response = client.post("/auth/login", data=valid_login_data)
duration = time.time() - start_time
# Should complete successfully despite timeout
assert response.status_code == 200
response_data = response.json()
assert response_data["success"] is True
# Should not wait for full timeout - should handle gracefully
assert duration < 8.0, f"Login took {duration:.2f}s, timeout handling should be faster"
# Should have warnings about access menu failure
login_data = response_data["data"]
assert login_data.get("access_menu") is None
assert "warnings" in login_data
print(f"\nTimeout Handling Performance:")
print(f"Login duration with timeout: {duration:.3f}s")
class TestLoginScalabilityBenchmarks:
    """Scalability benchmarks for the login endpoint."""

    @pytest.fixture
    def client(self):
        """HTTP test client bound to the FastAPI app."""
        return TestClient(app)

    @pytest.fixture
    def valid_login_data(self):
        """Credentials accepted by the auth backend under test."""
        return {
            "username": "admin@example.com",
            "password": "admin123"
        }
@pytest.mark.slow
def test_sustained_load_performance(self, client, valid_login_data):
"""Test performance under sustained load."""
with patch.object(AccessMenuService, 'prepare_access_menu') as mock_prepare:
# Mock fast access menu preparation
mock_prepare.return_value = AccessMenuData(
permissions=["dashboard.VIEW_DASHBOARD"],
dashboard_layout=None,
accessible_widgets=[],
menu_items=[],
role_info=RoleInfo(
role_id="admin",
role_name="Administrator",
merchant_id="MERCHANT001",
permissions=["dashboard.VIEW_DASHBOARD"]
)
)
# Test parameters
duration_seconds = 30 # 30 second test
target_rps = 10 # 10 requests per second
start_time = time.time()
request_count = 0
response_times = []
errors = 0
print(f"\nSustained Load Test (Target: {target_rps} RPS for {duration_seconds}s)")
while time.time() - start_time < duration_seconds:
request_start = time.time()
try:
response = client.post("/auth/login", data=valid_login_data)
request_end = time.time()
request_count += 1
response_time = request_end - request_start
response_times.append(response_time)
if response.status_code != 200:
errors += 1
# Control request rate
elapsed = request_end - start_time
expected_requests = elapsed * target_rps
if request_count > expected_requests:
sleep_time = (request_count - expected_requests) / target_rps
time.sleep(sleep_time)
except Exception as e:
errors += 1
print(f"Request error: {e}")
total_duration = time.time() - start_time
actual_rps = request_count / total_duration
if response_times:
avg_response_time = statistics.mean(response_times)
p95_response_time = statistics.quantiles(response_times, n=20)[18] # 95th percentile
print(f"Results:")
print(f" Total requests: {request_count}")
print(f" Actual RPS: {actual_rps:.2f}")
print(f" Errors: {errors}")
print(f" Average response time: {avg_response_time:.3f}s")
print(f" 95th percentile response time: {p95_response_time:.3f}s")
# Performance assertions
assert errors == 0, f"Had {errors} errors during sustained load test"
assert actual_rps >= target_rps * 0.9, f"Actual RPS {actual_rps:.2f} below target {target_rps}"
assert avg_response_time < 2.0, f"Average response time {avg_response_time:.3f}s exceeds requirement"
assert p95_response_time < 3.0, f"95th percentile response time {p95_response_time:.3f}s too high"
if __name__ == "__main__":
    # Run the performance suite directly; -s surfaces the print() reports.
    pytest.main([__file__, "-v", "-s"])