Spaces:
Runtime error
Runtime error
| """ | |
| Performance monitoring API routes. | |
| This module provides endpoints for monitoring system performance, | |
| connection pool status, cache statistics, and other performance metrics. | |
| """ | |
| import logging | |
| from datetime import datetime | |
| from typing import Dict, Any | |
| from flask import Blueprint, jsonify, current_app | |
| from flask_limiter import Limiter | |
| from ..utils.connection_pool import get_connection_pool_manager | |
| from ..services.cache_service import get_cache_service | |
| from .middleware import require_auth, create_limiter | |
| from ..utils.response_optimization import cache_response, compress_response | |
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
# Create blueprint
# All performance endpoints are intended to live under /api/v1/performance.
performance_bp = Blueprint('performance', __name__, url_prefix='/api/v1/performance')
# Initialize rate limiter
# NOTE(review): `limiter`, `require_auth`, `cache_response` and
# `compress_response` are imported but no route/auth/cache decorators are
# visible on the view functions below — presumably the decorators were
# stripped from this copy of the file; verify against the original.
limiter = create_limiter()
# Cache for 1 minute
# NOTE(review): the comment above likely documents a stripped
# @cache_response(60)-style decorator on the next view — TODO confirm.
def get_performance_status():
    """
    Get overall system performance status.

    Aggregates database connection pool status, Redis pool status, cache
    service statistics, and the performance-relevant application
    configuration flags into a single JSON payload.

    Returns:
        Flask JSON response with the aggregated status (HTTP 200), or an
        error payload with HTTP 500 if status collection raises.
    """
    try:
        status = {
            'timestamp': datetime.utcnow().isoformat(),
            'status': 'healthy',
            'services': {}
        }

        # Database / Redis connection pool status.
        connection_pool_manager = get_connection_pool_manager()
        if connection_pool_manager:
            pool_status = connection_pool_manager.get_status()
            status['services']['database'] = {
                'status': 'connected',
                'pool_stats': pool_status['database']
            }
            if pool_status['redis']:
                redis_state = 'connected' if pool_status['redis']['healthy'] else 'unhealthy'
                status['services']['redis'] = {
                    'status': redis_state,
                    'pool_stats': pool_status['redis']
                }
            else:
                # A falsy redis entry means Redis pooling is not configured.
                status['services']['redis'] = {'status': 'disabled'}
        else:
            status['services']['database'] = {'status': 'no_pool_manager'}
            status['services']['redis'] = {'status': 'no_pool_manager'}

        # Cache service status.
        cache_service = get_cache_service()
        if cache_service:
            cache_stats = cache_service.get_cache_stats()
            status['services']['cache'] = {
                'status': 'enabled' if cache_stats['redis_enabled'] else 'disabled',
                'stats': cache_stats
            }
        else:
            status['services']['cache'] = {'status': 'not_initialized'}

        # Application configuration relevant to performance tuning.
        status['configuration'] = {
            'compression_enabled': current_app.config.get('ENABLE_COMPRESSION', False),
            'caching_enabled': current_app.config.get('ENABLE_CACHING', False),
            'performance_monitoring': current_app.config.get('ENABLE_PERFORMANCE_MONITORING', False),
            'db_pool_size': current_app.config.get('DB_POOL_SIZE', 10),
            'redis_max_connections': current_app.config.get('REDIS_MAX_CONNECTIONS', 20)
        }

        return jsonify(status)
    except Exception as e:
        # logger.exception records the full traceback, unlike logger.error
        # with an f-string, which only captured the message text.
        logger.exception("Error getting performance status")
        return jsonify({
            'timestamp': datetime.utcnow().isoformat(),
            'status': 'error',
            'error': str(e)
        }), 500
def get_database_performance():
    """
    Get database performance metrics.

    Reads pool statistics from the connection pool manager, derives a
    utilization percentage (checked-out connections vs. pool size plus
    overflow), and classifies pool health from that utilization.

    Returns:
        JSON response with pool stats, utilization and health (HTTP 200),
        HTTP 503 if no connection pool manager is available, or HTTP 500
        on unexpected errors.
    """
    try:
        connection_pool_manager = get_connection_pool_manager()
        if not connection_pool_manager:
            return jsonify({
                'error': 'Connection pool manager not available'
            }), 503

        pool_status = connection_pool_manager.get_status()
        database_stats = pool_status['database']

        # Utilization = active (checked-out) connections over total
        # configured capacity; guard against a zero-capacity pool.
        total_connections = database_stats['pool_size'] + database_stats['overflow']
        active_connections = database_stats['checked_out']
        utilization = (active_connections / total_connections * 100) if total_connections > 0 else 0

        # Classify health by utilization thresholds (explicit branches
        # instead of a nested conditional expression for readability).
        if utilization < 80:
            health_status = 'healthy'
        elif utilization < 95:
            health_status = 'warning'
        else:
            health_status = 'critical'

        response_data = {
            'timestamp': datetime.utcnow().isoformat(),
            'pool_stats': database_stats,
            'utilization': {
                'active_connections': active_connections,
                'total_connections': total_connections,
                'utilization_percent': round(utilization, 2)
            },
            'health': {
                'status': health_status,
                'invalid_connections': database_stats['invalid']
            }
        }
        return jsonify(response_data)
    except Exception as e:
        # logger.exception keeps the traceback for debugging.
        logger.exception("Error getting database performance")
        return jsonify({
            'timestamp': datetime.utcnow().isoformat(),
            'error': str(e)
        }), 500
def get_redis_performance():
    """
    Get Redis performance metrics.

    Combines pool statistics from the connection pool manager with live
    server statistics from Redis INFO, and computes the keyspace hit rate.
    Server-info failures are non-fatal: the response is still returned
    with an error placeholder and a zero hit rate.

    Returns:
        JSON response with pool/server stats and hit rate (HTTP 200), a
        'disabled' payload when Redis is not configured, HTTP 503 if no
        connection pool manager is available, or HTTP 500 on error.
    """
    try:
        connection_pool_manager = get_connection_pool_manager()
        if not connection_pool_manager:
            return jsonify({
                'error': 'Connection pool manager not available'
            }), 503

        redis_client = connection_pool_manager.get_redis_client()
        if not redis_client:
            return jsonify({
                'status': 'disabled',
                'message': 'Redis is not configured'
            })

        # Pool-level statistics.
        pool_status = connection_pool_manager.get_status()
        redis_stats = pool_status['redis']

        # Server-level statistics via INFO; best-effort, so failures here
        # degrade the payload rather than failing the whole request.
        try:
            redis_info = redis_client.info()
            server_stats = {
                'version': redis_info.get('redis_version', 'unknown'),
                'uptime_seconds': redis_info.get('uptime_in_seconds', 0),
                'connected_clients': redis_info.get('connected_clients', 0),
                'used_memory': redis_info.get('used_memory_human', 'unknown'),
                'keyspace_hits': redis_info.get('keyspace_hits', 0),
                'keyspace_misses': redis_info.get('keyspace_misses', 0),
                'total_commands_processed': redis_info.get('total_commands_processed', 0)
            }
            # Hit rate = hits / (hits + misses); guard against zero ops.
            total_keyspace_ops = server_stats['keyspace_hits'] + server_stats['keyspace_misses']
            hit_rate = (server_stats['keyspace_hits'] / total_keyspace_ops * 100) if total_keyspace_ops > 0 else 0
        except Exception as e:
            # Lazy %-formatting defers message construction to the logger.
            logger.warning("Failed to get Redis server info: %s", e)
            server_stats = {'error': 'Failed to get server info'}
            hit_rate = 0

        response_data = {
            'timestamp': datetime.utcnow().isoformat(),
            'pool_stats': redis_stats,
            'server_stats': server_stats,
            'performance': {
                'hit_rate_percent': round(hit_rate, 2),
                'health_status': redis_stats.get('healthy', False)
            }
        }
        return jsonify(response_data)
    except Exception as e:
        # logger.exception records the traceback for the failed request.
        logger.exception("Error getting Redis performance")
        return jsonify({
            'timestamp': datetime.utcnow().isoformat(),
            'error': str(e)
        }), 500
def get_cache_performance():
    """
    Get cache performance metrics.

    Returns the cache service's statistics together with an efficiency
    rating derived from the hit rate and a list of tuning recommendations.

    Returns:
        JSON response with cache stats and analysis (HTTP 200), a
        'not_initialized' payload when no cache service exists, or
        HTTP 500 on error.
    """
    try:
        cache_service = get_cache_service()
        if not cache_service:
            return jsonify({
                'status': 'not_initialized',
                'message': 'Cache service not initialized'
            })

        cache_stats = cache_service.get_cache_stats()

        # Rate efficiency by hit-rate thresholds; an explicit if/elif
        # chain replaces the original triple-nested conditional expression.
        hit_rate = cache_stats['hit_rate_percent']
        if hit_rate >= 80:
            efficiency = 'excellent'
        elif hit_rate >= 60:
            efficiency = 'good'
        elif hit_rate >= 40:
            efficiency = 'fair'
        else:
            efficiency = 'poor'

        recommendations = []
        if hit_rate < 60:
            recommendations.append('Consider increasing cache TTL')
            recommendations.append('Review cache key strategies')
        if cache_stats['cache_errors'] > 0:
            recommendations.append('Investigate cache errors')

        performance_analysis = {
            'efficiency': efficiency,
            'recommendations': recommendations
        }

        response_data = {
            'timestamp': datetime.utcnow().isoformat(),
            'cache_stats': cache_stats,
            'performance_analysis': performance_analysis
        }
        return jsonify(response_data)
    except Exception as e:
        # logger.exception records the traceback for debugging.
        logger.exception("Error getting cache performance")
        return jsonify({
            'timestamp': datetime.utcnow().isoformat(),
            'error': str(e)
        }), 500
# Cache for 2 minutes
# NOTE(review): the comment above likely documents a stripped
# @cache_response(120)-style decorator — TODO confirm against the original.
def get_comprehensive_metrics():
    """
    Get comprehensive performance metrics.

    Collects process-level system metrics (via psutil, if installed),
    database and Redis pool statistics, cache statistics, and a snapshot
    of application/Flask configuration into one payload. Missing
    subsystems degrade gracefully to placeholder entries.

    Returns:
        JSON response with all metric groups (HTTP 200), or an error
        payload with HTTP 500 if collection fails.
    """
    try:
        metrics = {
            'timestamp': datetime.utcnow().isoformat(),
            'system': {},
            'database': {},
            'redis': {},
            'cache': {},
            'application': {}
        }

        # System metrics — psutil is optional, so the import is local and
        # guarded; its absence is reported, not raised.
        try:
            import psutil
            import os
            process = psutil.Process(os.getpid())
            cpu_percent = process.cpu_percent()
            memory_info = process.memory_info()
            metrics['system'] = {
                'cpu_percent': cpu_percent,
                'memory_rss_mb': round(memory_info.rss / 1024 / 1024, 2),
                'memory_vms_mb': round(memory_info.vms / 1024 / 1024, 2),
                'num_threads': process.num_threads(),
                # num_fds is POSIX-only, hence the hasattr guard.
                'num_fds': process.num_fds() if hasattr(process, 'num_fds') else 'N/A'
            }
        except ImportError:
            metrics['system'] = {'error': 'psutil not available'}
        except Exception as e:
            metrics['system'] = {'error': str(e)}

        # Database / Redis pool metrics.
        connection_pool_manager = get_connection_pool_manager()
        if connection_pool_manager:
            pool_status = connection_pool_manager.get_status()
            metrics['database'] = pool_status['database']
            metrics['redis'] = pool_status['redis'] or {'status': 'disabled'}

        # Cache metrics.
        cache_service = get_cache_service()
        if cache_service:
            metrics['cache'] = cache_service.get_cache_stats()

        # Application / Flask configuration snapshot.
        metrics['application'] = {
            'config': {
                'compression_enabled': current_app.config.get('ENABLE_COMPRESSION', False),
                'caching_enabled': current_app.config.get('ENABLE_CACHING', False),
                'debug_mode': current_app.config.get('DEBUG', False)
            },
            'flask': {
                'testing': current_app.testing,
                'debug': current_app.debug
            }
        }

        return jsonify(metrics)
    except Exception as e:
        # logger.exception records the traceback for debugging.
        logger.exception("Error getting comprehensive metrics")
        return jsonify({
            'timestamp': datetime.utcnow().isoformat(),
            'error': str(e)
        }), 500
def performance_health_check():
    """
    Quick health check for performance monitoring.

    Probes the database (SELECT 1), Redis (PING), and the cache service's
    error counter. Any failing probe downgrades the overall status to
    'degraded'; only an unexpected exception yields 'unhealthy'/503.

    Returns:
        JSON response with per-check results and overall status
        (HTTP 200), or HTTP 503 if the health check itself fails.
    """
    try:
        health_status = {
            'timestamp': datetime.utcnow().isoformat(),
            'status': 'healthy',
            'checks': {}
        }

        # Database health: a trivial round-trip query.
        try:
            from sqlalchemy import text
            from ..models.base import db
            db.session.execute(text('SELECT 1'))
            health_status['checks']['database'] = 'healthy'
        except Exception as e:
            health_status['checks']['database'] = f'unhealthy: {str(e)}'
            health_status['status'] = 'degraded'

        # Redis health: PING through the pooled client, if configured.
        connection_pool_manager = get_connection_pool_manager()
        if connection_pool_manager:
            redis_client = connection_pool_manager.get_redis_client()
            if redis_client:
                try:
                    redis_client.ping()
                    health_status['checks']['redis'] = 'healthy'
                except Exception as e:
                    health_status['checks']['redis'] = f'unhealthy: {str(e)}'
                    health_status['status'] = 'degraded'
            else:
                health_status['checks']['redis'] = 'disabled'
        else:
            health_status['checks']['redis'] = 'no_pool_manager'

        # Cache service health: flag sustained error accumulation.
        cache_service = get_cache_service()
        if cache_service:
            cache_stats = cache_service.get_cache_stats()
            if cache_stats['cache_errors'] > 100:  # Arbitrary threshold
                health_status['checks']['cache'] = 'degraded'
                health_status['status'] = 'degraded'
            else:
                health_status['checks']['cache'] = 'healthy'
        else:
            health_status['checks']['cache'] = 'not_initialized'

        return jsonify(health_status)
    except Exception as e:
        # logger.exception records the traceback for the failed check.
        logger.exception("Performance health check failed")
        return jsonify({
            'timestamp': datetime.utcnow().isoformat(),
            'status': 'unhealthy',
            'error': str(e)
        }), 503
# Error handlers
def handle_rate_limit_exceeded(error):
    """Return a 429 JSON payload when a rate limit is exceeded."""
    payload = {
        'error': 'Rate limit exceeded',
        'message': 'Too many requests to performance endpoints. Please try again later.'
    }
    return jsonify(payload), 429
def handle_internal_error(error):
    """
    Handle internal server errors.

    Logs the triggering error and returns a generic 500 JSON payload
    (no error details are leaked to the client).
    """
    # Lazy %-formatting defers string construction to the logging module,
    # per logging best practice (was an eager f-string).
    logger.error("Performance endpoint internal error: %s", error)
    return jsonify({
        'error': 'Internal server error',
        'timestamp': datetime.utcnow().isoformat()
    }), 500