"""
Test script for the IIS log parser
"""
import time

from log_parser import IISLogParser, LogAnalyzer
def test_log_file(file_path: str) -> dict:
    """Parse and analyze a single IIS log file, printing a full report.

    Args:
        file_path: Path to the IIS log file to parse.

    Returns:
        The summary-statistics dict from ``LogAnalyzer.get_summary_stats()``
        (keys used here include ``total_requests_before``, ``errors``,
        ``slow_requests``, ``peak_rps`` — confirm full schema in log_parser).
    """
    print(f"\n{'='*80}")
    print(f"Testing: {file_path}")
    print(f"{'='*80}")

    start_time = time.time()

    # Parse
    parser = IISLogParser(file_path)
    df = parser.parse()
    parse_time = time.time() - start_time
    print(f"Service Name: {parser.service_name}")
    # NOTE: df.height suggests a polars DataFrame — row count, not pandas
    print(f"✓ Parsed {df.height:,} log entries in {parse_time:.2f}s")

    # Analyze
    analyzer = LogAnalyzer(df, parser.service_name)
    stats = analyzer.get_summary_stats()
    # Elapsed time attributable to analysis alone (total minus parse phase)
    analyze_time = time.time() - start_time - parse_time
    print(f"✓ Analyzed in {analyze_time:.2f}s")

    # Display summary
    print("\n📊 Summary Statistics:")
    print(f"   Total Requests (before): {stats['total_requests_before']:,}")
    print(f"   Excluded Requests: {stats['excluded_requests']:,}")
    print(f"   Total Requests (after): {stats['total_requests_after']:,}")
    print(f"   Errors (≠200,≠401): {stats['errors']:,}")
    print(f"   Slow Requests (>3s): {stats['slow_requests']:,}")
    print(f"   Peak RPS: {stats['peak_rps']:,} @ {stats['peak_timestamp']}")
    print(f"   Avg Response Time: {stats['avg_time_ms']:,}ms")
    print(f"   Max Response Time: {stats['max_time_ms']:,}ms")
    print(f"   Min Response Time: {stats['min_time_ms']:,}ms")

    # Top methods
    print("\n🔝 Top 5 Methods:")
    top_methods = analyzer.get_top_methods(5)
    for i, method in enumerate(top_methods, 1):
        print(f"   {i}. {method['method_name']}")
        print(f"      Count: {method['count']:,} | Avg Time: {method['avg_time']:.1f}ms | Errors: {method['errors']}")

    # Error breakdown
    errors = analyzer.get_error_breakdown()
    if errors:
        print("\n❌ Error Breakdown:")
        for error in errors:
            print(f"   Status {error['sc_status']}: {error['count']:,} occurrences")
    else:
        print("\n✓ No errors found!")

    # Errors by method
    errors_by_method = analyzer.get_errors_by_method(5)
    if errors_by_method:
        print("\n⚠️  Top 5 Error-Prone Methods:")
        for i, method_error in enumerate(errors_by_method, 1):
            print(f"   {i}. {method_error['full_method_path']}")
            print(f"      Total Calls: {method_error['total_calls']:,} | Errors: {method_error['error_count']:,} | "
                  f"Error Rate: {method_error['error_rate_percent']:.2f}% | "
                  f"Most Common Error: {method_error.get('most_common_error_status', 'N/A')}")
    else:
        print("\n✓ No method errors found!")

    # Response time distribution
    dist = analyzer.get_response_time_distribution()
    print("\n⏱️  Response Time Distribution:")
    for bucket, count in dist.items():
        print(f"   {bucket}: {count:,}")

    total_time = time.time() - start_time
    print(f"\n⏱️  Total processing time: {total_time:.2f}s")

    return stats
if __name__ == "__main__":
    import sys

    # Test with both log files (administrator and customer services)
    files = [
        "administrator_rhr_ex250922.log",
        "customer_rhr_ex250922.log",
    ]

    all_stats = []
    failures = 0  # files that raised during parse/analysis
    total_start = time.time()

    for file_path in files:
        try:
            stats = test_log_file(file_path)
            all_stats.append(stats)
        except Exception as e:
            # Best-effort: report the failure, keep processing remaining files,
            # but remember it so the exit code reflects the overall outcome.
            failures += 1
            print(f"\n❌ Error processing {file_path}: {e}")
            import traceback
            traceback.print_exc()

    # Combined summary across all successfully processed services
    if len(all_stats) > 1:
        print(f"\n{'='*80}")
        print("COMBINED STATISTICS")
        print(f"{'='*80}")
        total_requests = sum(s['total_requests_after'] for s in all_stats)
        total_errors = sum(s['errors'] for s in all_stats)
        total_slow = sum(s['slow_requests'] for s in all_stats)
        print(f"Total Requests (all services): {total_requests:,}")
        print(f"Total Errors (all services): {total_errors:,}")
        print(f"Total Slow Requests (all services): {total_slow:,}")

    total_elapsed = time.time() - total_start
    print(f"\n⏱️  Total elapsed time: {total_elapsed:.2f}s")

    # Only claim success when every file actually processed cleanly;
    # signal failure via the exit code so CI/scripts can detect it.
    if failures:
        print(f"\n❌ {failures} file(s) failed to process.")
        sys.exit(1)
    print("\n✓ All tests completed successfully!")