"""Analyze JSONL benchmark logs and print per-activity summary statistics."""
import os
import sys
import argparse

# Make the ai-service source tree importable when this script is run directly
# (assumes this script lives one directory below the project root).
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(current_dir)
sys.path.insert(0, os.path.join(project_root, "services", "ai-service", "src"))

from ai_med_extract.utils.benchmark_analysis import load_logs, get_benchmark_stats
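# Assumed interface for the two helpers imported above (a sketch inferred from
# how they are used in this script, not from their implementations):
# load_logs(path) parses one JSON object per line of the .jsonl file into a
# list of dicts; get_benchmark_stats(logs) aggregates entries per activity,
# returning roughly:
#
#   {
#       "some_activity": {                 # hypothetical activity name
#           "count": 12,
#           "avg_time_sec": 3.41,
#           "avg_ram_delta_mb": 85.0,
#           "avg_gpu_delta_mb": 512.3,
#           "avg_output_len_chars": 940,   # optional per activity
#           "avg_input_tokens": 1280,      # optional per activity
#       },
#   }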

def print_analysis(stats: dict):
    """Pretty-print per-activity benchmark stats as an aligned table."""
    print(f"\n{'='*90}")
    print(f"{'Activity':<30} | {'Count':<5} | {'Avg Time':<8} | {'Avg RAM Delta':<13} | {'Avg GPU Delta':<13}")
    print(f"{'-'*90}")

    for activity, data in stats.items():
        # Field widths match the header above (13 = 11 digits + "MB" suffix).
        print(f"{activity:<30} | {data['count']:<5} | {data['avg_time_sec']:>7.2f}s | {data['avg_ram_delta_mb']:>11.1f}MB | {data['avg_gpu_delta_mb']:>11.1f}MB")
        
        if "avg_output_len_chars" in data:
            print(f"  -> Avg Output Len: {data['avg_output_len_chars']} chars")
        if "avg_input_tokens" in data:
            print(f"  -> Avg Input Tokens: {data['avg_input_tokens']}")

    print(f"{'='*90}\n")
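# Sample of the table shape produced above (values illustrative only, not
# real benchmark results; "some_activity" is a hypothetical activity name):
#
#   Activity                       | Count | Avg Time | Avg RAM Delta | Avg GPU Delta
#   ---------------------------------------------------------------------------------
#   some_activity                  | 12    |    3.41s |        85.0MB |       512.3MB
#     -> Avg Input Tokens: 1280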

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Analyze benchmark logs")
    parser.add_argument("--log_file", type=str, default="logs/benchmark.jsonl", help="Path to log file")
    args = parser.parse_args()
    
    logs = load_logs(args.log_file)
    if logs:
        print(f"Loaded {len(logs)} log entries from {args.log_file}")
        stats = get_benchmark_stats(logs)
        print_analysis(stats)
    else:
        print(f"No logs found at {args.log_file}.")