# HNTAI — scripts/analyze_benchmarks.py
# Analyze benchmark JSONL logs produced by the ai-service.
# Author: sachinchandrankallar — commit 188ec8d ("bench mark")
import os
import sys
import argparse
# Make the ai-service source tree importable when this script is run directly:
# scripts/ sits at the project root, next to services/ai-service/src.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(current_dir)
sys.path.insert(0, os.path.join(project_root, "services", "ai-service", "src"))
from ai_med_extract.utils.benchmark_analysis import load_logs, get_benchmark_stats
def print_analysis(stats: dict):
    """Pretty-print a benchmark summary table to stdout.

    *stats* maps an activity name to a metrics dict containing at least
    'count', 'avg_time_sec', 'avg_ram_delta_mb' and 'avg_gpu_delta_mb'.
    The optional keys 'avg_output_len_chars' and 'avg_input_tokens', when
    present, each produce an extra indented line under the activity's row.
    """
    rule = "=" * 90
    print(f"\n{rule}")
    # Fixed-width header so the per-activity rows below line up.
    print(
        f"{'Activity':<30} | {'Count':<5} | {'Avg Time':<8} | "
        f"{'Avg RAM Delta':<13} | {'Avg GPU Delta':<13}"
    )
    print("-" * 90)
    for name, metrics in stats.items():
        row = (
            f"{name:<30} | {metrics['count']:<5} | "
            f"{metrics['avg_time_sec']:>7.2f}s | "
            f"{metrics['avg_ram_delta_mb']:>9.1f}MB | "
            f"{metrics['avg_gpu_delta_mb']:>9.1f}MB"
        )
        print(row)
        if "avg_output_len_chars" in metrics:
            print(f" -> Avg Output Len: {metrics['avg_output_len_chars']} chars")
        if "avg_input_tokens" in metrics:
            print(f" -> Avg Input Tokens: {metrics['avg_input_tokens']}")
    print(f"{rule}\n")
if __name__ == "__main__":
    # CLI entry point: load the benchmark log, aggregate, and print a table.
    cli = argparse.ArgumentParser(description="Analyze benchmark logs")
    cli.add_argument("--log_file", type=str, default="logs/benchmark.jsonl", help="Path to log file")
    opts = cli.parse_args()

    entries = load_logs(opts.log_file)
    if not entries:
        print("No logs found.")
    else:
        print(f"Loaded {len(entries)} log entries from {opts.log_file}")
        print_analysis(get_benchmark_stats(entries))