#!/usr/bin/env python3
"""Aggregate cell-eval comparison results across all perturbation conditions.

Reads per-perturbation comparison_mean.csv files and produces:
  1. all_comparison.csv — the full table with a perturbation column
  2. summary_statistics.csv — per-metric mean/std across perturbations
  3. Summary statistics and win counts printed to stdout

Usage:
    python code/prompt_selection/aggregate_results.py
"""
from __future__ import annotations

import logging
import sys
from pathlib import Path

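# Ensure the package's parent directory is on sys.path so the `prompt_selection`
# imports below resolve when this file is run directly as a script.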
_THIS_DIR = Path(__file__).resolve().parent
if str(_THIS_DIR.parent) not in sys.path:
    sys.path.insert(0, str(_THIS_DIR.parent))

import pandas as pd

from prompt_selection import config as cfg

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
)
LOGGER = logging.getLogger("aggregate_results")


def main() -> None:
    all_dfs = []

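    # Load each perturbation's comparison table, tagging rows with the source perturbation.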
    for pert_name in cfg.ALL_PERTURBATIONS:
        pcfg = cfg.get_pert_config(pert_name)
        csv_path = pcfg.eval_dir / "comparison_mean.csv"

        if not csv_path.exists():
            LOGGER.warning("No comparison_mean.csv for %s, skipping.", pert_name)
            continue

        df = pd.read_csv(csv_path)
        df["perturbation"] = pert_name
        all_dfs.append(df)
        LOGGER.info("Loaded %s (%d metrics)", pert_name, len(df))

    if not all_dfs:
        LOGGER.error("No comparison results found. Run evaluation first.")
        return

    combined = pd.concat(all_dfs, ignore_index=True)

    # Save full table
    output_path = cfg.EVAL_DIR / "all_comparison.csv"
    cfg.EVAL_DIR.mkdir(parents=True, exist_ok=True)
    combined.to_csv(output_path, index=False)
    LOGGER.info("Saved aggregated results: %s (%d rows)", output_path, len(combined))

    # Summary: mean across perturbations per metric
    print("\n" + "=" * 80)
    print("SUMMARY: Mean across all perturbations")
    print("=" * 80)

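    # Per-metric mean and std across perturbations; .agg(["mean", "std"]) yields
    # MultiIndex columns, which are flattened below to names like "diff_mean".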
    summary = combined.groupby("metric")[["prompt_selection", "random_baseline", "diff"]].agg(
        ["mean", "std"]
    )
    summary.columns = [f"{col}_{stat}" for col, stat in summary.columns]
    summary = summary.sort_values("diff_mean", ascending=False)

    print(summary.to_string())

    summary_path = cfg.EVAL_DIR / "summary_statistics.csv"
    summary.to_csv(summary_path)
    LOGGER.info("Saved summary statistics: %s", summary_path)

    # Count wins
    print("\n" + "=" * 80)
    print("WIN COUNTS (per metric, across perturbations)")
    print("=" * 80)

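    # Positive diff favors prompt selection; for error metrics (lower is better)
    # the sign is flipped so a positive value still counts as a prompt-selection win.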
    lower_is_better = {"mse", "mae", "mse_delta", "mae_delta"}
    for metric_name, group in combined.groupby("metric"):
        ps_wins = 0
        bl_wins = 0
        ties = 0
        for _, row in group.iterrows():
            diff = row["diff"]
            if metric_name in lower_is_better:
                diff = -diff
            if abs(diff) < 1e-12:
                ties += 1
            elif diff > 0:
                ps_wins += 1
            else:
                bl_wins += 1
        total = len(group)
        print(f"  {metric_name:35s}  PS wins: {ps_wins}/{total}  BL wins: {bl_wins}/{total}  Ties: {ties}/{total}")


if __name__ == "__main__":
    main()