| |
| """PROTAC-Bench standardized evaluation script. |
| |
| Evaluates predictions under the Leave-One-Target-Out (LOTO) protocol |
| with 65 cold-target folds and paired Wilcoxon significance testing. |
| |
| Usage: |
| python evaluate.py --predictions my_preds.csv --output results.json |
| python evaluate.py --predictions my_preds.csv --baseline baseline_predictions.csv --output results.json |
| """ |
| import argparse |
| import json |
| import sys |
| from pathlib import Path |
|
|
| import numpy as np |
| import pandas as pd |
| from scipy import stats as sp_stats |
| from sklearn.metrics import roc_auc_score |
|
|
|
|
def _find_data_dir():
    """Locate data directory: supports repo layout (../data/) or flat copy (.)."""
    here = Path(__file__).resolve().parent
    required = ("protac_bench.csv", "loto_folds.json")
    candidates = [here.parent / "data", here / "data", here]
    for candidate in candidates:
        if all((candidate / name).exists() for name in required):
            return candidate
    # Nothing matched: fall back to the repo-layout location so later file
    # opens fail with a path that points at the expected place.
    return candidates[0]
|
|
| DATA_DIR = _find_data_dir() |
|
|
|
|
def auroc_safe(y_true, y_prob):
    """AUROC that returns 0.5 for degenerate (single-class) folds."""
    # A fold with only one class has no defined ROC curve; 0.5 (chance) is
    # the neutral score used so such folds do not distort the aggregate.
    if len(np.unique(y_true)) >= 2:
        return roc_auc_score(y_true, y_prob)
    return 0.5
|
|
|
|
def load_folds():
    """Return the LOTO fold definitions parsed from loto_folds.json."""
    return json.loads((DATA_DIR / "loto_folds.json").read_text())
|
|
|
|
def load_labels():
    """Return the label column of the benchmark CSV as a NumPy array, in file order."""
    frame = pd.read_csv(DATA_DIR / "protac_bench.csv")
    return frame["label"].to_numpy()
|
|
|
|
def evaluate_predictions(pred_probs, labels, folds):
    """Compute per-fold and overall LOTO AUROC.

    Args:
        pred_probs: array of predicted probabilities aligned with the dataset rows.
        labels: array of ground-truth labels, same alignment.
        folds: mapping of target UniProt id -> fold dict with "test_indices",
            "n_entries" and "activity_rate".

    Returns:
        (summary dict, list of per-fold dicts) where the summary contains the
        mean/std/median AUROC plus mean AUROC per fold-size tertile.
    """
    per_fold = []
    # Deterministic fold order: sort by target id.
    for target_id, fold in sorted(folds.items()):
        test_idx = fold["test_indices"]
        fold_auc = auroc_safe(labels[test_idx], pred_probs[test_idx])
        per_fold.append({
            "target_uniprot": target_id,
            "n_entries": fold["n_entries"],
            "activity_rate": fold["activity_rate"],
            "auroc": round(fold_auc, 6),
        })

    aurocs = [entry["auroc"] for entry in per_fold]
    # Tertile breakdown by fold size: rank (size, auroc) pairs, split in thirds.
    ranked = sorted(zip([e["n_entries"] for e in per_fold], aurocs))
    n = len(ranked)
    cut_lo, cut_hi = n // 3, 2 * n // 3
    small = [a for _, a in ranked[:cut_lo]]
    medium = [a for _, a in ranked[cut_lo:cut_hi]]
    large = [a for _, a in ranked[cut_hi:]]

    summary = {
        "mean_auroc": round(float(np.mean(aurocs)), 6),
        "std_auroc": round(float(np.std(aurocs)), 6),
        "median_auroc": round(float(np.median(aurocs)), 6),
        "n_folds": n,
        "tertile_small": round(float(np.mean(small)), 4),
        "tertile_medium": round(float(np.mean(medium)), 4),
        "tertile_large": round(float(np.mean(large)), 4),
    }
    return summary, per_fold
|
|
|
|
def paired_wilcoxon(aurocs_a, aurocs_b):
    """Two-sided paired Wilcoxon signed-rank test on per-fold AUROC differences.

    Args:
        aurocs_a: per-fold AUROCs of the submitted model.
        aurocs_b: per-fold AUROCs of the baseline, same fold order.

    Returns:
        dict with "delta" (mean of a - b) and "p_value", both rounded to 6
        decimals. Degenerate inputs (all differences zero, or too few usable
        pairs for the test) yield p_value 1.0.
    """
    diff = np.asarray(aurocs_a, dtype=float) - np.asarray(aurocs_b, dtype=float)
    if np.all(diff == 0):
        # Identical per-fold scores: no evidence of any difference.
        return {"delta": 0.0, "p_value": 1.0}
    try:
        _, pval = sp_stats.wilcoxon(diff, alternative="two-sided")
    except ValueError:
        # scipy raises ValueError for degenerate inputs (e.g. too few
        # non-zero pairs after zero-handling). Previously this caught bare
        # Exception, which would also have hidden genuine programming errors.
        pval = 1.0
    return {
        "delta": round(float(np.mean(diff)), 6),
        "p_value": round(float(pval), 6),
    }
|
|
|
|
def load_predictions(path):
    """Load predictions CSV (columns: index, predicted_probability).

    When an "index" column is present, rows are reordered by it so the
    returned array is aligned with dataset row order. Exits the process
    with an error message if the probability column is missing.
    """
    frame = pd.read_csv(path)
    if "predicted_probability" not in frame.columns:
        sys.exit(f"Error: predictions file must have 'predicted_probability' column. "
                 f"Found: {list(frame.columns)}")
    if "index" in frame.columns:
        frame = frame.sort_values("index").reset_index(drop=True)
    return frame["predicted_probability"].values
|
|
|
|
def main():
    """CLI entry point: score a predictions file, optionally compare to a baseline,
    and write the result JSON."""
    parser = argparse.ArgumentParser(description="PROTAC-Bench LOTO Evaluation")
    parser.add_argument("--predictions", required=True, help="Path to predictions CSV")
    parser.add_argument("--baseline", default=None,
                        help="Path to baseline predictions CSV (for Wilcoxon test)")
    parser.add_argument("--output", default="results.json", help="Output JSON path")
    args = parser.parse_args()

    labels = load_labels()
    folds = load_folds()

    submitted = load_predictions(args.predictions)
    if len(submitted) != len(labels):
        sys.exit(f"Error: predictions ({len(submitted)}) != dataset ({len(labels)})")
    summary, per_fold = evaluate_predictions(submitted, labels, folds)
    result = {"summary": summary, "per_fold": per_fold}

    if args.baseline:
        baseline = load_predictions(args.baseline)
        if len(baseline) != len(labels):
            sys.exit(f"Error: baseline predictions ({len(baseline)}) != dataset ({len(labels)})")
        base_summary, base_folds = evaluate_predictions(baseline, labels, folds)
        # Paired test: per-fold AUROCs are matched by identical fold order.
        wilcoxon = paired_wilcoxon([f["auroc"] for f in per_fold],
                                   [f["auroc"] for f in base_folds])
        result["vs_baseline"] = {
            "baseline_mean_auroc": base_summary["mean_auroc"],
            "submitted_mean_auroc": summary["mean_auroc"],
            **wilcoxon,
        }
        print(f"Submitted: {summary['mean_auroc']:.4f} "
              f"Baseline: {base_summary['mean_auroc']:.4f} "
              f"Delta: {wilcoxon['delta']:+.4f} "
              f"p={wilcoxon['p_value']:.4f}")
    else:
        print(f"LOTO AUROC: {summary['mean_auroc']:.4f} ± {summary['std_auroc']:.4f} "
              f"(median={summary['median_auroc']:.4f}, n={summary['n_folds']} folds)")

    with open(args.output, "w") as out:
        json.dump(result, out, indent=2)
    print(f"Results saved to {args.output}")


if __name__ == "__main__":
    main()
|
|