Datasets:
#!/usr/bin/env python3
"""PROTAC-Bench standardized evaluation script.
Evaluates predictions under the Leave-One-Target-Out (LOTO) protocol
with 65 cold-target folds and paired Wilcoxon significance testing.
Usage:
python evaluate.py --predictions my_preds.csv --output results.json
python evaluate.py --predictions my_preds.csv --baseline baseline_predictions.csv --output results.json
"""

import argparse
import json
import sys
from pathlib import Path

import numpy as np
import pandas as pd
from scipy import stats as sp_stats
from sklearn.metrics import roc_auc_score


def _find_data_dir():
    """Locate the data directory: supports repo layout (../data/) or a flat copy (.)."""
    candidates = [
        Path(__file__).resolve().parent.parent / "data",
        Path(__file__).resolve().parent / "data",
        Path(__file__).resolve().parent,  # flat layout: CSV/JSON beside the script
    ]
    for d in candidates:
        if (d / "protac_bench.csv").exists() and (d / "loto_folds.json").exists():
            return d
    return candidates[0]  # fall back to the repo-layout default


DATA_DIR = _find_data_dir()


def auroc_safe(y_true, y_prob):
    """AUROC that returns 0.5 for degenerate (single-class) folds."""
    if len(np.unique(y_true)) < 2:
        return 0.5
    return roc_auc_score(y_true, y_prob)
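

# Example with toy labels (not from the benchmark): a single-class fold such as
# auroc_safe(np.array([0, 0, 0]), np.array([0.2, 0.9, 0.4])) returns 0.5,
# because roc_auc_score raises when only one class is present in y_true.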


def load_folds():
    with open(DATA_DIR / "loto_folds.json") as f:
        return json.load(f)
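

# loto_folds.json is expected to map each held-out target's UniProt accession
# to its fold record; illustrative shape (accession and values made up):
#   {"P12345": {"test_indices": [3, 17, ...], "n_entries": 112,
#               "activity_rate": 0.43}, ...}
# The field names match what evaluate_predictions() reads below.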


def load_labels():
    df = pd.read_csv(DATA_DIR / "protac_bench.csv")
    return df["label"].values


def evaluate_predictions(pred_probs, labels, folds):
    """Compute per-fold and overall LOTO AUROC."""
    per_fold = []
    for uid, fold in sorted(folds.items()):
        idx = fold["test_indices"]
        y_true = labels[idx]
        y_pred = pred_probs[idx]
        auc = auroc_safe(y_true, y_pred)
        per_fold.append({
            "target_uniprot": uid,
            "n_entries": fold["n_entries"],
            "activity_rate": fold["activity_rate"],
            "auroc": round(auc, 6),
        })
    aurocs = [f["auroc"] for f in per_fold]
    # Tertile analysis by target size: split folds into small/medium/large
    # thirds by n_entries and average AUROC within each tertile.
    sizes = [f["n_entries"] for f in per_fold]
    sorted_by_size = sorted(zip(sizes, aurocs))
    n = len(sorted_by_size)
    t1 = [a for _, a in sorted_by_size[: n // 3]]
    t2 = [a for _, a in sorted_by_size[n // 3: 2 * n // 3]]
    t3 = [a for _, a in sorted_by_size[2 * n // 3:]]
    summary = {
        "mean_auroc": round(float(np.mean(aurocs)), 6),
        "std_auroc": round(float(np.std(aurocs)), 6),
        "median_auroc": round(float(np.median(aurocs)), 6),
        "n_folds": len(per_fold),
        "tertile_small": round(float(np.mean(t1)), 4),
        "tertile_medium": round(float(np.mean(t2)), 4),
        "tertile_large": round(float(np.mean(t3)), 4),
    }
    return summary, per_fold


def paired_wilcoxon(aurocs_a, aurocs_b):
    """Two-sided paired Wilcoxon signed-rank test."""
    diff = np.array(aurocs_a) - np.array(aurocs_b)
    if np.all(diff == 0):
        return {"delta": 0.0, "p_value": 1.0}
    try:
        _, pval = sp_stats.wilcoxon(diff, alternative="two-sided")
    except Exception:
        pval = 1.0
    return {
        "delta": round(float(np.mean(diff)), 6),
        "p_value": round(float(pval), 6),
    }
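

# Example with toy per-fold AUROCs (three folds, illustration only):
#   paired_wilcoxon([0.71, 0.68, 0.74], [0.65, 0.66, 0.70])
#   -> {"delta": 0.04, "p_value": 0.25}
# "delta" is the mean per-fold AUROC difference (first argument minus second).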


def load_predictions(path):
    """Load predictions CSV (columns: index, predicted_probability)."""
    df = pd.read_csv(path)
    if "predicted_probability" not in df.columns:
        sys.exit(f"Error: predictions file must have a 'predicted_probability' column. "
                 f"Found: {list(df.columns)}")
    if "index" in df.columns:
        df = df.sort_values("index").reset_index(drop=True)
    return df["predicted_probability"].values
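

# Note: without an "index" column, rows are assumed to already follow
# protac_bench.csv order; the length check in main() is the only safeguard.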


def main():
    parser = argparse.ArgumentParser(description="PROTAC-Bench LOTO Evaluation")
    parser.add_argument("--predictions", required=True, help="Path to predictions CSV")
    parser.add_argument("--baseline", default=None,
                        help="Path to baseline predictions CSV (for Wilcoxon test)")
    parser.add_argument("--output", default="results.json", help="Output JSON path")
    args = parser.parse_args()

    labels = load_labels()
    folds = load_folds()

    # Evaluate submitted predictions
    pred_probs = load_predictions(args.predictions)
    if len(pred_probs) != len(labels):
        sys.exit(f"Error: predictions ({len(pred_probs)}) != dataset ({len(labels)})")
    summary, per_fold = evaluate_predictions(pred_probs, labels, folds)
    result = {"summary": summary, "per_fold": per_fold}

    # Wilcoxon test vs baseline if provided
    if args.baseline:
        base_probs = load_predictions(args.baseline)
        if len(base_probs) != len(labels):
            sys.exit(f"Error: baseline predictions ({len(base_probs)}) != dataset ({len(labels)})")
        base_summary, base_per_fold = evaluate_predictions(base_probs, labels, folds)
        aurocs_pred = [f["auroc"] for f in per_fold]
        aurocs_base = [f["auroc"] for f in base_per_fold]
        wilcoxon = paired_wilcoxon(aurocs_pred, aurocs_base)
        result["vs_baseline"] = {
            "baseline_mean_auroc": base_summary["mean_auroc"],
            "submitted_mean_auroc": summary["mean_auroc"],
            **wilcoxon,
        }
        print(f"Submitted: {summary['mean_auroc']:.4f} "
              f"Baseline: {base_summary['mean_auroc']:.4f} "
              f"Delta: {wilcoxon['delta']:+.4f} "
              f"p={wilcoxon['p_value']:.4f}")
    else:
        print(f"LOTO AUROC: {summary['mean_auroc']:.4f} ± {summary['std_auroc']:.4f} "
              f"(median={summary['median_auroc']:.4f}, n={summary['n_folds']} folds)")

    with open(args.output, "w") as f:
        json.dump(result, f, indent=2)
    print(f"Results saved to {args.output}")


if __name__ == "__main__":
    main()