"""PROTAC-Bench RF+Morgan baseline reproduction.

Runs 65-fold leave-one-target-out (LOTO) evaluation with a RandomForest
on Morgan fingerprints (1024-bit, radius 2). Saves per-entry predictions
to baseline_predictions.csv and then runs evaluate.py on them.

Expected result: AUROC ~0.666

Usage:
    python baselines.py
"""
import json
import subprocess
import sys
from pathlib import Path

import numpy as np
import pandas as pd
from rdkit import Chem, RDLogger
from rdkit.Chem import AllChem
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score

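# Silence RDKit SMILES-parsing warnings; unparseable entries are handled
# explicitly in compute_morgan().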
RDLogger.DisableLog('rdApp.*')


def _find_data_dir():
    """Locate the data directory: supports the repo layout (../data/) or a flat copy (.)."""
    candidates = [
        Path(__file__).resolve().parent.parent / "data",
        Path(__file__).resolve().parent / "data",
        Path(__file__).resolve().parent,
    ]
    for d in candidates:
        if (d / "protac_bench.csv").exists() and (d / "loto_folds.json").exists():
            return d
    raise FileNotFoundError(
        "protac_bench.csv and loto_folds.json not found in any of: "
        + ", ".join(str(d) for d in candidates)
    )


DATA_DIR = _find_data_dir()
SEED = 42
N_SEEDS = 3
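# Each fold is scored with N_SEEDS forests seeded SEED, SEED + 1, ...,
# and their predicted probabilities are averaged to damp run-to-run variance.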


def auroc_safe(y_true, y_prob):
    """AUROC, defined as 0.5 when y_true contains a single class."""
    if len(np.unique(y_true)) < 2:
        return 0.5
    return roc_auc_score(y_true, y_prob)
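
# Scoring single-class folds as 0.5 is a common convention: a degenerate fold
# counts as chance level instead of being dropped from the macro-average.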


def compute_morgan(smiles_list, nbits=1024, radius=2):
    """Morgan fingerprint matrix; rows for unparseable SMILES are left all-zero."""
    X = np.zeros((len(smiles_list), nbits), dtype=np.float32)
    for i, smi in enumerate(smiles_list):
        mol = Chem.MolFromSmiles(str(smi))
        if mol:
            fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nbits)
            X[i] = np.array(fp)
    return X
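
# Note: on newer RDKit releases (2023.09+) GetMorganFingerprintAsBitVect is
# deprecated in favor of the fingerprint generator API, e.g.:
#   from rdkit.Chem import rdFingerprintGenerator
#   gen = rdFingerprintGenerator.GetMorganGenerator(radius=2, fpSize=1024)
#   fp = gen.GetFingerprint(mol)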


def main():
    # Load the dataset and the precomputed LOTO fold definitions.
    df = pd.read_csv(DATA_DIR / "protac_bench.csv")
    with open(DATA_DIR / "loto_folds.json") as f:
        folds = json.load(f)
    print(f"Dataset: {len(df)} entries, {len(folds)} LOTO folds")

    # Featurize all molecules once; the fingerprints are reused across folds.
    print("Computing Morgan fingerprints (1024-bit, radius 2)...")
    X = compute_morgan(list(df["smiles"].values), nbits=1024, radius=2)
    labels = df["label"].values

    # Out-of-fold probabilities for every entry, plus coverage bookkeeping.
    all_probs = np.zeros(len(df), dtype=np.float64)
    all_counts = np.zeros(len(df), dtype=np.int32)
    fold_results = []
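
    # Leave-one-target-out: each fold holds out all entries for one target
    # and trains on everything else.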
    for uid, fold in sorted(folds.items()):
        te_idx = np.array(fold["test_indices"])
        tr_idx = np.setdiff1d(np.arange(len(df)), te_idx)
        y_te = labels[te_idx]
        seed_aurocs = []
        seed_probs_list = []
        for s in range(N_SEEDS):
            rf = RandomForestClassifier(
                n_estimators=200, max_depth=None, min_samples_leaf=3,
                random_state=SEED + s, n_jobs=-1
            )
            rf.fit(X[tr_idx], labels[tr_idx])
            prob = rf.predict_proba(X[te_idx])
            # predict_proba columns follow rf.classes_ (sorted), so this selects P(label == 1).
            prob = prob[:, 1] if rf.classes_[1] == 1 else 1 - prob[:, 0]
            seed_probs_list.append(prob)
            seed_aurocs.append(auroc_safe(y_te, prob))
        avg_probs = np.mean(seed_probs_list, axis=0)
        all_probs[te_idx] = avg_probs
        all_counts[te_idx] = 1
        mean_auc = float(np.mean(seed_aurocs))
        fold_results.append({"target": uid, "n": fold["n_entries"], "auroc": mean_auc})
        print(f"  {uid}: n={fold['n_entries']:3d}  AUROC={mean_auc:.4f}")

    # Safety net: entries not covered by any fold get predictions from a
    # single model trained on all covered entries.
    missing = all_counts == 0
    if missing.any():
        rf = RandomForestClassifier(
            n_estimators=200, max_depth=None, min_samples_leaf=3,
            random_state=SEED, n_jobs=-1
        )
        rf.fit(X[~missing], labels[~missing])
        prob = rf.predict_proba(X[missing])
        prob = prob[:, 1] if rf.classes_[1] == 1 else 1 - prob[:, 0]
        all_probs[missing] = prob

    # Macro-average: unweighted mean of per-fold (per-target) AUROCs.
    aurocs = [r["auroc"] for r in fold_results]
    print(f"\nRF+Morgan baseline: mean AUROC = {np.mean(aurocs):.4f} "
          f"± {np.std(aurocs):.4f} (n={len(fold_results)} folds)")

    # Persist per-entry predictions next to this script.
    script_dir = Path(__file__).resolve().parent
    out_path = script_dir / "baseline_predictions.csv"
    pred_df = pd.DataFrame({
        "index": range(len(df)),
        "predicted_probability": all_probs,
    })
    pred_df.to_csv(out_path, index=False)
    print(f"Predictions saved to {out_path}")

    # Score the saved predictions with the benchmark evaluator; check=True
    # makes a non-zero exit from evaluate.py fatal rather than silent.
    eval_script = script_dir / "evaluate.py"
    result_path = script_dir / "baseline_results.json"
    print("\nRunning evaluation...")
    subprocess.run([
        sys.executable, str(eval_script),
        "--predictions", str(out_path),
        "--output", str(result_path),
    ], check=True)


if __name__ == "__main__":
    main()