from pathlib import Path
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from sklearn.metrics import average_precision_score
from ..atlas.dataset import PairDataset
from ..atlas.model_mlp import AtlasMLP
from ..utils.io import load_cfg, set_seed, save_json
def _train_model(ds: PairDataset, lr: float, epochs: int, batch_size: int,
device: str) -> AtlasMLP:
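    """Train a fresh AtlasMLP on the labeled pairs; returned in eval mode."""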
model = AtlasMLP().to(device).train()
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
loader = DataLoader(ds, batch_size=batch_size, shuffle=True)
for _ in range(epochs):
for p, l, y, _, _ in loader:
p, l, y = p.to(device), l.to(device), y.squeeze(-1).to(device)
loss = nn.functional.binary_cross_entropy_with_logits(model(p, l), y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
return model.eval()
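# Note: each PairDataset item is assumed (from the unpacking in this module)
# to be a 5-tuple (protein_emb, ligand_emb, label, ...); the trailing two
# fields, presumably pair indices, are unused for training and prediction.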
@torch.no_grad()
def _predict_proba(model: AtlasMLP, ds: PairDataset, batch_size: int,
device: str) -> np.ndarray:
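    """Sigmoid probabilities for every pair in ds, preserving dataset order."""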
loader = DataLoader(ds, batch_size=batch_size, shuffle=False)
probs = []
for p, l, _, _, _ in loader:
probs.append(torch.sigmoid(model(p.to(device), l.to(device))).cpu().numpy())
return np.concatenate(probs)
def _scores_uncertainty(model: AtlasMLP, pool_ds: PairDataset, batch_size: int,
                        device: str) -> np.ndarray:
    """Binary predictive entropy per pool pair: maximal at p = 0.5."""
    p = _predict_proba(model, pool_ds, batch_size, device)
    # Small epsilon guards log(0) for saturated probabilities.
    entropy = -p * np.log(p + 1e-9) - (1 - p) * np.log(1 - p + 1e-9)
    return entropy
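# Example values for the entropy score above (natural log):
#   p = 0.50 -> 0.693 (maximally uncertain, acquired first)
#   p = 0.99 -> 0.056 (confident, acquired last)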
def _scores_diversity(model: AtlasMLP, pool_ds: PairDataset, labeled_ds: PairDataset,
                      batch_size: int, device: str) -> np.ndarray:
    """
    Cosine distance from each pool point to its *nearest* labeled point
    (higher = more novel). Uses the concatenated (protein, ligand) embeddings
    as the feature space; `model` is unused but kept for a uniform interface.
    """
def _embeddings(ds):
embs = []
for p, l, _, _, _ in DataLoader(ds, batch_size=batch_size):
embs.append(torch.cat([p, l], dim=-1).numpy())
return np.concatenate(embs)
pool_emb = _embeddings(pool_ds)
labeled_emb = _embeddings(labeled_ds)
    # L2-normalize rows so the matrix product below yields cosine similarities.
    pool_n = pool_emb / (np.linalg.norm(pool_emb, axis=1, keepdims=True) + 1e-9)
    labeled_n = labeled_emb / (np.linalg.norm(labeled_emb, axis=1, keepdims=True) + 1e-9)
    sims = pool_n @ labeled_n.T
return 1.0 - sims.max(axis=1)
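# Example values for the diversity score above: a pool point whose embedding
# duplicates a labeled point scores ~0 (cosine similarity 1), while one
# orthogonal to every labeled embedding scores 1.0.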
def _scores_causal(pool_ds, causal_effects: dict) -> np.ndarray:
"""
Causal weight: prioritize pairs from high-ATE transporters.
causal_effects : {gene_name: ATE_value} (positive = protective)
"""
weights = np.zeros(len(pool_ds.pairs))
for i, (ti, _ci, _y) in enumerate(pool_ds.pairs):
        gene = pool_ds.Tnames[ti] if hasattr(pool_ds, "Tnames") else str(ti)
        # |ATE|: effect magnitude drives prioritization, regardless of sign.
        weights[i] = abs(causal_effects.get(gene, 0.0))
return weights / (weights.max() + 1e-9)
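# Example (hypothetical gene names): causal_effects = {"ABCB1": 0.40, "SLCO1B1": -0.10}
# gives pairs from ABCB1 weight 1.0 and pairs from SLCO1B1 weight 0.25 after the
# max-normalization above; genes absent from the dict score 0.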
def run_active_learning(
cfg_path: str = "env/config.yaml",
strategy: str = "uncertainty",
causal_csv: str = "results/causal_effects.csv",
) -> dict:
"""
Run a pool-based active learning simulation.
Returns a dict with AUPRC at each round for the chosen strategy.
"""
cfg = load_cfg(cfg_path)
set_seed(cfg["training"]["seed"])
device = "cuda" if torch.cuda.is_available() else "cpu"
proc = Path(cfg["paths"]["processed"])
res = Path(cfg["paths"]["results"])
res.mkdir(parents=True, exist_ok=True)
al_cfg = cfg["active_learning"]
tr_cfg = cfg["training"]
full_ds = PairDataset(proc)
n = len(full_ds.pairs)
rng = np.random.default_rng(tr_cfg["seed"])
causal_effects = {}
if strategy in ("causal", "hybrid") and Path(causal_csv).exists():
df_c = pd.read_csv(causal_csv)
causal_effects = dict(zip(df_c["gene"], df_c["ATE"].abs()))
init_k = int(al_cfg["init_frac"] * n)
acquire_k = int(al_cfg["acquire_per_iter"] * n)
labeled = set(rng.choice(n, size=init_k, replace=False).tolist())
pool = set(range(n)) - labeled
curve_fracs, curve_auprc = [], []
for it in range(al_cfg["iters"]):
labeled_list = sorted(labeled)
pool_list = sorted(pool)
ds_labeled = PairDataset(proc, labeled_list)
ds_pool = PairDataset(proc, pool_list)
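        # Retrain from scratch each round with a short, fixed epoch budget
        # (epochs=8 is hardcoded here rather than read from the config).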
model = _train_model(ds_labeled, tr_cfg["lr"], epochs=8,
batch_size=tr_cfg["batch_size"], device=device)
if strategy == "random":
scores = rng.random(len(pool_list))
elif strategy == "uncertainty":
scores = _scores_uncertainty(model, ds_pool, tr_cfg["batch_size"], device)
elif strategy == "diversity":
scores = _scores_diversity(model, ds_pool, ds_labeled, tr_cfg["batch_size"], device)
elif strategy == "causal":
scores = _scores_causal(ds_pool, causal_effects)
elif strategy == "hybrid":
s_unc = _scores_uncertainty(model, ds_pool, tr_cfg["batch_size"], device)
s_causal = _scores_causal(ds_pool, causal_effects)
scores = 0.5 * s_unc / (s_unc.max() + 1e-9) + 0.5 * s_causal
else:
raise ValueError(f"Unknown strategy: {strategy!r}")
acquire_k_actual = min(acquire_k, len(pool_list))
top_local = np.argsort(scores)[::-1][:acquire_k_actual]
newly_labeled = {pool_list[i] for i in top_local}
labeled |= newly_labeled
pool -= newly_labeled
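        # Evaluate on a random holdout from the remaining pool; the newly
        # acquired points were just removed, so holdout composition can
        # differ slightly across strategies.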
hold_size = min(int(0.2 * n), len(pool))
if hold_size > 0:
hold_idx = rng.choice(sorted(pool), size=hold_size, replace=False)
ds_hold = PairDataset(proc, hold_idx.tolist())
probs = _predict_proba(model, ds_hold, tr_cfg["batch_size"] * 2, device)
y_hold = np.array([y for _, _, y in ds_hold.pairs])
ap = float(average_precision_score(y_hold, probs))
else:
ap = float("nan")
        # Record the fraction the evaluated model was actually trained on
        # (acquisition above happens after training, so use labeled_list).
        frac = len(labeled_list) / n
        curve_fracs.append(frac)
        curve_auprc.append(ap)
        print(f" iter={it+1} labeled={len(labeled_list)}/{n} ({frac:.2%}) AUPRC={ap:.4f}")
snapshot = {
"strategy": strategy,
"curves": {"fracs": curve_fracs, "auprc": curve_auprc},
}
save_json(snapshot, res / f"al_section4_{strategy}_snapshot.json")
return snapshot
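
if __name__ == "__main__":
    # Minimal usage sketch (assumes this module is executed with
    # `python -m <package>.<module>` so the relative imports resolve, and
    # that env/config.yaml and the processed data exist).
    for strat in ("random", "uncertainty", "diversity", "causal", "hybrid"):
        print(f"--- strategy: {strat} ---")
        run_active_learning(strategy=strat)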