| import os, json, math, numpy as np, pandas as pd, torch, torch.nn as nn |
| from pathlib import Path |
| from sklearn.metrics import roc_auc_score, average_precision_score, brier_score_loss |
| from sklearn.calibration import calibration_curve |
| from sklearn.model_selection import train_test_split |
| import matplotlib.pyplot as plt |
|
|
# Runtime configuration: use the GPU when one is present, and make sure
# the output directory exists before any figures/JSON are written.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
PROC = Path("data/processed")
RES = Path("results")
RES.mkdir(parents=True, exist_ok=True)
|
|
# Load the preprocessed inputs: protein features, ligand descriptors,
# and the labeled (transporter, compound, condition) pairs.
P, L, Y = (
    pd.read_csv(PROC / name)
    for name in ("protein.csv", "ligand.csv", "labels.csv")
)
|
|
def is_ethanol(s):
    """Return True when *s* is a string mentioning ethanol ("etoh"/"ethanol", case-insensitive)."""
    if not isinstance(s, str):
        return False
    lowered = s.lower()
    return "etoh" in lowered or "ethanol" in lowered
def is_oxid(s):
    """Return True when *s* is a string mentioning oxidative stress ("h2o2"/"oxid", case-insensitive)."""
    if not isinstance(s, str):
        return False
    lowered = s.lower()
    return "h2o2" in lowered or "oxid" in lowered
|
|
# Flag each labeled pair by stress condition.
# FIX: the `.fillna(False)` calls were dead code — is_ethanol/is_oxid already
# return False for every non-string element (including NaN), so the mapped
# series never contains NaN. On object dtype the no-op fillna also triggers
# pandas' silent-downcasting FutureWarning, so it is dropped.
Y["is_ethanol"] = Y["condition"].map(is_ethanol)
Y["is_oxidative"] = Y["condition"].map(is_oxid)
|
|
# Feature column names: every protein column except the join key, and the
# ligand descriptor columns (those whose name starts with "d").
pcols = [name for name in P.columns if name != "transporter"]
lcols = [name for name in L.columns if name.startswith("d")]
|
|
# Lookup tables: entity identifier -> float32 feature vector.
P_map = {}
for _, row in P.iterrows():
    P_map[row["transporter"]] = row[pcols].to_numpy(dtype=np.float32)
L_map = {}
for _, row in L.iterrows():
    L_map[row["compound"]] = row[lcols].to_numpy(dtype=np.float32)
|
|
def make_Xy(df_pairs):
    """Build the concatenated protein+ligand feature matrix for a set of pairs.

    Pairs whose transporter or compound has no feature vector in
    P_map / L_map are silently dropped.

    Parameters
    ----------
    df_pairs : pd.DataFrame
        Must contain "transporter", "compound" and "y" columns.

    Returns
    -------
    X : np.ndarray, float32, shape (n_kept, d_protein + d_ligand)
    y : np.ndarray, float32, shape (n_kept,)
    df : pd.DataFrame — the kept rows (copy), aligned with X/y.

    Raises
    ------
    ValueError
        If filtering leaves no usable pairs (np.stack would otherwise fail
        with an opaque "need at least one array to stack" error).
    """
    keep = df_pairs["transporter"].isin(P_map.keys()) & df_pairs["compound"].isin(L_map.keys())
    df = df_pairs.loc[keep].copy()
    if df.empty:
        # FIX: fail loudly and clearly instead of letting np.stack crash.
        raise ValueError("make_Xy: no pairs remain after filtering against P_map/L_map")
    X = np.stack(
        [np.concatenate([P_map[t], L_map[c]]) for t, c in zip(df.transporter, df.compound)]
    ).astype(np.float32)
    y = df["y"].astype(np.float32).to_numpy()
    return X, y, df
|
|
class MLP(nn.Module):
    """Two-hidden-layer perceptron that emits one logit per sample."""

    def __init__(self, in_dim):
        super().__init__()
        # Same architecture as before: 512 -> 128 -> 1 with ReLU + 0.2 dropout.
        layers = [
            nn.Linear(in_dim, 512),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(512, 128),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(128, 1),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # Drop the trailing singleton dim so callers get (batch,) logits.
        return self.net(x).squeeze(-1)
|
|
def fit_mlp(Xtr, ytr, Xva, yva, max_epochs=50, bs=512, lr=1e-3, patience=6):
    """Train an MLP with minibatch AdamW and early stopping on validation BCE.

    Parameters
    ----------
    Xtr, ytr : np.ndarray
        Training features and binary labels (float32).
    Xva, yva : np.ndarray
        Held-out split used only for early stopping.
    max_epochs : int — upper bound on training epochs.
    bs : int — minibatch size.
    lr : float — AdamW learning rate.
    patience : int — epochs without improvement before stopping.

    Returns
    -------
    The model, restored to its best-validation-loss weights, in eval mode.
    """
    Xtr = torch.from_numpy(Xtr).to(DEVICE); ytr = torch.from_numpy(ytr).to(DEVICE)
    Xva = torch.from_numpy(Xva).to(DEVICE); yva = torch.from_numpy(yva).to(DEVICE)
    model = MLP(Xtr.shape[1]).to(DEVICE)
    opt = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=1e-4)
    bce = nn.BCEWithLogitsLoss()
    best, best_state, bad = 1e9, None, 0
    for ep in range(max_epochs):
        model.train()
        perm = torch.randperm(len(Xtr), device=DEVICE)
        for i in range(0, len(Xtr), bs):
            idx = perm[i:i+bs]
            logits = model(Xtr[idx])
            loss = bce(logits, ytr[idx])
            opt.zero_grad(); loss.backward(); opt.step()

        model.eval()
        with torch.no_grad():
            val = bce(model(Xva), yva).item()
        if val < best - 1e-4:  # require a small absolute improvement
            best = val
            best_state = {k: v.detach().cpu() for k, v in model.state_dict().items()}
            bad = 0
        else:
            bad += 1
            if bad >= patience:
                break
    # FIX: best_state is None when no epoch ever improved (NaN validation
    # loss, or max_epochs == 0); keep the current weights in that case
    # instead of crashing inside load_state_dict.
    if best_state is not None:
        model.load_state_dict({k: v.to(DEVICE) for k, v in best_state.items()})
    model.eval()
    return model
|
|
def evaluate(model, Xte, yte, tag, out_prefix):
    """Score *model* on a test split and save a reliability (calibration) diagram.

    Parameters
    ----------
    model : trained MLP in eval mode.
    Xte, yte : np.ndarray — test features and binary labels.
    tag : str — human-readable name shown in the plot title/legend.
    out_prefix : str — suffix used in the saved PNG filename.

    Returns
    -------
    dict with "AUROC", "AUPRC", "Brier" (floats, NaN where undefined)
    and "calibration_png" (path of the saved figure).
    """
    Xte_t = torch.from_numpy(Xte).to(DEVICE)
    with torch.no_grad():
        p = torch.sigmoid(model(Xte_t)).cpu().numpy()
    # FIX: ranking metrics are undefined with a single class; AUROC was
    # guarded but AUPRC was not — guard both consistently.
    single_class = len(np.unique(yte)) < 2
    auroc = np.nan if single_class else roc_auc_score(yte, p)
    auprc = np.nan if single_class else average_precision_score(yte, p)
    brier = brier_score_loss(yte, p)
    prob_true, prob_pred = calibration_curve(yte, p, n_bins=10, strategy="quantile")
    plt.figure(figsize=(3.6, 3.6))
    plt.plot(prob_pred, prob_true, "o-", label=tag)
    plt.plot([0, 1], [0, 1], "k--", lw=1)
    plt.xlabel("Predicted"); plt.ylabel("Observed"); plt.title(f"Calibration — {tag}")
    plt.legend()  # FIX: a label was set on the curve but never rendered
    fn = RES/f"section5_calibration_{out_prefix}.png"
    plt.tight_layout(); plt.savefig(fn, dpi=200); plt.close()
    return {"AUROC": float(auroc), "AUPRC": float(auprc), "Brier": float(brier), "calibration_png": str(fn)}
|
|
# Direction 1: train on ethanol-stress pairs, evaluate zero-shot transfer
# to oxidative-stress pairs. A stratified 80/20 split of the training
# condition supplies the early-stopping validation set.
train_df = Y.loc[Y["is_ethanol"]].copy()
test_df = Y.loc[Y["is_oxidative"]].copy()
Xtr, ytr, _ = make_Xy(train_df)
Xte, yte, _ = make_Xy(test_df)
idx_tr, idx_va = train_test_split(
    np.arange(len(Xtr)), test_size=0.2, random_state=42, stratify=(ytr > 0)
)
model_e2o = fit_mlp(Xtr[idx_tr], ytr[idx_tr], Xtr[idx_va], ytr[idx_va], max_epochs=60)
metrics_e2o = evaluate(model_e2o, Xte, yte, "Ethanol→Oxidative", "e2o")
|
|
# Direction 2: the reverse transfer — train on oxidative-stress pairs,
# evaluate zero-shot on ethanol-stress pairs.
train_df2 = Y.loc[Y["is_oxidative"]].copy()
test_df2 = Y.loc[Y["is_ethanol"]].copy()
Xtr2, ytr2, _ = make_Xy(train_df2)
Xte2, yte2, _ = make_Xy(test_df2)
idx_tr2, idx_va2 = train_test_split(
    np.arange(len(Xtr2)), test_size=0.2, random_state=42, stratify=(ytr2 > 0)
)
model_o2e = fit_mlp(Xtr2[idx_tr2], ytr2[idx_tr2], Xtr2[idx_va2], ytr2[idx_va2], max_epochs=60)
metrics_o2e = evaluate(model_o2e, Xte2, yte2, "Oxidative→Ethanol", "o2e")
|
|
# Summary bar chart of AUPRC for both transfer directions.
# FIX: the two bars were drawn with two separate plt.bar() calls, which
# assigns each bar a different color-cycle entry and splits one logical
# series in two; a single call with both categories is the correct form.
plt.figure(figsize=(4.5, 3.2))
plt.bar(["EtOH→Ox", "Ox→EtOH"], [metrics_e2o["AUPRC"], metrics_o2e["AUPRC"]])
plt.ylabel("AUPRC (transfer)"); plt.title("Stress transfer performance")
plt.tight_layout(); plt.savefig(RES/"section5_transfer_auprc.png", dpi=200); plt.close()
|
|
# Persist a JSON snapshot of the run so the numbers behind the figures
# can be inspected and compared later.
snapshot = {
    "device": DEVICE,
    "transfer": {"EtOH_to_Ox": metrics_e2o, "Ox_to_EtOH": metrics_o2e},
    "notes": "Section 5 stress transfer using Section-2 MLP and current labels.csv",
}
with (RES/"section5_transfer_snapshot.json").open("w") as f:
    json.dump(snapshot, f, indent=2)
print(" Saved transfer figs & snapshot:",
      RES/"section5_calibration_e2o.png",
      RES/"section5_calibration_o2e.png",
      RES/"section5_transfer_auprc.png",
      RES/"section5_transfer_snapshot.json")