# BULMA — src/causal/robustness.py
# (HuggingFace file-listing header preserved: uploaded by HarriziSaad,
#  "Update src/causal/robustness.py", commit 77ad7a9, verified)
import json

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.utils import resample
def make_dr_balanced():
    """Build a fresh, unfitted doubly-robust learner for one treatment.

    Returns
    -------
    DRLearner
        - outcome model: RandomForestRegressor (300 trees, min leaf 5,
          fixed seed, all cores);
        - propensity model: LogisticRegression with class_weight="balanced"
          so a rare treatment arm is not drowned out by the majority class.

    NOTE(review): cv=1 disables cross-fitting in econml's DRLearner; the
    usual doubly-robust recipe uses cv>=2 — confirm this is intentional.
    """
    return DRLearner(
        model_regression=RandomForestRegressor(
            n_estimators=300, min_samples_leaf=5, random_state=42, n_jobs=-1
        ),
        model_propensity=LogisticRegression(
            max_iter=1000, solver="lbfgs", class_weight="balanced"
        ),
        cv=1,
        random_state=42,
    )
# ---------------------------------------------------------------------------
# Section 3 robustness: propensity-trimmed ATE + placebo (shuffled treatment)
# for each transporter expression column.
#
# Relies on objects built earlier in the pipeline and not defined here:
# C (DataFrame), T_cols, idx_tr/idx_te, X_tr/X_te, y_tr — TODO confirm they
# are in scope when this section runs. `np` and `json` were originally
# imported only *below* this section (notebook-cell ordering); as a module
# they must be imported at the top of the file.
# ---------------------------------------------------------------------------
robust_results = {}
placebo = {}
rng = np.random.default_rng(123)
for t in T_cols:
    # Binarize the continuous treatment at its median (high vs low expression).
    T_all = C[t].astype(float).to_numpy()
    thresh = np.median(T_all)
    T_bin = (T_all > thresh).astype(int)
    T_tr, T_test = T_bin[idx_tr], T_bin[idx_te]
    dr = make_dr_balanced()
    dr.fit(y_tr, T_tr, X=X_tr)
    # Separate propensity model used only to trim the test set to the
    # overlap (common-support) region.
    prop = LogisticRegression(max_iter=1000, solver="lbfgs", class_weight="balanced")
    prop.fit(X_tr, T_tr)
    e_hat = prop.predict_proba(X_te)[:, 1]
    mask = (e_hat > 0.05) & (e_hat < 0.95)
    # Relax the trim if it would discard too much of the test set.
    if mask.sum() < max(50, int(0.2 * len(X_te))):
        mask = (e_hat > 0.02) & (e_hat < 0.98)
    ate_trim = float(dr.ate(X_te[mask]))
    # Bootstrap CI over the trimmed test rows. The model is NOT refit per
    # draw, so this reflects evaluation uncertainty only.
    boots = []
    n_trim = int(mask.sum())
    for _ in range(300):
        b = rng.integers(0, n_trim, size=n_trim)
        boots.append(float(dr.ate(X_te[mask][b])))
    ci_trim = (float(np.percentile(boots, 2.5)), float(np.percentile(boots, 97.5)))
    robust_results[t] = {"ATE_trim": ate_trim, "CI_trim": ci_trim, "trim_keep": int(mask.sum())}
    # Placebo: refit on a shuffled treatment; the ATE should collapse to ~0.
    T_tr_shuff = rng.permutation(T_tr)
    dr_p = make_dr_balanced()
    dr_p.fit(y_tr, T_tr_shuff, X=X_tr)
    ate_placebo = float(dr_p.ate(X_te))
    placebo[t] = ate_placebo

# Persist both checks for the downstream figure/table section.
out = {"robust": robust_results, "placebo": placebo}
with open("results/causal_section3_robustness.json", "w") as f:
    json.dump(out, f, indent=2)
print(" Robustness checks saved → results/causal_section3_robustness.json")
for k, v in robust_results.items():
    print(f"{k}: ATE_trim={v['ATE_trim']:.3f}, CI_trim={tuple(np.round(v['CI_trim'],3))}, kept={v['trim_keep']}")
print("Placebo (ATE; expected ≈ 0):", {k: round(v, 3) for k, v in placebo.items()})
import json, numpy as np, pandas as pd, matplotlib.pyplot as plt, seaborn as sns, pathlib as p
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LinearRegression
# Plotting defaults and output directory for the ED figures/tables.
plt.rcParams["figure.dpi"] = 150
sns.set_style("whitegrid")
RES = p.Path("results")
RES.mkdir(exist_ok=True, parents=True)
rob_path = RES/"causal_section3_robustness.json"
# read_text() closes the file; json.load(open(...)) leaked the handle.
rob = json.loads(rob_path.read_text())
# Flatten the per-transporter robustness dicts into a tidy table,
# one row per transporter, sorted by trimmed ATE (descending).
rows = []
for k, v in rob.get("robust", {}).items():
    kept = v.get("trim_keep", v.get("kept"))
    rows.append({
        "transporter": k,
        "ATE_trim": float(v["ATE_trim"]),
        "CI_low": float(v["CI_trim"][0]),
        "CI_high": float(v["CI_trim"][1]),
        # Original did int(np.nan) when both keys were missing, which raises
        # ValueError — only cast when a count is actually present.
        "kept_n": int(kept) if kept is not None else np.nan,
    })
df = pd.DataFrame(rows).sort_values("ATE_trim", ascending=False)
df.to_csv(RES/"ED_Table_S3_causal_robustness.csv", index=False)
# Horizontal bar chart of trimmed ATEs with bootstrap 95% CIs overlaid.
# Figure height scales with the number of transporters.
plt.figure(figsize=(8, max(3.5, 0.42*len(df))))
ax = sns.barplot(data=df, y="transporter", x="ATE_trim", color="steelblue", orient="h")
# Bars are drawn in df row order, so reset-index row i sits at y = i.
for i, r in df.reset_index(drop=True).iterrows():
    plt.plot([r.CI_low, r.CI_high], [i, i], color="k", lw=1)
plt.axvline(0, color="red", ls="--", lw=1)  # null-effect reference line
plt.xlabel("ATE (trimmed)"); plt.ylabel("")
plt.title("Trimmed ATEs (95% CI)")
plt.tight_layout()
plt.savefig(RES/"ED_Fig_trimmed_ATEs.png", dpi=300)
plt.show()
# Assemble placebo ATE samples, preferring the richest data available:
#   1) full per-transporter sample arrays, if the robustness run saved them;
#   2) else the saved point estimates, jittered slightly so a histogram
#      can be drawn at all;
#   3) else recompute a permutation null from the processed causal table.
placebo_samples = []
if "placebo_samples" in rob and rob["placebo_samples"]:
    for t, arr in rob["placebo_samples"].items():
        vals = np.array(arr, dtype=float).ravel().tolist()
        placebo_samples.append(pd.DataFrame({"transporter": t, "ATE_placebo": vals}))
elif "placebo" in rob and rob["placebo"]:
    for t, val in rob["placebo"].items():
        v = float(val)
        # NOTE(review): the rng is re-seeded (123) on every iteration, so
        # every transporter gets the *same* 50 jitter offsets — confirm
        # that identical-shaped pseudo-distributions are intended.
        jitter = v + 0.002*np.random.default_rng(123).standard_normal(50)
        placebo_samples.append(pd.DataFrame({"transporter": t, "ATE_placebo": jitter.tolist()}))
else:
    import pandas as pd  # redundant re-import kept from the original (harmless)
    C = pd.read_csv("data/processed/causal_table.csv")
    cov_cont = ["ethanol_pct","ROS","NaCl_mM","H2O2_uM","PDR1_reg","YAP1_reg"]
    cov_cat = ["batch"]
    X_df = C[cov_cont+cov_cat].copy()
    X_df[cov_cont] = X_df[cov_cont].astype(float)
    X_df[cov_cat] = X_df[cov_cat].astype(str)
    ct = ColumnTransformer([
        ("num", StandardScaler(), cov_cont),
        ("cat", OneHotEncoder(sparse_output=False, handle_unknown="ignore"), cov_cat)
    ])
    X_all = ct.fit_transform(X_df)
    y = C["outcome"].astype(float).to_numpy()
    idx = np.arange(len(y))
    idx_tr, idx_te = train_test_split(idx, test_size=0.2, random_state=42)
    X_tr, X_te = X_all[idx_tr], X_all[idx_te]
    y_tr, y_te = y[idx_tr], y[idx_te]
    # Residualize the outcome on covariates; group-mean differences of these
    # residuals under permuted labels form the placebo null distribution.
    ols = LinearRegression().fit(X_tr, y_tr)
    r_te = y_te - ols.predict(X_te)
    rng = np.random.default_rng(123)
    T_cols = [c for c in C.columns if c.endswith("_expr")]
    for t in T_cols:
        T = C[t].astype(float).to_numpy()
        # binarize like Section 3
        T_bin = (T > np.median(T)).astype(int)
        T_tr = T_bin[idx_tr]
        vals = []
        for _ in range(200):
            # NOTE(review): T_perm is never used — a dead draw that only
            # advances the RNG stream; kept so outputs stay reproducible.
            T_perm = rng.permutation(T_tr)
            T_te = rng.permutation(T_bin[idx_te])
            m1 = r_te[T_te==1].mean() if (T_te==1).any() else 0.0
            m0 = r_te[T_te==0].mean() if (T_te==0).any() else 0.0
            vals.append(m1 - m0)
        placebo_samples.append(pd.DataFrame({"transporter": t, "ATE_placebo": vals}))
# Pool the per-transporter placebo draws and plot overlaid density
# histograms; every distribution should sit near zero if the placebo
# treatments carry no effect. A mean/std summary table is saved alongside.
P = pd.concat(placebo_samples, ignore_index=True)
plt.figure(figsize=(8, 4.5))
ax = sns.histplot(
    P,
    x="ATE_placebo",
    hue="transporter",
    element="step",
    stat="density",
    common_norm=False,
    bins=30,
    alpha=0.35,
)
ax.set_xlabel("Placebo ATE")
ax.set_ylabel("Density")
ax.set_title("Placebo ATE distributions")
plt.tight_layout()
plt.savefig(RES/"ED_Fig_placebo_hist.png", dpi=300)
plt.show()
summary_cols = ["mean", "std"]
stats = P.groupby("transporter")["ATE_placebo"].agg(summary_cols).reset_index()
stats.to_csv(RES/"ED_Table_placebo_stats.csv", index=False)
# Final manifest of every artifact this script wrote under results/.
artifacts = (
    RES/"ED_Table_S3_causal_robustness.csv",
    RES/"ED_Fig_trimmed_ATEs.png",
    RES/"ED_Fig_placebo_hist.png",
    RES/"ED_Table_placebo_stats.csv",
)
print("Saved:", *artifacts)