#!/usr/bin/env python3
"""Evaluate Prompt Selection vs Random Baseline using cell-eval.
Compares predicted B-cell perturbation results from two methods:
1. Prompt Selection (embedding-based prompt ordering)
2. Random Baseline (default Stack random prompt ordering)
Usage:
python code/prompt_selection/evaluate_results.py --perturbation Dabrafenib
"""
from __future__ import annotations

import gc
import logging
import sys
from pathlib import Path
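
# Make the vendored cell-eval sources and the parent package directory
# importable before the non-stdlib imports below.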
_THIS_DIR = Path(__file__).resolve().parent
_REPO_ROOT = _THIS_DIR.parents[1]  # lfj/transfer/

for _p in [
    str(_REPO_ROOT / "code" / "cell-eval" / "src"),
    str(_THIS_DIR.parent),
]:
    if _p not in sys.path:
        sys.path.insert(0, _p)

import argparse

import anndata as ad
import numpy as np
import polars as pl
from scipy.sparse import issparse

from cell_eval import MetricsEvaluator
from prompt_selection import config as cfg

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
)
LOGGER = logging.getLogger("evaluate_results")
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
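# sm_name labels each cell's small-molecule treatment; Dimethyl Sulfoxide (DMSO)
# is the vehicle control, and broad_cell_class selects B-lineage cells.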
PERTURBATION_COL = "sm_name"
CONTROL_NAME = "Dimethyl Sulfoxide"
CELL_TYPE_FILTER = {"broad_cell_class": "lymphocyte of b lineage"}
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def build_real_combined(pert_name: str, output_dir: Path) -> ad.AnnData:
    """Extract ground truth B cells (control + perturbation) from source data."""
    LOGGER.info("Loading source data: %s", cfg.SOURCE_ADATA)
    source = ad.read_h5ad(str(cfg.SOURCE_ADATA))
    LOGGER.info("Source shape: %s", source.shape)
    broad = source.obs["broad_cell_class"]
    sm = source.obs[PERTURBATION_COL]
    b_lineage = CELL_TYPE_FILTER["broad_cell_class"]
    ctrl_mask = (broad == b_lineage) & (sm == CONTROL_NAME)
    pert_mask = (broad == b_lineage) & (sm == pert_name)

    control_B = source[ctrl_mask].copy()
    real_pert_B = source[pert_mask].copy()
    del source
    gc.collect()

    LOGGER.info("Control B cells (DMSO): %d", control_B.n_obs)
    LOGGER.info("Real perturbed B cells (%s): %d", pert_name, real_pert_B.n_obs)
    if real_pert_B.n_obs == 0:
        raise ValueError(f"No ground truth B cells found for {pert_name}")

    real_combined = ad.concat([control_B, real_pert_B], join="inner")
    LOGGER.info(
        "real_combined: %d cells, sm_name: %s",
        real_combined.n_obs,
        real_combined.obs[PERTURBATION_COL].value_counts().to_dict(),
    )

    real_path = output_dir / "real_combined.h5ad"
    real_combined.write_h5ad(real_path)
    LOGGER.info("Saved real_combined to %s", real_path)
    return real_combined


def build_pred_combined(pred_path: Path, label: str) -> ad.AnnData:
    """Combine control B cells with predictions into a single AnnData."""
    ctrl = ad.read_h5ad(str(cfg.RESULTS_DIR / cfg.QUERY_CTRL_H5AD))
    pred = ad.read_h5ad(str(pred_path))
    LOGGER.info("[%s] Control: %d cells, Pred: %d cells", label, ctrl.n_obs, pred.n_obs)

    combined = ad.concat([ctrl, pred], join="inner")
    combined.obs_names_make_unique()
    LOGGER.info(
        "[%s] combined: %d cells, sm_name: %s",
        label, combined.n_obs,
        combined.obs[PERTURBATION_COL].value_counts().to_dict(),
    )
    return combined


def align_genes(
    adata_pred: ad.AnnData, adata_real: ad.AnnData
) -> tuple[ad.AnnData, ad.AnnData]:
    """Ensure pred and real have identical var_names in the same order."""
    common = adata_pred.var_names.intersection(adata_real.var_names)
    if len(common) == 0:
        raise ValueError("No common genes between predicted and real data")
    LOGGER.info(
        "Gene alignment: pred=%d, real=%d, common=%d",
        adata_pred.n_vars, adata_real.n_vars, len(common),
    )
    return adata_pred[:, common].copy(), adata_real[:, common].copy()


def densify_X(adata: ad.AnnData) -> None:
    """Convert sparse X to dense in-place to avoid repeated sparse→dense conversions."""
    if issparse(adata.X):
        adata.X = np.asarray(adata.X.todense(), dtype=np.float32)


def evaluate_one(
    pred_path: Path, real_combined: ad.AnnData, label: str, output_dir: Path
):
    """Build pred_combined, align genes, run cell-eval, and free memory."""
    LOGGER.info("=" * 60)
    LOGGER.info("Evaluating: %s", label)
    LOGGER.info("=" * 60)

    pred_combined = build_pred_combined(pred_path, label)
    pred_al, real_al = align_genes(pred_combined, real_combined)
    del pred_combined
    gc.collect()

    LOGGER.info("[%s] Densifying expression matrices...", label)
    densify_X(pred_al)
    densify_X(real_al)
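
    # profile="full" runs the complete cell-eval metric suite; write_csv=True
    # persists the metric tables under outdir, and break_on_error=False lets
    # the remaining metrics run if one metric fails.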
    eval_dir = str(output_dir / f"celleval_{label}")
    evaluator = MetricsEvaluator(
        adata_pred=pred_al,
        adata_real=real_al,
        control_pert=CONTROL_NAME,
        pert_col=PERTURBATION_COL,
        outdir=eval_dir,
        allow_discrete=True,
        num_threads=4,
    )
    results, agg_results = evaluator.compute(
        profile="full",
        write_csv=True,
        break_on_error=False,
    )

    del pred_al, real_al, evaluator
    gc.collect()
    return results, agg_results

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main():
    parser = argparse.ArgumentParser(description="Evaluate Prompt Selection vs Random Baseline")
    parser.add_argument(
        "--perturbation", type=str, required=True,
        help="Perturbation name (e.g., Dabrafenib).",
    )
    parser.add_argument(
        "--output-dir", type=Path, default=None,
        help="Output directory for evaluation results (default: eval_results/<perturbation>).",
    )
    args = parser.parse_args()

    pert_name = args.perturbation
    pcfg = cfg.get_pert_config(pert_name)
    output_dir = args.output_dir if args.output_dir else pcfg.eval_dir
    output_dir.mkdir(parents=True, exist_ok=True)
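
    # The per-perturbation config (prompt_selection.config) supplies the
    # artifact locations: results_dir/final_result_h5ad for Prompt Selection
    # and baseline_dir/baseline_result_h5ad for the Random Baseline.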
    pred_ps_path = pcfg.results_dir / pcfg.final_result_h5ad
    pred_bl_path = pcfg.baseline_dir / pcfg.baseline_result_h5ad

    # ---- Step 1: Build ground truth ----
    LOGGER.info("Step 1: Preparing ground truth data for %s", pert_name)
    real_combined = build_real_combined(pert_name, output_dir)

    # ---- Steps 2+3: Evaluate each method ----
    agg_ps = None
    agg_bl = None
    if pred_ps_path.exists():
        try:
            _, agg_ps = evaluate_one(pred_ps_path, real_combined, "prompt_selection", output_dir)
        except Exception as e:
            LOGGER.error("Evaluation (prompt_selection) failed: %s", e, exc_info=True)
    else:
        LOGGER.warning("Prompt selection result not found: %s", pred_ps_path)

    if pred_bl_path.exists():
        try:
            _, agg_bl = evaluate_one(pred_bl_path, real_combined, "baseline", output_dir)
        except Exception as e:
            LOGGER.error("Evaluation (baseline) failed: %s", e, exc_info=True)
    else:
        LOGGER.warning("Baseline result not found: %s", pred_bl_path)

    # ---- Step 4: Compare results ----
    LOGGER.info("=" * 60)
    LOGGER.info("Comparing results for %s", pert_name)
    LOGGER.info("=" * 60)
    if agg_ps is not None:
        print(f"\n--- Prompt Selection ({pert_name}, aggregated) ---")
        print(agg_ps)
    if agg_bl is not None:
        print(f"\n--- Random Baseline ({pert_name}, aggregated) ---")
        print(agg_bl)

    if agg_ps is not None and agg_bl is not None:
        try:
            mean_ps = agg_ps.filter(pl.col("statistic") == "mean").drop("statistic")
            mean_bl = agg_bl.filter(pl.col("statistic") == "mean").drop("statistic")
            ps_long = mean_ps.unpivot(variable_name="metric", value_name="prompt_selection")
            bl_long = mean_bl.unpivot(variable_name="metric", value_name="random_baseline")
            comparison = ps_long.join(bl_long, on="metric")
            comparison = comparison.with_columns(
                (pl.col("prompt_selection") - pl.col("random_baseline")).alias("diff")
            )
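            # Sign convention only: diff > 0 means Prompt Selection scored
            # higher on that metric; whether higher is better depends on the
            # metric's orientation.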
            comparison_path = output_dir / "comparison_mean.csv"
            comparison.write_csv(str(comparison_path))

            print("\n" + "=" * 70)
            print(f"COMPARISON ({pert_name}): Prompt Selection vs Random Baseline (mean)")
            print("=" * 70)
            print(comparison)
            print(f"\nSaved to: {comparison_path}")
        except Exception as e:
            LOGGER.warning("Could not build comparison table: %s", e)
LOGGER.info("Evaluation complete for %s. Results in %s", pert_name, output_dir)
if __name__ == "__main__":
main()
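

# Downstream sketch (a minimal example, not executed by this script; the CSV
# path assumes the default output layout eval_results/<perturbation>):
#
#   import polars as pl
#   df = pl.read_csv("eval_results/Dabrafenib/comparison_mean.csv")
#   print(df.sort("diff", descending=True))  # metrics where Prompt Selection leads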