"""Random baseline generation using Stack's get_incontext_generation().

Uses random prompt ordering (the default Stack behaviour) instead of
embedding-based selection. Provides a baseline for comparison.

Usage:
    python code/prompt_selection/run_baseline.py --perturbation Dabrafenib
"""
from __future__ import annotations
import argparse
import gc
import logging
import sys
from pathlib import Path
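# Make the parent directory importable so that `prompt_selection` resolves when
# this script is run directly (see the usage line in the module docstring).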
_THIS_DIR = Path(__file__).resolve().parent
if str(_THIS_DIR.parent) not in sys.path:
    sys.path.insert(0, str(_THIS_DIR.parent))
import anndata as ad
import numpy as np
import torch
from scipy.sparse import csr_matrix, issparse
from stack.model_loading import load_model_from_checkpoint
from prompt_selection import config as cfg
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
)
LOGGER = logging.getLogger("prompt_selection.baseline")


def _filter_adata(adata: ad.AnnData, filters: dict) -> ad.AnnData:
    """Subset AnnData by column-value filters."""
    mask = np.ones(adata.n_obs, dtype=bool)
    for col, val in filters.items():
        mask &= (adata.obs[col] == val).values
    return adata[mask].copy()


def ensure_prompt_pert(pcfg: cfg.PertConfig) -> bool:
    """Extract prompt_pert.h5ad if it doesn't exist yet.

    Returns True when prompt perturbation data is available, False when no
    cells match the perturbation filter.
    """
    pcfg.results_dir.mkdir(parents=True, exist_ok=True)
    pert_path = pcfg.results_dir / cfg.PROMPT_PERT_H5AD
    if pert_path.exists():
        return True

    LOGGER.info("prompt_pert not found, extracting from source data...")
    adata = ad.read_h5ad(str(cfg.SOURCE_ADATA))
    pert = _filter_adata(adata, pcfg.prompt_pert_filter)
    LOGGER.info("prompt_pert (%s): %d cells", pcfg.perturbation_name, pert.n_obs)
    if pert.n_obs == 0:
        LOGGER.warning("No T cells found for '%s'. Skipping.", pcfg.perturbation_name)
        del adata, pert
        gc.collect()
        return False

    pert.write_h5ad(pert_path)
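    # The full source AnnData can be large; release it explicitly before returning.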
    del adata, pert
    gc.collect()
    return True


def main():
    parser = argparse.ArgumentParser(description="Random Baseline Generation")
    parser.add_argument(
        "--perturbation", type=str, required=True,
        help="Perturbation name (e.g., Dabrafenib).",
    )
    args = parser.parse_args()
    pert_name = args.perturbation
    pcfg = cfg.get_pert_config(pert_name)

    LOGGER.info("=" * 60)
    LOGGER.info("Random Baseline Generation — %s", pert_name)
    LOGGER.info("=" * 60)

    pcfg.baseline_dir.mkdir(parents=True, exist_ok=True)
    output_path = pcfg.baseline_dir / pcfg.baseline_result_h5ad
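    # Idempotent: if a baseline result already exists on disk, do nothing.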
    if output_path.exists():
        LOGGER.info("Baseline result already exists: %s — skipping.", output_path)
        return

    # Ensure prompt_pert data exists
    has_data = ensure_prompt_pert(pcfg)
    if not has_data:
        LOGGER.warning("Skipping baseline for %s (no T cell data).", pert_name)
        return

    # --- Load model ---
    LOGGER.info("Loading model: %s", cfg.ALIGNED_CKPT)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = load_model_from_checkpoint(
        str(cfg.ALIGNED_CKPT),
        model_class="ICL_FinetunedModel",
        device=device,
    )

    # --- Load data ---
    query_ctrl_path = str(cfg.RESULTS_DIR / cfg.QUERY_CTRL_H5AD)
    prompt_pert_path = str(pcfg.results_dir / cfg.PROMPT_PERT_H5AD)
    LOGGER.info("Query (test): %s", query_ctrl_path)
    LOGGER.info("Prompt (base): %s", prompt_pert_path)

    # --- Run random-prompt generation ---
    LOGGER.info("Running get_incontext_generation (random prompt baseline)...")
    result = model.get_incontext_generation(
        base_adata_or_path=prompt_pert_path,
        test_adata_or_path=query_ctrl_path,
        genelist_path=str(cfg.GENELIST_PATH),
        mode="mdm",
        num_steps=cfg.NUM_STEPS,
        prompt_ratio=cfg.PROMPT_RATIO,
        context_ratio=cfg.CONTEXT_RATIO,
        context_ratio_min=cfg.CONTEXT_RATIO_MIN,
        batch_size=cfg.BATCH_SIZE,
        num_workers=cfg.NUM_WORKERS,
    )
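    # get_incontext_generation may return the predictions alone or a
    # (predictions, logits) tuple; handle both shapes.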
    if isinstance(result, tuple):
        predictions, test_logit = result
    else:
        predictions, test_logit = result, None

    # --- Build output AnnData ---
    query_ctrl = ad.read_h5ad(query_ctrl_path)
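    # Dense predictions are wrapped in a CSR matrix before writing; a sparse
    # result from the model is kept as-is.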
    if issparse(predictions):
        pred_X = predictions
    else:
        pred_X = csr_matrix(np.asarray(predictions, dtype=np.float32))

    result_adata = ad.AnnData(
        X=pred_X,
        obs=query_ctrl.obs.copy(),
        var=query_ctrl.var.copy(),
    )
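    # Label the generated cells as the requested perturbation rather than controls.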
    result_adata.obs["sm_name"] = pert_name
    result_adata.obs["control"] = False
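    # Keep the per-cell generation logits when the model returned them.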
    if test_logit is not None:
        result_adata.obs["gen_logit"] = np.asarray(test_logit)

    result_adata.write_h5ad(output_path)
    LOGGER.info("Saved baseline result: %s shape=%s", output_path, result_adata.shape)

    LOGGER.info("=" * 60)
    LOGGER.info("Random Baseline Generation — %s — Done", pert_name)
    LOGGER.info("=" * 60)


if __name__ == "__main__":
    main()