"""
Data loading for grn_svd.

Imports scDFM's Data/PerturbationDataset (plus TrainSampler/TestDataset) by
temporarily swapping out this project's 'src.*' entries in sys.modules so that
scDFM's own 'src.*' packages are visible during import.
"""
import sys
import os
import torch
from torch.utils.data import Dataset
_SCDFM_ROOT = os.path.normpath(
    os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "transfer", "code", "scDFM")
)
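# NOTE (assumption): this resolves to <repo>/transfer/code/scDFM, i.e. the scDFM
# checkout is expected four directory levels above this file; adjust the relative
# path if the repository layout differs.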
# Cache to avoid repeated imports
_cached_classes = {}
def get_data_classes():
"""Lazily import scDFM data classes with proper module isolation."""
if _cached_classes:
return (
_cached_classes["Data"],
_cached_classes["PerturbationDataset"],
_cached_classes["TrainSampler"],
_cached_classes["TestDataset"],
)
# Save CCFM's src modules
saved = {}
for key in list(sys.modules.keys()):
if key == "src" or key.startswith("src."):
saved[key] = sys.modules.pop(key)
# Ensure __init__.py exists for scDFM data_process
for d in ["src", "src/data_process", "src/utils", "src/tokenizer"]:
init_path = os.path.join(_SCDFM_ROOT, d, "__init__.py")
if not os.path.exists(init_path):
os.makedirs(os.path.dirname(init_path), exist_ok=True)
with open(init_path, "w") as f:
f.write("# Auto-created by CCFM\n")
sys.path.insert(0, _SCDFM_ROOT)
try:
from src.data_process.data import Data, PerturbationDataset, TrainSampler, TestDataset
_cached_classes["Data"] = Data
_cached_classes["PerturbationDataset"] = PerturbationDataset
_cached_classes["TrainSampler"] = TrainSampler
_cached_classes["TestDataset"] = TestDataset
finally:
# Remove scDFM's src.* entries
for key in list(sys.modules.keys()):
if (key == "src" or key.startswith("src.")) and not key.startswith("scdfm_"):
del sys.modules[key]
# Restore CCFM's src modules
for key, mod in saved.items():
sys.modules[key] = mod
if _SCDFM_ROOT in sys.path:
sys.path.remove(_SCDFM_ROOT)
return Data, PerturbationDataset, TrainSampler, TestDataset
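
# Usage sketch: callers unpack all four classes in one call, e.g.
#     Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()
# Subsequent calls return the cached classes without touching sys.modules again.
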
class GRNDatasetWrapper(Dataset):
"""
Wraps scDFM PerturbationDataset to produce sparse delta triplets.
Returns delta_values (B, G_sub, K) and delta_indices (B, G_sub, K)
instead of dense z_target (B, G_sub, G_sub).
SVD projection happens on GPU in denoiser.train_step().
"""
def __init__(self, base_dataset, sparse_cache, gene_ids_cpu, infer_top_gene):
self.base = base_dataset # scDFM PerturbationDataset
self.sparse_cache = sparse_cache # SparseDeltaCache (multi-process safe)
self.gene_ids = gene_ids_cpu # (G_full,) CPU tensor — vocab-encoded gene IDs
self.infer_top_gene = infer_top_gene
def __len__(self):
return len(self.base)
def __getitem__(self, idx):
batch = self.base[idx]
# 1. Random gene subset
G_full = batch["src_cell_data"].shape[-1]
input_gene_ids = torch.randperm(G_full)[:self.infer_top_gene]
# 2. Sparse cache lookup → sparse triplets (runs in worker process)
src_names = batch["src_cell_id"]
tgt_names = batch["tgt_cell_id"]
if src_names and isinstance(src_names[0], (tuple, list)):
src_names = [n[0] for n in src_names]
tgt_names = [n[0] for n in tgt_names]
delta_values, delta_indices = self.sparse_cache.lookup_delta(
src_names, tgt_names, input_gene_ids, device=torch.device("cpu")
) # delta_values: (B, G_sub, K), delta_indices: (B, G_sub, K) int16
# 3. Subset expression data
return {
"src_cell_data": batch["src_cell_data"][:, input_gene_ids], # (B, G_sub)
"tgt_cell_data": batch["tgt_cell_data"][:, input_gene_ids], # (B, G_sub)
"condition_id": batch["condition_id"], # (B, 2)
"delta_values": delta_values, # (B, G_sub, K)
"delta_indices": delta_indices, # (B, G_sub, K) int16
"gene_ids_sub": self.gene_ids[input_gene_ids], # (G_sub,)
"input_gene_ids": input_gene_ids, # (G_sub,)
}
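
# Usage sketch (illustrative only; the base dataset, sparse cache, gene-ID tensor,
# and infer_top_gene value are placeholders for objects built elsewhere):
#     _, PerturbationDataset, _, _ = get_data_classes()
#     wrapper = GRNDatasetWrapper(base_dataset, sparse_cache, gene_ids_cpu,
#                                 infer_top_gene=2048)
#     loader = torch.utils.data.DataLoader(wrapper, batch_size=None, num_workers=4)
#     for item in loader:
#         ...  # item["delta_values"]: (B, G_sub, K); SVD projection happens later on GPU
# batch_size=None is an assumption here: each __getitem__ already returns a
# batched dict, so no extra collation should be applied.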