| """ |
| Data loading for grn_svd. |
| Imports scDFM Data/PerturbationDataset by temporarily swapping sys.modules |
| so that scDFM's 'src.*' packages are visible during import. |
| """ |
|
|
| import sys |
| import os |
|
|
| import torch |
| from torch.utils.data import Dataset |
|
|
| _SCDFM_ROOT = os.path.normpath( |
| os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "transfer", "code", "scDFM") |
| ) |
|
|
| |
| _cached_classes = {} |
|
|
|
|
def get_data_classes():
    """Lazily import scDFM data classes with proper module isolation.

    scDFM's code lives under its own ``src.*`` package namespace, which may
    collide with this project's ``src`` package.  To import safely we:

    1. Stash any currently-loaded ``src``/``src.*`` modules out of ``sys.modules``.
    2. Ensure scDFM's subpackages have ``__init__.py`` files so they are importable.
    3. Prepend scDFM's root to ``sys.path``, import, and cache the classes.
    4. Evict scDFM's ``src.*`` modules, restore the stashed ones, and remove the
       path entry — all in ``finally`` so the caller's environment is restored
       even when the import fails (previously restoration was skipped on error).

    Returns:
        tuple: ``(Data, PerturbationDataset, TrainSampler, TestDataset)``.

    Raises:
        ImportError: if scDFM's data module cannot be imported from
            ``_SCDFM_ROOT``.
    """
    # Fast path: classes already imported once — return the cached references.
    if _cached_classes:
        return (
            _cached_classes["Data"],
            _cached_classes["PerturbationDataset"],
            _cached_classes["TrainSampler"],
            _cached_classes["TestDataset"],
        )

    # Stash any pre-existing 'src'/'src.*' modules so scDFM's own packages
    # under the same names can be imported without clashing.
    saved = {}
    for key in list(sys.modules):
        if key == "src" or key.startswith("src."):
            saved[key] = sys.modules.pop(key)

    # scDFM may ship without package markers; create missing __init__.py files
    # so 'src.data_process' resolves as a regular package.
    # (Split on '/' so the join is portable across OS path separators.)
    for pkg in ("src", "src/data_process", "src/utils", "src/tokenizer"):
        init_path = os.path.join(_SCDFM_ROOT, *pkg.split("/"), "__init__.py")
        if not os.path.exists(init_path):
            os.makedirs(os.path.dirname(init_path), exist_ok=True)
            with open(init_path, "w") as f:
                f.write("# Auto-created by CCFM\n")

    sys.path.insert(0, _SCDFM_ROOT)
    try:
        from src.data_process.data import Data, PerturbationDataset, TrainSampler, TestDataset
        _cached_classes["Data"] = Data
        _cached_classes["PerturbationDataset"] = PerturbationDataset
        _cached_classes["TrainSampler"] = TrainSampler
        _cached_classes["TestDataset"] = TestDataset
    finally:
        # Evict scDFM's 'src.*' modules so they don't shadow the caller's.
        for key in list(sys.modules):
            if key == "src" or key.startswith("src."):
                del sys.modules[key]

        # Restore the caller's stashed modules and clean up sys.path.  This now
        # runs even when the import raises (bug fix: previously outside finally).
        sys.modules.update(saved)
        if _SCDFM_ROOT in sys.path:
            sys.path.remove(_SCDFM_ROOT)

    return Data, PerturbationDataset, TrainSampler, TestDataset
|
|
|
|
class GRNDatasetWrapper(Dataset):
    """Adapter around an scDFM ``PerturbationDataset`` yielding sparse deltas.

    Instead of a dense target matrix ``z_target`` of shape (B, G_sub, G_sub),
    each item carries ``delta_values`` (B, G_sub, K) and ``delta_indices``
    (B, G_sub, K) looked up from a sparse cache; the SVD projection is deferred
    to the GPU inside ``denoiser.train_step()``.
    """

    def __init__(self, base_dataset, sparse_cache, gene_ids_cpu, infer_top_gene):
        # Underlying dataset producing dict batches with cell data and ids.
        self.base = base_dataset
        # Cache exposing lookup_delta(src, tgt, gene_ids, device) -> (vals, idxs).
        self.sparse_cache = sparse_cache
        # Full gene-id tensor kept on CPU; sliced per random subset below.
        self.gene_ids = gene_ids_cpu
        # Number of genes to sample per item.
        self.infer_top_gene = infer_top_gene

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        sample = self.base[idx]

        # Draw a fresh random subset of gene columns for this item.
        total_genes = sample["src_cell_data"].shape[-1]
        gene_subset = torch.randperm(total_genes)[: self.infer_top_gene]

        # Cell ids may arrive as (name, ...) tuples; keep only the name part.
        src_ids = sample["src_cell_id"]
        tgt_ids = sample["tgt_cell_id"]
        if src_ids and isinstance(src_ids[0], (tuple, list)):
            src_ids = [entry[0] for entry in src_ids]
            tgt_ids = [entry[0] for entry in tgt_ids]

        # Sparse target lookup happens on CPU; projection is done later on GPU.
        delta_values, delta_indices = self.sparse_cache.lookup_delta(
            src_ids, tgt_ids, gene_subset, device=torch.device("cpu")
        )

        return {
            "src_cell_data": sample["src_cell_data"][:, gene_subset],
            "tgt_cell_data": sample["tgt_cell_data"][:, gene_subset],
            "condition_id": sample["condition_id"],
            "delta_values": delta_values,
            "delta_indices": delta_indices,
            "gene_ids_sub": self.gene_ids[gene_subset],
            "input_gene_ids": gene_subset,
        }
|
|