Datasets:

Languages:
English
License:
psui3905 committed on
Commit
ef78b20
·
verified ·
1 Parent(s): f42b18a

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +6 -4
  2. README.md +85 -3
  3. compass/checkpoint/pft_leave_IMVigor210.pt +3 -0
  4. compass/checkpoint/pretrainer.pt +3 -0
  5. depmap_24q2/corr_matrix.npy +3 -0
  6. depmap_24q2/gene_correlations.h5 +3 -0
  7. depmap_24q2/gene_idx_array.npy +3 -0
  8. depmap_24q2/gene_names.txt +0 -0
  9. depmap_24q2/p_adj_matrix.npy +3 -0
  10. depmap_24q2/p_val_matrix.npy +3 -0
  11. pinnacle_embeds/pinnacle_labels_dict.txt +3 -0
  12. pinnacle_embeds/pinnacle_mg_embed.pth +3 -0
  13. pinnacle_embeds/pinnacle_protein_embed.pth +3 -0
  14. pinnacle_embeds/ppi_embed_dict.pth +3 -0
  15. transcriptformer_embedding/embedding_generation/README.md +23 -0
  16. transcriptformer_embedding/embedding_generation/celltype_disease_cge_inference.py +586 -0
  17. transcriptformer_embedding/embedding_generation/preprocess_adata.py +165 -0
  18. transcriptformer_embedding/embedding_store/follicular_lymphoma/b_cell_follicular_lymphoma.npy +3 -0
  19. transcriptformer_embedding/embedding_store/follicular_lymphoma/b_cell_normal.npy +3 -0
  20. transcriptformer_embedding/embedding_store/follicular_lymphoma/cd4_positive_alpha_beta_cytotoxic_t_cell_follicular_lymphoma.npy +3 -0
  21. transcriptformer_embedding/embedding_store/follicular_lymphoma/cd4_positive_alpha_beta_cytotoxic_t_cell_normal.npy +3 -0
  22. transcriptformer_embedding/embedding_store/follicular_lymphoma/cd4_positive_alpha_beta_t_cell_follicular_lymphoma.npy +3 -0
  23. transcriptformer_embedding/embedding_store/follicular_lymphoma/cd4_positive_alpha_beta_t_cell_normal.npy +3 -0
  24. transcriptformer_embedding/embedding_store/follicular_lymphoma/cd8_positive_alpha_beta_t_cell_follicular_lymphoma.npy +3 -0
  25. transcriptformer_embedding/embedding_store/follicular_lymphoma/cd8_positive_alpha_beta_t_cell_normal.npy +3 -0
  26. transcriptformer_embedding/embedding_store/follicular_lymphoma/effector_cd8_positive_alpha_beta_t_cell_follicular_lymphoma.npy +3 -0
  27. transcriptformer_embedding/embedding_store/follicular_lymphoma/effector_cd8_positive_alpha_beta_t_cell_normal.npy +3 -0
  28. transcriptformer_embedding/embedding_store/follicular_lymphoma/erythrocyte_follicular_lymphoma.npy +3 -0
  29. transcriptformer_embedding/embedding_store/follicular_lymphoma/erythrocyte_normal.npy +3 -0
  30. transcriptformer_embedding/embedding_store/follicular_lymphoma/exhausted_t_cell_follicular_lymphoma.npy +3 -0
  31. transcriptformer_embedding/embedding_store/follicular_lymphoma/exhausted_t_cell_normal.npy +3 -0
  32. transcriptformer_embedding/embedding_store/follicular_lymphoma/follicular_dendritic_cell_follicular_lymphoma.npy +3 -0
  33. transcriptformer_embedding/embedding_store/follicular_lymphoma/follicular_dendritic_cell_normal.npy +3 -0
  34. transcriptformer_embedding/embedding_store/follicular_lymphoma/malignant_cell_follicular_lymphoma.npy +3 -0
  35. transcriptformer_embedding/embedding_store/follicular_lymphoma/mature_nk_t_cell_follicular_lymphoma.npy +3 -0
  36. transcriptformer_embedding/embedding_store/follicular_lymphoma/mature_nk_t_cell_normal.npy +3 -0
  37. transcriptformer_embedding/embedding_store/follicular_lymphoma/metadata.json.gz +3 -0
  38. transcriptformer_embedding/embedding_store/follicular_lymphoma/myeloid_cell_follicular_lymphoma.npy +3 -0
  39. transcriptformer_embedding/embedding_store/follicular_lymphoma/myeloid_cell_normal.npy +3 -0
  40. transcriptformer_embedding/embedding_store/follicular_lymphoma/naive_thymus_derived_cd4_positive_alpha_beta_t_cell_follicular_lymphoma.npy +3 -0
  41. transcriptformer_embedding/embedding_store/follicular_lymphoma/naive_thymus_derived_cd4_positive_alpha_beta_t_cell_normal.npy +3 -0
  42. transcriptformer_embedding/embedding_store/follicular_lymphoma/naive_thymus_derived_cd8_positive_alpha_beta_t_cell_follicular_lymphoma.npy +3 -0
  43. transcriptformer_embedding/embedding_store/follicular_lymphoma/naive_thymus_derived_cd8_positive_alpha_beta_t_cell_normal.npy +3 -0
  44. transcriptformer_embedding/embedding_store/follicular_lymphoma/plasma_cell_follicular_lymphoma.npy +3 -0
  45. transcriptformer_embedding/embedding_store/follicular_lymphoma/plasma_cell_normal.npy +3 -0
  46. transcriptformer_embedding/embedding_store/follicular_lymphoma/plasmacytoid_dendritic_cell_follicular_lymphoma.npy +3 -0
  47. transcriptformer_embedding/embedding_store/follicular_lymphoma/plasmacytoid_dendritic_cell_normal.npy +3 -0
  48. transcriptformer_embedding/embedding_store/follicular_lymphoma/regulatory_t_cell_follicular_lymphoma.npy +3 -0
  49. transcriptformer_embedding/embedding_store/follicular_lymphoma/regulatory_t_cell_normal.npy +3 -0
  50. transcriptformer_embedding/embedding_store/follicular_lymphoma/t_cell_follicular_lymphoma.npy +3 -0
.gitattributes CHANGED
@@ -9,7 +9,6 @@
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
  *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
  *.model filter=lfs diff=lfs merge=lfs -text
15
  *.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -54,6 +53,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
54
  *.jpg filter=lfs diff=lfs merge=lfs -text
55
  *.jpeg filter=lfs diff=lfs merge=lfs -text
56
  *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
58
- *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
  *.lz4 filter=lfs diff=lfs merge=lfs -text
 
12
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
  *.model filter=lfs diff=lfs merge=lfs -text
14
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ pinnacle_embeds/pinnacle_labels_dict.txt filter=lfs diff=lfs merge=lfs -text
57
+ *.psd filter=lfs diff=lfs merge=lfs -text
58
+ *.csv filter=lfs diff=lfs merge=lfs -text
59
+ *.json filter=lfs diff=lfs merge=lfs -text
60
+ depmap_24q2 filter=lfs diff=lfs merge=lfs -text
61
+ *.h5ad filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,85 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ configs:
3
+ - config_name: labels_dict
4
+ data_files:
5
+ - path: pinnacle_embeds/pinnacle_labels_dict.txt
6
+ split: train
7
+ - config_name: pinnacle_protein_embed
8
+ data_files:
9
+ - path: pinnacle_embeds/pinnacle_protein_embed.pth
10
+ split: train
11
+ license: apache-2.0
12
+ language:
13
+ - en
14
+ tags:
15
+ - chemistry
16
+ pretty_name: medea_db
17
+ ---
18
+
19
+ # MEDEA-DB
20
+
21
+ This database contains curated datasets and pre-trained model weights across multiple domains of tools leveraged by Medea, including:
22
+
23
+ - Protein-protein interaction networks & Multi-scale gene/protein embeddings (PINNACLE, TranscriptFormer, etc.)
24
+ - Co-dependency statistics for disease gene pairs (Chronos gene-effect profiles from DepMap 24Q2 CRISPR)
25
+ - Immunotherapy response prediction models (COMPASS pretrain checkpoint)
26
+
27
+ ---
28
+
29
+ ## Available Data & Resources
30
+
31
+ ### 1. Gene/Protein Embeddings
32
+
33
+ #### **PINNACLE Embeddings** (`pinnacle_embeds/`)
34
+ - **Model**: PINNACLE
35
+ - **Files**:
36
+ - `pinnacle_protein_embed.pth`: Protein-level embeddings with cell type specificity
37
+ - `pinnacle_mg_embed.pth`: Meta-graph level embeddings on cellular interactions and tissue hierarchy
38
+ - `ppi_embed_dict.pth`: PPI-based embeddings
39
+ - `pinnacle_labels_dict.txt`: Gene/protein labels
40
+ - **Config Names**: `pinnacle_protein_embed`, `labels_dict`
41
+ - **Format**: PyTorch tensors
42
+
43
+ #### **Transcriptformer Embeddings** (`transcriptformer_embedding/`)
44
+ - **Model**: Transcriptformer (Transcriptomics transformer)
45
+ - **Structure**:
46
+ - `embedding_generation/`: Scripts for generating embeddings
47
+ - `embedding_store/`: Pre-computed embeddings (138 `.npy` files)
48
+ - `processor/`: Data processing utilities
49
+ - **Format**: NumPy arrays, compressed archives
50
+
51
+ ---
52
+
53
+ ### 2. Gene Dependency & Correlation Data
54
+
55
+ #### **DepMap 24Q2** (`depmap_24q2/`)
56
+ - **Release**: DepMap Public 24Q2
57
+ - **Files**:
58
+ - `corr_matrix.npy`: Gene correlation matrix
59
+ - `p_val_matrix.npy`: Statistical significance values
60
+ - `p_adj_matrix.npy`: Adjusted p-values (multiple testing correction)
61
+ - `gene_correlations.h5`: HDF5 format correlations
62
+ - `gene_idx_array.npy`: Gene index mappings
63
+ - `gene_names.txt`: Gene identifiers
64
+
65
+ ---
66
+
67
+ ### 3. Immunotherapy Response Prediction Models
68
+
69
+ #### **COMPASS Checkpoints** (`compass/checkpoint/`)
70
+ - **Model**: COMPASS
71
+ - **Checkpoints**:
72
+ - `pretrainer.pt`: Pre-trained base model
73
+ - `pft_leave_IMVigor210.pt`: Leave-one-cohort-out (IMVigor210) fine-tuned model
74
+
75
+ ---
76
+
77
+ ## Data Sources & Citations
78
+
79
+ Please cite the original sources when using specific datasets or models.
80
+
81
+ ---
82
+
83
+ ## License
84
+
85
+ This dataset is released under the [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license.
compass/checkpoint/pft_leave_IMVigor210.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:029facac49083e1a5f263eff336f4c8e0fc99ddc458806f06c927fab53df1706
3
+ size 35114491
compass/checkpoint/pretrainer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:661ae4d838775c553d3d1c787fabbfed504d7b7924e77ff0aaae00bcbabde8e8
3
+ size 26259771
depmap_24q2/corr_matrix.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6033ec557f34fd11b42df02bf1383d03ef416d637c10214ffd07492f2021dd0e
3
+ size 1360577124
depmap_24q2/gene_correlations.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b2d300c94ed9f6b05d9a9aa1af31b176267d49f7d78137238e6e54c904c1a1bd
3
+ size 8163913196
depmap_24q2/gene_idx_array.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07a35cc87c97a6d8e98b978cff59fa816b729bcb929d56531cd7ed6e37ca03c4
3
+ size 885392
depmap_24q2/gene_names.txt ADDED
The diff for this file is too large to render. See raw diff
 
depmap_24q2/p_adj_matrix.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdedd5c491a8219206c27b93ded8593090056a3d24c231086964a9f8f5492113
3
+ size 1360577124
depmap_24q2/p_val_matrix.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ffb2ac5e9dbd6c7ed3b246c2d92a0b2f5abe80a1c4cee071bbcd363db212678
3
+ size 1360577124
pinnacle_embeds/pinnacle_labels_dict.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:898e18eb847840f9fe481ea65ddf0a1a5cf9783eecfef03031f7b97837106abe
3
+ size 20543790
pinnacle_embeds/pinnacle_mg_embed.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f089e64e7e6f8c9bb138e4e4f3276d571398d70c0a8c6fd46c74a72c80294593
3
+ size 112363
pinnacle_embeds/pinnacle_protein_embed.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76c362f4ee6de778650468ab8fb8e463d91f81335474f31069a0e61249c67a89
3
+ size 202158537
pinnacle_embeds/ppi_embed_dict.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f765323ece424a46cdbcd7c6920204d3d1f4d75accd9316a832c354c7f1ad78
3
+ size 235630939
transcriptformer_embedding/embedding_generation/README.md ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Step 1 - Download TranscriptFormer
2
+ git clone https://github.com/czi-ai/transcriptformer.git
3
+
4
+ ## Step 2 - Create a local folder structure
5
+ Disease-atlas/
6
+ |-- fl
7
+ | `-- fixed.h5ad
8
+ |-- ra
9
+ | |-- e04346ba-cdc5-418b-81e6-2f896696e3dd.h5ad
10
+ | `-- fixed.h5ad
11
+ |-- ss
12
+ | |-- 4e6c8033-87d5-45e6-a240-10281074d440.h5ad
13
+ | `-- fixed.h5ad
14
+ `-- t1dm
15
+ |-- 5378ac26-e216-41e8-b171-a7f4d819a9ff.h5ad
16
+ `-- fixed.h5ad
17
+
18
+ ## Step 3 - Download and Check the Disease Atlas
19
+ python preprocess_adata.py /root/Disease-atlas/t1dm/5378ac26-e216-41e8-b171-a7f4d819a9ff.h5ad /root/Disease-atlas/t1dm/fixed.h5ad
21
+
22
+ ## Step 4 - Run Inference
23
+ python celltype_disease_cge_inference.py /root/Disease-atlas/t1dm/fixed.h5ad t1dm_cge_embeddings.h5ad ~/transcriptformer/checkpoints/tf_sapiens/ 1000 100 1
transcriptformer_embedding/embedding_generation/celltype_disease_cge_inference.py ADDED
@@ -0,0 +1,586 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Cell-type and Disease-state specific CGE inference.
4
+ Generates separate averaged embeddings for each cell-type + disease-state combination.
5
+ """
6
+
7
+ import sys
8
+ import os
9
+ import gc
10
+ import numpy as np
11
+ import pandas as pd
12
+ import anndata as ad
13
+ import subprocess
14
+ import tempfile
15
+ from pathlib import Path
16
+ import shutil
17
+ import time
18
+ from collections import defaultdict
19
+ from joblib import Parallel, delayed
20
+ import re
21
+
22
def check_disk_space():
    """Return the free space of the root ('/') filesystem in gigabytes.

    Returns:
        float: free gigabytes, or 0 when the filesystem cannot be queried.
    """
    try:
        usage = shutil.disk_usage('/')
        return usage.free / (1024**3)
    except OSError:
        # A bare `except:` previously swallowed KeyboardInterrupt/SystemExit
        # as well; only OS-level failures of disk_usage are expected here.
        return 0
30
+
31
def analyze_cell_groups(adata):
    """Summarize every (cell_type, disease) combination present in ``adata.obs``.

    Prints a per-group report (cell count, share of the dataset, a rough
    float32 memory estimate) and a chunking-strategy overview, then returns
    the combinations as a DataFrame with columns
    ['cell_type', 'disease', 'cell_count'], sorted by descending cell count.
    """
    print("🔍 Analyzing Cell Groups")
    print("=" * 40)

    # One row per unique cell-type/disease pair, largest groups first.
    groups = (
        adata.obs.groupby(['cell_type', 'disease'])
        .size()
        .reset_index(name='cell_count')
        .sort_values('cell_count', ascending=False)
    )

    print(f"📊 Found {len(groups)} cell-type + disease combinations:")
    print("-" * 50)

    memory_estimates = []
    groups_needing_chunks = 0

    for _, entry in groups.iterrows():
        n_cells = entry['cell_count']
        percentage = (n_cells / adata.n_obs) * 100

        # float32 = 4 bytes per matrix value.
        est_mb = (n_cells * adata.n_vars * 4) / (1024**2) if n_cells > 0 else 0
        memory_estimates.append(est_mb)

        # Groups above the 2 GB threshold will be processed in chunks.
        too_big = est_mb > 2000
        if too_big:
            groups_needing_chunks += 1

        marker = "🔸" if too_big else "🔹"
        print(f" {marker} {entry['cell_type']} ({entry['disease']}): {n_cells:,} cells ({percentage:.1f}%) - ~{est_mb:.1f} MB")

    print()
    print("🧠 Memory-Adaptive Processing Strategy:")
    print(f" • Groups processed as single units: {len(groups) - groups_needing_chunks}/{len(groups)}")
    print(f" • Groups requiring chunking: {groups_needing_chunks}/{len(groups)}")
    print(f" • Memory threshold: 2GB per chunk")

    return groups
72
+
73
def get_cell_indices_for_group(adata, cell_type, disease):
    """Return positional indices of cells matching the given
    cell-type + disease combination in ``adata.obs``."""
    obs = adata.obs
    selected = (obs['cell_type'] == cell_type) & (obs['disease'] == disease)
    return np.flatnonzero(np.asarray(selected))
78
+
79
def create_chunk(adata, cell_indices, chunk_start, chunk_end, chunk_path):
    """Write cells ``cell_indices[chunk_start:chunk_end]`` of ``adata`` to
    ``chunk_path`` as a standalone AnnData file.

    Returns:
        int: the number of cells written into the chunk.
    """
    selected = cell_indices[chunk_start:chunk_end]

    # Materialize an independent AnnData holding just these cells, then persist it.
    piece = adata[selected, :].copy()
    piece.write(chunk_path)

    size_mb = os.path.getsize(chunk_path) / (1024**2)
    print(f" 📦 Chunk file: {size_mb:.1f} MB ({len(selected)} cells)")

    return len(selected)
93
+
94
def run_inference_chunk(chunk_path, output_path, checkpoint_path, batch_size=1, max_retries=3):
    """Run inference on a single chunk with automatic batch size reduction.

    Invokes the external ``transcriptformer inference`` CLI on ``chunk_path``,
    streaming its combined stdout/stderr to the console in real time. When the
    subprocess exits with -9 (SIGKILL, treated as out-of-memory) the batch
    size is halved and the command retried, up to ``max_retries`` attempts.
    On a zero exit code, the produced .h5ad is searched for in several
    candidate locations and moved to ``output_path``.

    Returns:
        bool: True when an output file was found and moved into place;
        False on failure, user interruption, or after exhausting retries.
    """

    abs_checkpoint_path = os.path.abspath(checkpoint_path)
    abs_chunk_path = os.path.abspath(chunk_path)
    output_dir = os.path.dirname(output_path)

    # Debug information
    print(f" 🐛 Debug info:")
    print(f" Checkpoint: {abs_checkpoint_path}")
    print(f" Input: {abs_chunk_path}")
    print(f" Output dir: {output_dir}")
    print(f" Expected output: {output_path}")

    # Try with automatic batch size reduction
    current_batch_size = batch_size

    for attempt in range(max_retries):
        # Rebuild the command each attempt so it picks up the (possibly halved)
        # current batch size.
        cmd = [
            "transcriptformer", "inference",
            "--checkpoint-path", abs_checkpoint_path,
            "--data-file", abs_chunk_path,
            "--emb-type", "cge",
            "--output-filename", os.path.basename(output_path),
            "--batch-size", str(current_batch_size),
            "--precision", "16-mixed"
        ]

        print(f" 🚀 Command: {' '.join(cmd)}")
        print(f" 📦 Attempt {attempt + 1}/{max_retries} with batch_size={current_batch_size}")

        try:
            # Run with real-time streaming output
            print(f" 🔍 Starting transcriptformer inference...")
            print(f" 📺 Streaming logs (press Ctrl+C to stop):")
            print(f" {'='*50}")

            # Use Popen (not run) so the CLI's log lines can be echoed as they
            # arrive instead of after the process finishes.
            process = subprocess.Popen(
                cmd,
                cwd=output_dir,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,  # Combine stderr into stdout
                text=True,
                bufsize=1,  # Line buffered
                universal_newlines=True
            )

            # Stream output in real-time
            stdout_lines = []
            # NOTE(review): stderr_lines stays empty — stderr is merged into
            # stdout above; it exists only to fill the Result shim below.
            stderr_lines = []

            try:
                while True:
                    output = process.stdout.readline()
                    # Empty read plus a finished process means EOF.
                    if output == '' and process.poll() is not None:
                        break
                    if output:
                        # Print in real-time with prefix
                        print(f" 📺 {output.rstrip()}")
                        stdout_lines.append(output)
            except KeyboardInterrupt:
                print(f" ⚠️ Process interrupted by user")
                process.terminate()
                process.wait()
                return False

            # Wait for process to complete
            return_code = process.wait()

            print(f" {'='*50}")
            print(f" 📊 Process completed with return code: {return_code}")

            # Ad-hoc object mimicking subprocess.CompletedProcess for the
            # checks below (kept for compatibility with earlier code shape).
            result = type('Result', (), {
                'returncode': return_code,
                'stdout': ''.join(stdout_lines),
                'stderr': ''.join(stderr_lines)
            })()

            if result.returncode == 0:
                # The CLI's output location is not fully predictable, so check
                # several places, in priority order.
                possible_outputs = []

                # 1. Check in inference_results subdirectory (original expectation)
                inference_results_output = os.path.join(output_dir, "inference_results", os.path.basename(output_path))
                possible_outputs.append(inference_results_output)

                # 2. Check directly in output directory
                direct_output = os.path.join(output_dir, os.path.basename(output_path))
                possible_outputs.append(direct_output)

                # 3. Check for .h5ad files in output directory (in case filename differs)
                if os.path.exists(output_dir):
                    for file in os.listdir(output_dir):
                        if file.endswith('.h5ad'):
                            possible_outputs.append(os.path.join(output_dir, file))

                # 4. Check for .h5ad files in inference_results subdirectory
                inference_dir = os.path.join(output_dir, "inference_results")
                if os.path.exists(inference_dir):
                    for file in os.listdir(inference_dir):
                        if file.endswith('.h5ad'):
                            possible_outputs.append(os.path.join(inference_dir, file))

                # Try to find the actual output file
                print(f" 🔍 Searching for output files...")
                found_output = None
                for i, possible_output in enumerate(possible_outputs):
                    exists = os.path.exists(possible_output)
                    print(f" {i+1:2d}. {possible_output} {'✅' if exists else '❌'}")
                    if exists:
                        found_output = possible_output
                        break

                if found_output:
                    print(f" ✅ Found output: {found_output}")
                    # Move to expected location if different
                    if found_output != output_path:
                        shutil.move(found_output, output_path)
                        print(f" 📁 Moved to: {output_path}")

                    # Clean up inference_results directory if it exists
                    if os.path.exists(inference_dir):
                        shutil.rmtree(inference_dir)
                        print(f" 🧹 Cleaned up inference_results directory")

                    return True
                else:
                    print(f" ❌ Output not found in any expected location")
                    print(f" 🔍 Searched locations:")
                    for loc in possible_outputs:
                        print(f" • {loc} {'✅' if os.path.exists(loc) else '❌'}")

                    # List all files in output directory for debugging
                    if os.path.exists(output_dir):
                        print(f" 📁 All files in output_dir: {os.listdir(output_dir)}")
                        # Check for subdirectories
                        for item in os.listdir(output_dir):
                            item_path = os.path.join(output_dir, item)
                            if os.path.isdir(item_path):
                                print(f" 📁 Subdirectory '{item}': {os.listdir(item_path)}")

                    return False
            else:
                if result.returncode == -9:
                    # SIGKILL: assume the OOM killer fired; halve the batch
                    # size (never below 1) and retry.
                    print(f" ❌ Process killed by system (SIGKILL) - likely out of memory")
                    print(f" 💡 Reducing batch size and retrying...")
                    current_batch_size = max(1, current_batch_size // 2)
                    continue
                elif result.returncode == -11:
                    print(f" ❌ Process crashed with segmentation fault")
                    return False
                else:
                    print(f" ❌ Inference failed (code {result.returncode})")
                    return False

        except subprocess.TimeoutExpired:
            # NOTE(review): no timeout is ever passed to Popen/wait above, so
            # this handler appears unreachable — confirm before relying on it.
            print(f" ❌ Inference timed out")
            return False
        except Exception as e:
            print(f" ❌ Error: {str(e)}")
            return False

    print(f" ❌ All {max_retries} attempts failed")
    return False
261
+
262
def load_cge_embeddings(chunk_path):
    """Load CGE embeddings produced by an inference chunk.

    Reads ``chunk_path`` as an AnnData file and extracts the arrays stored
    under ``uns``.

    Returns:
        tuple: (embeddings, gene_names, cell_indices). The last two may be
        None when absent; (None, None, None) is returned when the file lacks
        a 'cge_embeddings' entry or cannot be read at all.
    """
    try:
        result = ad.read_h5ad(chunk_path)

        uns = result.uns
        if 'cge_embeddings' not in uns:
            print(f" ⚠️ No CGE embeddings found")
            return None, None, None

        emb = uns['cge_embeddings']
        names = uns.get('cge_gene_names', None)
        cells = uns.get('cge_cell_indices', None)

        print(f" 📊 Loaded: {emb.shape[0]} embeddings, {emb.shape[1]} dims")
        return emb, names, cells

    except Exception as e:
        print(f" ❌ Error loading embeddings: {e}")
        return None, None, None
282
+
283
def merge_cge_embeddings(running_avg, running_counts, new_embeddings, new_gene_names):
    """
    Incrementally fold a new batch of per-gene embeddings into running
    per-gene averages.

    For every gene name in ``new_gene_names`` the matching rows of
    ``new_embeddings`` are averaged into ``running_avg`` (keyed by the gene's
    string form) with ``running_counts`` tracking how many observations each
    average covers. Passing ``running_avg=None`` starts fresh dictionaries.

    Returns:
        tuple: (running_avg, running_counts), updated in place and returned.
    """
    if running_avg is None:
        running_avg, running_counts = {}, {}

    if len(new_gene_names) == 0:
        return running_avg, running_counts

    # First pass: per-gene element-wise sums and occurrence counts,
    # accumulated in input order.
    batch_sums, batch_counts = {}, {}
    for raw_name, vector in zip(new_gene_names, new_embeddings):
        key = str(raw_name)
        if key in batch_sums:
            batch_sums[key] = batch_sums[key] + vector
            batch_counts[key] += 1
        else:
            batch_sums[key] = vector
            batch_counts[key] = 1

    # Second pass: blend each batch sum into the running average.
    for key, total in batch_sums.items():
        n_new = batch_counts[key]
        if key in running_avg:
            old_count = running_counts[key]
            new_count = old_count + n_new
            # Weighted update keeps the average exact without re-summing
            # all previous observations.
            running_avg[key] = running_avg[key] * (old_count / new_count) + total / new_count
            running_counts[key] = new_count
        else:
            # First time this gene is seen.
            running_avg[key] = total / n_new
            running_counts[key] = n_new

    return running_avg, running_counts
326
+
327
def process_cell_group(adata, cell_type, disease, cell_indices, checkpoint_path,
                       temp_dir, chunk_size=500, batch_size=1, memory_threshold_mb=2000):
    """
    Process a single cell-type + disease group with simplified, more efficient adaptive chunking.

    Splits the group's cells into fixed-size chunks, writes each chunk to a
    temporary .h5ad via ``create_chunk``, runs ``run_inference_chunk`` on it,
    and folds the resulting CGE embeddings into a per-gene running average via
    ``merge_cge_embeddings``. Temporary chunk files are removed after each
    iteration, and processing stops early if free disk space drops below 3 GB.

    Returns:
        tuple: (running_avg, running_counts, total_cells_processed), or
        (None, None, 0) when the group is empty or no chunk yielded embeddings.

    NOTE(review): the ``chunk_size`` and ``memory_threshold_mb`` parameters are
    currently ignored — ``chunk_size`` is overwritten with 1000 below, and the
    memory estimate computed from ``adata.n_vars`` is never used.
    """
    print(f"\n🎯 Processing: {cell_type} ({disease})")
    print(f" 📊 {len(cell_indices)} cells")
    print("-" * 50)

    if len(cell_indices) == 0:
        print(" ⚠️ No cells to process")
        return None, None, 0

    # Simplified chunking logic
    n_cells = len(cell_indices)
    # Estimate memory based on a sample, assuming float32.
    # NOTE(review): est_total_mem is computed but unused.
    est_mem_per_cell = (adata.n_vars * 4) / (1024**2)
    est_total_mem = n_cells * est_mem_per_cell

    # Fixed chunk size overrides the parameter (see docstring note).
    chunk_size = 1000
    n_chunks = int(np.ceil(n_cells / chunk_size))
    print(f" ⚠️ Large group, using {n_chunks} chunks of ~{chunk_size} cells.")

    running_avg, running_counts = None, None
    total_cells_processed, successful_chunks = 0, 0

    for i in range(n_chunks):
        start_idx, end_idx = i * chunk_size, min((i + 1) * chunk_size, n_cells)

        if n_chunks > 1:
            print(f" 🔄 Chunk {i+1}/{n_chunks} ({end_idx - start_idx} cells)")

        # Bail out before writing another chunk if the disk is nearly full.
        if check_disk_space() < 3:
            print(f" 🚨 STOPPING: Low disk space.")
            break

        # Create a safe group name for file naming
        # (loop-invariant; recomputed each iteration as in the original).
        group_name = re.sub(r'[^\w.-]+', '_', f"{cell_type}_{disease}")
        group_name = group_name.replace(" ", "_").replace("-", "_").lower()
        # Truncate if too long, but ensure uniqueness by adding a hash
        # if len(group_name) > 40:
        #     import hashlib
        #     hash_suffix = hashlib.md5(group_name.encode()).hexdigest()[:8]
        #     group_name = group_name[:32] + "_" + hash_suffix
        chunk_input = os.path.join(temp_dir, f"{group_name}_chunk_{i}_input.h5ad")
        chunk_output_dir = os.path.join(temp_dir, f"{group_name}_chunk_{i}_output")
        chunk_output = os.path.join(chunk_output_dir, f"{group_name}_chunk_{i}_cge.h5ad")
        os.makedirs(chunk_output_dir, exist_ok=True)

        actual_cells = create_chunk(adata, cell_indices, start_idx, end_idx, chunk_input)

        success = run_inference_chunk(chunk_input, chunk_output, checkpoint_path, batch_size)

        if success and os.path.exists(chunk_output):
            embeddings, gene_names, _ = load_cge_embeddings(chunk_output)
            if embeddings is not None and gene_names is not None:
                running_avg, running_counts = merge_cge_embeddings(
                    running_avg, running_counts, embeddings, gene_names
                )
                total_cells_processed += actual_cells
                successful_chunks += 1
                print(f" ✅ Chunk {i+1} merged (running genes: {len(running_avg)})")
            else:
                print(f" ❌ Chunk {i+1} - no valid embeddings found.")
        else:
            print(f" ❌ Chunk {i+1} failed.")

        # Cleanup: remove this chunk's temp files and reclaim memory
        # before the next iteration.
        if os.path.exists(chunk_input): os.remove(chunk_input)
        if os.path.exists(chunk_output_dir): shutil.rmtree(chunk_output_dir)
        gc.collect()

    print(f" ✅ Group completed: {successful_chunks}/{n_chunks} chunks processed, {total_cells_processed} cells.")

    return (running_avg, running_counts, total_cells_processed) if running_avg else (None, None, 0)
402
+
403
def save_celltype_disease_embeddings(all_group_embeddings, output_path, original_adata):
    """
    Save all cell-type + disease specific embeddings to one AnnData file.

    Builds a master gene list across every group, packs each group's averaged
    per-gene embeddings and observation counts into dense matrices, and writes
    an AnnData whose obs are genes, var are groups, and ``uns`` holds the
    embedding/count dictionaries plus metadata.

    Args:
        all_group_embeddings: dict mapping (cell_type, disease) ->
            (running_avg dict, running_counts dict, total_cells).
        output_path: destination .h5ad path.
        original_adata: source AnnData; only its shape is recorded.

    Returns:
        str | None: ``output_path`` on success, None when no group holds
        any embeddings.
    """
    print(f"\n💾 Saving Cell-Type + Disease Specific CGE Embeddings")
    print("=" * 60)

    # Guard FIRST: with no valid group, the gene-union below would raise
    # TypeError (set.union(*()) with zero arguments). Previously this check
    # happened after the union, so an empty input crashed instead of
    # returning None.
    first_valid_group = next((g for g in all_group_embeddings.values() if g[0]), None)
    if not first_valid_group:
        print(" ⚠️ No valid embeddings found to save.")
        return None
    embedding_dim = len(next(iter(first_valid_group[0].values())))

    # Create master gene list from all groups.
    all_genes = sorted(set().union(*(set(d[0].keys()) for d in all_group_embeddings.values() if d[0])))
    gene_to_idx = {gene: i for i, gene in enumerate(all_genes)}
    print(f" 📊 Total unique genes across all groups: {len(all_genes)}")

    # Prepare group metadata
    group_keys = list(all_group_embeddings.keys())

    # Create unique, filesystem-safe group names; numeric suffixes
    # disambiguate collisions after sanitization.
    group_names = []
    seen_names = set()
    for ct, ds in group_keys:
        ct = ct.replace(" ", "_").replace("-", "_").replace(".", "_").replace(",", "_").lower()
        ds = ds.replace(" ", "_").replace("-", "_").replace(".", "_").replace(",", "_").lower()
        base_name = re.sub(r'[^\w.-]+', '_', f"{ct}_{ds}")
        name = base_name
        counter = 1
        while name in seen_names:
            name = f"{base_name}_{counter}"
            counter += 1
        seen_names.add(name)
        group_names.append(name)

    var_data = pd.DataFrame({
        'cell_type': [k[0] for k in group_keys],
        'disease_state': [k[1] for k in group_keys],
        'group_name': group_names,
        'total_cells': [all_group_embeddings[k][2] for k in group_keys]
    }, index=group_names)

    # Efficiently create embedding and count matrices.
    n_genes = len(all_genes)
    n_groups = len(group_keys)

    # Genes absent from a group keep zero embeddings and zero counts.
    all_embeddings = np.zeros((n_groups, n_genes, embedding_dim), dtype=np.float32)
    all_counts = np.zeros((n_groups, n_genes), dtype=np.int32)

    for i, group_key in enumerate(group_keys):
        running_avg, running_counts, _ = all_group_embeddings[group_key]
        if not running_avg:
            continue

        print(f" 📋 Processing group: {group_names[i]}")

        # Scatter this group's per-gene vectors into the master gene order.
        group_genes = list(running_avg.keys())
        indices = [gene_to_idx[g] for g in group_genes]
        all_embeddings[i, indices, :] = np.array(list(running_avg.values()))
        all_counts[i, indices] = np.array(list(running_counts.values()))

    # Comprehensive AnnData: obs = genes, var = groups; X is a placeholder —
    # the real payload lives in uns.
    obs_data = pd.DataFrame(index=all_genes)
    final_adata = ad.AnnData(X=np.zeros((n_genes, n_groups)), obs=obs_data, var=var_data)

    # Store embeddings and counts in uns, keyed by sanitized group name.
    final_adata.uns['celltype_disease_embeddings'] = {
        group_name: all_embeddings[i] for i, group_name in enumerate(group_names)
    }
    final_adata.uns['celltype_disease_counts'] = {
        group_name: all_counts[i] for i, group_name in enumerate(group_names)
    }

    # Store metadata
    final_adata.uns['gene_names'] = all_genes
    final_adata.uns['group_info'] = var_data.to_dict('index')
    final_adata.uns['original_data_shape'] = list(original_adata.shape)
    final_adata.uns['averaging_method'] = 'celltype_disease_specific'
    final_adata.uns['embedding_dimensions'] = embedding_dim

    # Save
    final_adata.write(output_path)
    file_size = os.path.getsize(output_path) / (1024**2)
    print(f" 💾 Final file size: {file_size:.1f} MB")

    # Summary
    print(f"\n📋 Summary:")
    print(f" • {n_genes:,} genes")
    print(f" • {n_groups} cell-type + disease groups")
    print(f" • {embedding_dim} embedding dimensions")
    print(f" • File: {output_path}")

    return output_path
503
+
504
def celltype_disease_cge_inference(input_path, output_path, checkpoint_path,
                                   chunk_size=500, batch_size=1, n_jobs=-1):
    """
    Run cell-type and disease-state specific CGE inference with parallel processing.

    Loads the AnnData file, partitions cells into (cell_type, disease) groups,
    fans the per-group inference out to joblib workers, and aggregates the
    surviving group embeddings into a single output file.

    Args:
        input_path: Path to the input .h5ad file.
        output_path: Path where the aggregated embeddings are written.
        checkpoint_path: Model checkpoint location passed to each worker.
        chunk_size: Number of cells each worker processes at a time.
        batch_size: Inference batch size inside a worker.
        n_jobs: Number of parallel joblib workers (-1 = all cores).

    Returns:
        The path returned by save_celltype_disease_embeddings.

    Raises:
        Exception: If disk space is insufficient or no group produced embeddings.
    """
    print(f"🧬 Cell-Type + Disease-State Specific CGE Inference (Parallelized)")
    print(f" Input: {input_path}")
    print(f" Output: {output_path}")
    print(f" Parallel Jobs: {n_jobs}")
    print("=" * 80)

    # Refuse to start without scratch room for the temp chunks plus the output.
    if check_disk_space() < 10:
        raise Exception("Insufficient disk space (need at least 10GB)")

    print("📖 Loading AnnData...")
    adata = ad.read_h5ad(input_path)
    print(f"✅ Loaded: {adata.shape[0]} cells × {adata.shape[1]} genes")

    groups_df = analyze_cell_groups(adata)

    # Keep the scratch directory next to the output so both live on one volume.
    out_parent = os.path.dirname(os.path.abspath(output_path))
    temp_dir = tempfile.mkdtemp(prefix="celltype_disease_cge_", dir=out_parent)
    print(f"\n📁 Temp directory: {temp_dir}")

    try:
        group_rows = [row for _, row in groups_df.iterrows()]

        jobs = []
        for row in group_rows:
            cell_idx = get_cell_indices_for_group(adata, row['cell_type'], row['disease'])
            jobs.append(delayed(process_cell_group)(
                adata, row['cell_type'], row['disease'], cell_idx,
                checkpoint_path, temp_dir, chunk_size, batch_size
            ))

        outcomes = Parallel(n_jobs=n_jobs)(jobs)

        # Keep only groups whose running-average dict came back non-empty.
        all_group_embeddings = {}
        for row, outcome in zip(group_rows, outcomes):
            if outcome[0]:
                all_group_embeddings[(row['cell_type'], row['disease'])] = outcome

        if not all_group_embeddings:
            raise Exception("No valid group embeddings were processed")

        print(f"\n✅ Successfully processed {len(all_group_embeddings)} groups")
        return save_celltype_disease_embeddings(all_group_embeddings, output_path, adata)

    finally:
        if os.path.exists(temp_dir):
            print(f"\n🧹 Cleaning temp directory...")
            shutil.rmtree(temp_dir)
555
def main():
    """CLI entry point: parse argv and launch the parallel CGE inference run."""
    args = sys.argv
    if len(args) < 4:
        print("Usage: python celltype_disease_cge_inference.py <input_h5ad> <output_h5ad> <checkpoint_path> [chunk_size] [batch_size] [n_jobs]")
        print("Example: python celltype_disease_cge_inference.py data.h5ad cge.h5ad ./ckpt/ 500 1 -1")
        sys.exit(1)

    input_path, output_path, checkpoint_path = args[1], args[2], args[3]
    # Optional tuning knobs; each falls back to the function default.
    chunk_size = int(args[4]) if len(args) > 4 else 500
    batch_size = int(args[5]) if len(args) > 5 else 1
    n_jobs = int(args[6]) if len(args) > 6 else -1

    try:
        result = celltype_disease_cge_inference(
            os.path.abspath(input_path),
            os.path.abspath(output_path),
            os.path.abspath(checkpoint_path),
            chunk_size,
            batch_size,
            n_jobs
        )
    except Exception as e:
        print(f"\n❌ ERROR: {str(e)}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
    else:
        print(f"\n🎉 SUCCESS! Cell-type + Disease-state specific CGE embeddings: {result}")


if __name__ == "__main__":
    main()
transcriptformer_embedding/embedding_generation/preprocess_adata.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Consolidated script to diagnose and fix h5ad files for transcriptformer.
4
+
5
+ This script performs a series of checks to validate an AnnData object and
6
+ automatically applies fixes for common issues, preparing the data for
7
+ inference with transcriptformer.
8
+
9
+ Usage:
10
+ python preprocess_adata.py <input_h5ad_file> <output_h5ad_file>
11
+ """
12
+
13
+ import sys
14
+ import os
15
+ import numpy as np
16
+ import anndata as ad
17
+ import scanpy as sc
18
+ from pathlib import Path
19
+
20
def preprocess_adata(input_path, output_path):
    """
    Diagnose and fix an h5ad file for transcriptformer compatibility.

    Runs non-mutating diagnostics (NaN/Inf values, duplicate gene names,
    missing 'ensembl_id' column, zero-expression genes), then applies the
    corresponding fixes and writes the cleaned object to ``output_path``.

    Args:
        input_path: Path to the input .h5ad file.
        output_path: Path where the fixed .h5ad file is written.

    Returns:
        True on success, False if loading or saving failed.
    """
    print(f"🚀 Starting preprocessing for: {input_path}")
    print("=" * 70)

    # 1. Load Data
    print("📖 1. Loading AnnData object...")
    if not os.path.exists(input_path):
        print(f"❌ ERROR: Input file not found: {input_path}")
        return False

    try:
        adata = ad.read_h5ad(input_path)
        print(f"✅ Loaded: {adata.shape[0]} cells × {adata.shape[1]} genes")
    except Exception as e:
        print(f"❌ ERROR: Could not load AnnData file. Reason: {e}")
        return False

    original_shape = adata.shape

    # 2. Run Diagnostics.
    # All checks here are non-mutating, so no second read of the file is
    # needed (the original implementation filtered genes in place during
    # diagnostics and then re-loaded the whole h5ad from disk to undo it).
    print("\n🔬 2. Running Diagnostics...")
    issues_found = []

    # Sparse matrices expose their stored values via .data; dense arrays
    # are checked directly.
    values = adata.X.data if hasattr(adata.X, 'data') else adata.X
    has_nan = bool(np.isnan(values).any())
    has_inf = bool(np.isinf(values).any())
    if has_nan: issues_found.append("NaN values found in data matrix.")
    if has_inf: issues_found.append("Infinite values found in data matrix.")
    print(f" - NaN/Inf values: {'❌ Found' if has_nan or has_inf else '✅ None'}")

    # Check for unique gene indices
    if adata.var.index.nunique() < len(adata.var.index):
        issues_found.append("Duplicate gene indices (var_names) found.")
        print(" - Duplicate gene indices: ❌ Found")
    else:
        print(" - Duplicate gene indices: ✅ Unique")

    # Check for ensembl_id column
    if 'ensembl_id' not in adata.var.columns:
        issues_found.append("'ensembl_id' column missing in var.")
        print(" - 'ensembl_id' column: ❌ Missing")
    else:
        print(" - 'ensembl_id' column: ✅ Present")

    # Check for zero-expression genes without mutating adata:
    # inplace=False returns the keep-mask instead of filtering.
    gene_mask, _ = sc.pp.filter_genes(adata, min_cells=1, inplace=False)
    num_zero_genes = int((~gene_mask).sum())
    if num_zero_genes > 0:
        issues_found.append(f"{num_zero_genes} genes with zero expression found.")
        print(f" - Zero-expression genes: ❌ Found ({num_zero_genes} genes)")
    else:
        print(" - Zero-expression genes: ✅ None")

    # 3. Apply Fixes
    print("\n🔧 3. Applying Fixes...")
    fixes_applied = []

    # Fix: replace NaN/Inf values with 0 so downstream inference cannot
    # propagate them (previously this issue was reported but never fixed).
    if has_nan or has_inf:
        if hasattr(adata.X, 'data'):
            adata.X.data = np.nan_to_num(adata.X.data, nan=0.0, posinf=0.0, neginf=0.0)
        else:
            adata.X = np.nan_to_num(adata.X, nan=0.0, posinf=0.0, neginf=0.0)
        fixes_applied.append("Replaced NaN/Inf values in data matrix with 0.")
        print(" - ✅ Replaced NaN/Inf values with 0.")

    # Fix: Ensure var_names are unique
    if adata.var.index.nunique() < len(adata.var.index):
        adata.var_names_make_unique()
        fixes_applied.append("Made var_names unique using .var_names_make_unique()")
        print(" - ✅ Made gene indices (var_names) unique.")
    else:
        print(" - ✅ Gene indices are already unique.")

    # Fix: Add ensembl_id column if it's missing
    if 'ensembl_id' not in adata.var.columns:
        print(" - Adding 'ensembl_id' column from var.index.")
        adata.var['ensembl_id'] = adata.var.index
        fixes_applied.append("Added 'ensembl_id' column from var.index.")
    else:
        print(" - ✅ 'ensembl_id' column already exists.")

    # Fix: Filter out genes with zero expression (in place, this time on purpose)
    genes_before_filter = adata.n_vars
    sc.pp.filter_genes(adata, min_cells=1)
    if adata.n_vars < genes_before_filter:
        num_removed = genes_before_filter - adata.n_vars
        fixes_applied.append(f"Removed {num_removed} genes with no expression.")
        print(f" - ✅ Removed {num_removed} zero-expression genes.")
    else:
        print(" - ✅ No zero-expression genes to remove.")

    # 4. Save Processed File
    print("\n💾 4. Saving Processed File...")
    try:
        adata.write(output_path)
        print(f" - ✅ Successfully saved to: {output_path}")
    except Exception as e:
        print(f"❌ ERROR: Could not save file. Reason: {e}")
        return False

    # 5. Final Summary
    print("\n📋 5. Summary")
    print("-" * 70)
    print(f" - Original shape: {original_shape[0]} cells × {original_shape[1]} genes")
    print(f" - Final shape: {adata.shape[0]} cells × {adata.shape[1]} genes")
    print("\n - Issues Found:")
    if issues_found:
        for issue in issues_found:
            print(f" - {issue}")
    else:
        print(" - None")

    print("\n - Fixes Applied:")
    if fixes_applied:
        for fix in fixes_applied:
            print(f" - {fix}")
    else:
        print(" - None")

    print("\n🎉 Preprocessing complete!")
    return True
141
def main():
    """CLI entry point: validate arguments, confirm overwrites, run preprocessing."""
    if len(sys.argv) != 3:
        print("Usage: python preprocess_adata.py <input_h5ad_file> <output_h5ad_file>")
        sys.exit(1)

    input_path, output_path = sys.argv[1], sys.argv[2]

    # Refuse to write the output over the input file.
    if os.path.abspath(input_path) == os.path.abspath(output_path):
        print("❌ ERROR: Input and output paths cannot be the same.")
        sys.exit(1)

    # Ask before clobbering an existing output file.
    if os.path.exists(output_path):
        answer = input(f"⚠️ Output file already exists: {output_path}\nOverwrite? (y/N): ")
        if answer.lower() != 'y':
            print("Operation cancelled.")
            sys.exit(1)

    if not preprocess_adata(input_path, output_path):
        sys.exit(1)


if __name__ == "__main__":
    main()
transcriptformer_embedding/embedding_store/follicular_lymphoma/b_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdb448171dd35c06c3dcd520fdd4bbfc24a86cb27d982e547f918c3b05e04d70
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/b_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc023c1200fcb8c9e6f040a03f4e77f7de7a4560395565a4658fa10565bf3a7c
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/cd4_positive_alpha_beta_cytotoxic_t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cca783b52db2afa249edcc13fee3e225250e0817d30f1563c7f8a7ff7cd1a19d
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/cd4_positive_alpha_beta_cytotoxic_t_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec95201626f1d71e15b227737fcdcc0be2baba844cfa862237658c3cee245391
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/cd4_positive_alpha_beta_t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cb6bbd3622dc758bf987bb3e7c177cd0b4c4f39604a755a6396984d6481e502
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/cd4_positive_alpha_beta_t_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7476c51fe28eae70fea2a7de6f938dc0b7dae63e2d52ebfcf0d0ca65a7f4aa2
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/cd8_positive_alpha_beta_t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2d1456c9ba5430a85996747e04e65624a879a66fc291d4200433104a232db1e
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/cd8_positive_alpha_beta_t_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:868b3dad81017c02a7cbd390461ee5658e10479e806265e020726890a4ea33bf
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/effector_cd8_positive_alpha_beta_t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb7ef200bdbbb5f57f5e5a505d8d94e05fed3346c4097fbd08fa7d2280a6dc5f
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/effector_cd8_positive_alpha_beta_t_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f475be02636726f01636abad57cdc238b13aab4f66c9abd20d6c0dc78373f0a0
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/erythrocyte_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32a6ee72d19496cebfb27bcc23dbe8466ad3c08ac7ad646d191feb03c2eac900
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/erythrocyte_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01b806d1216ca6b9313b38ffb1adc19c32910b3273fb5d41ece8388af01bb180
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/exhausted_t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4eebd9b2d1a90c76350db00711543aeaab440e55b4165698edf6fc90238350f1
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/exhausted_t_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9aa6634eecb3938b8a2957d746749d9826a1427b12032eb7f9fbc4ec7640358c
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/follicular_dendritic_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ceeaf9dadcd02dc1a47246c7db01a38abeeab4f69e60396d1a31019a0fde5836
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/follicular_dendritic_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c39a6e9e038603d6fccbf77e53c58b2527ccd5cc001f05407315ff68dcbd6ce6
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/malignant_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d21b813806ade13251d3cecfa3888578319ba3d5fe1a935cebc68dcb38c6fe8
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/mature_nk_t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05adff506406e5b5035be1c21dc239200078dd698bce575243b1dc7f90e01cca
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/mature_nk_t_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:522d6ccf552f6a02d38c5ae9b51529515c55bcc1837854837d4e8994b6078dfa
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/metadata.json.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c93a511dae9b848104f3671f1030b82aeb7bbfe3385940e34eddc2ed6cdbf556
3
+ size 237412
transcriptformer_embedding/embedding_store/follicular_lymphoma/myeloid_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:913710c72a236491313a28148165db4455a9f1449df60b07363e0d335cf93d24
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/myeloid_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c39a8d1515e76a06da570c1aa8fca8fb369dc1f992c765f44db16539fc958753
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/naive_thymus_derived_cd4_positive_alpha_beta_t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64a1fbee0cd341fcd684f7eadbb33062c57ecc86885f4a837c44a2460192aa80
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/naive_thymus_derived_cd4_positive_alpha_beta_t_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1101b2fb0e9e204d13d7c1ebf7c3ae396e41a4a67ed241d3489cf8e9522193d
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/naive_thymus_derived_cd8_positive_alpha_beta_t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16360bd70d0497e35d57a8b4480fb53e88c3807fa6ffe5d52caa36e51ae15e7d
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/naive_thymus_derived_cd8_positive_alpha_beta_t_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9e42423eb4ca31be72c5b864feb158deab418eff4b3e636c7188f8b715d2bd8
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/plasma_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7e48f79816fb10ef72db50aebf558d4bb626d099c4c5eae52f750323cd20f0c
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/plasma_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bdd0187d0789fb94044016f4010a025c4516f9b0e4fa29250af1321d836f8850
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/plasmacytoid_dendritic_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:381166fb6ee8979dacafdc3b71ca0219eff7fd14e792244c61c76d3c07f7e615
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/plasmacytoid_dendritic_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1b5602f51f5bacd63f0a8e2cf26dd4cf1489458fe4097b19661a1ae5d8a73a8
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/regulatory_t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bdb6982d5ef5bc83e73f7922bdb9ea723b59403c5ec1861be46c95da3fbf783
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/regulatory_t_cell_normal.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b82b5afafa7db6be371cb884b706ed7184dce8260f9ac99512639fe2a0fb188d
3
+ size 57147520
transcriptformer_embedding/embedding_store/follicular_lymphoma/t_cell_follicular_lymphoma.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76856a97fdd2fa2e6c360757681d81e10d984ec731687c6b655f3353ad7eab7c
3
+ size 57147520