Spaces:
Running
Running
Commit
·
3bb66e2
0
Parent(s):
Initial commit with HF dataset integration
Browse files- Gradio app loads data from HF dataset repository
- No large files in repo (uses chanzuckerberg/DynaCLR-data)
- Requires HF_TOKEN for private dataset access
- .gitattributes +44 -0
- .gitignore +63 -0
- README.md +129 -0
- app.py +25 -0
- dynaclr_viz/README.md +133 -0
- dynaclr_viz/__init__.py +67 -0
- dynaclr_viz/config.py +37 -0
- dynaclr_viz/data.py +263 -0
- dynaclr_viz/handlers.py +370 -0
- dynaclr_viz/image.py +154 -0
- dynaclr_viz/plot.py +197 -0
- dynaclr_viz/ui.py +267 -0
- requirements.txt +29 -0
.gitattributes
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
# Zarr datasets in data directory
|
| 37 |
+
# OME-Zarr binary chunks
|
| 38 |
+
data/dataset.zarr/[A-Z]/*/[0-9]*/0/c/0/0/0/0/0 filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
# AnnData zarr - exclude metadata files (.z*), track binary chunks
|
| 40 |
+
data/annotations.zarr/**/* filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
data/annotations.zarr/**/.z* !filter !diff !merge text
|
| 42 |
+
data/annotations.zarr/**/*.json !filter !diff !merge text
|
| 43 |
+
# CSV files
|
| 44 |
+
data/*.csv filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
*.so
|
| 6 |
+
.Python
|
| 7 |
+
build/
|
| 8 |
+
develop-eggs/
|
| 9 |
+
dist/
|
| 10 |
+
downloads/
|
| 11 |
+
eggs/
|
| 12 |
+
.eggs/
|
| 13 |
+
lib/
|
| 14 |
+
lib64/
|
| 15 |
+
parts/
|
| 16 |
+
sdist/
|
| 17 |
+
var/
|
| 18 |
+
wheels/
|
| 19 |
+
*.egg-info/
|
| 20 |
+
.installed.cfg
|
| 21 |
+
*.egg
|
| 22 |
+
|
| 23 |
+
# Virtual environments
|
| 24 |
+
venv/
|
| 25 |
+
ENV/
|
| 26 |
+
env/
|
| 27 |
+
.venv
|
| 28 |
+
|
| 29 |
+
# IDE
|
| 30 |
+
.vscode/
|
| 31 |
+
.idea/
|
| 32 |
+
*.swp
|
| 33 |
+
*.swo
|
| 34 |
+
*~
|
| 35 |
+
.DS_Store
|
| 36 |
+
|
| 37 |
+
# Jupyter
|
| 38 |
+
.ipynb_checkpoints
|
| 39 |
+
*.ipynb
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Logs
|
| 43 |
+
*.log
|
| 44 |
+
logs/
|
| 45 |
+
|
| 46 |
+
# Gradio
|
| 47 |
+
gradio_queue.db
|
| 48 |
+
flagged/
|
| 49 |
+
|
| 50 |
+
# Testing
|
| 51 |
+
.pytest_cache/
|
| 52 |
+
.coverage
|
| 53 |
+
htmlcov/
|
| 54 |
+
|
| 55 |
+
# Environment variables
|
| 56 |
+
.env
|
| 57 |
+
.env.local
|
| 58 |
+
|
| 59 |
+
# Other
|
| 60 |
+
claudedocs/
|
| 61 |
+
.claude/
|
| 62 |
+
concatenate_dataset.yml
|
| 63 |
+
.hf_cache/
|
README.md
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: DynaCLR
|
| 3 |
+
emoji: 🔬
|
| 4 |
+
colorFrom: gray
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 6.0.2
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
license: bsd-3-clause
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
# DynaCLR Visualization
|
| 14 |
+
|
| 15 |
+
Interactive visualization of cell embeddings and infection status with microscopy image viewing.
|
| 16 |
+
|
| 17 |
+
## Overview
|
| 18 |
+
|
| 19 |
+
This application provides an interactive interface for exploring:
|
| 20 |
+
|
| 21 |
+
- Cell embeddings (PCA, projections, and other dimensionality reductions)
|
| 22 |
+
- Infection status annotations
|
| 23 |
+
- Multi-channel microscopy images
|
| 24 |
+
- Time-series cell tracking
|
| 25 |
+
|
| 26 |
+
## Features
|
| 27 |
+
|
| 28 |
+
### Interactive Embedding Plot
|
| 29 |
+
|
| 30 |
+
- Visualize cells in 2D embedding space
|
| 31 |
+
- Color-coded by infection status (infected, uninfected, unknown)
|
| 32 |
+
- Select any embedding dimensions for X and Y axes
|
| 33 |
+
- Click on cells to view detailed microscopy images
|
| 34 |
+
|
| 35 |
+
### Microscopy Image Viewer
|
| 36 |
+
|
| 37 |
+
- View multi-channel images for selected cells
|
| 38 |
+
- Toggle between Phase3D, GFP, and mCherry channels
|
| 39 |
+
- Adjustable channel opacity for image composition
|
| 40 |
+
- Track time-series visualization
|
| 41 |
+
|
| 42 |
+
### Infection Status Tracking
|
| 43 |
+
|
| 44 |
+
- Annotated infection status for cells
|
| 45 |
+
- Visual highlighting of selected tracks
|
| 46 |
+
- FOV-specific track identification
|
| 47 |
+
|
| 48 |
+
## Configuration
|
| 49 |
+
|
| 50 |
+
### HuggingFace Dataset Integration
|
| 51 |
+
|
| 52 |
+
This application automatically loads data from the private HuggingFace dataset repository `chanzuckerberg/DynaCLR-data`. To deploy on HuggingFace Spaces:
|
| 53 |
+
|
| 54 |
+
1. **Set the HF_TOKEN secret** in your Space settings:
|
| 55 |
+
- Go to your Space settings → Repository secrets
|
| 56 |
+
- Add a new secret named `HF_TOKEN`
|
| 57 |
+
- Set the value to a HuggingFace access token with read permissions for the dataset repository
|
| 58 |
+
- Get a token from: https://huggingface.co/settings/tokens
|
| 59 |
+
|
| 60 |
+
2. **Environment Variables** (optional):
|
| 61 |
+
- `USE_HF_DATASET`: Set to "true" (default) to load from HF dataset, or "false" to use local data
|
| 62 |
+
- `HF_TOKEN`: HuggingFace access token (required for private dataset repositories)
|
| 63 |
+
|
| 64 |
+
### Local Development
|
| 65 |
+
|
| 66 |
+
For local development without HuggingFace dataset:
|
| 67 |
+
|
| 68 |
+
```bash
|
| 69 |
+
# Disable HF dataset loading
|
| 70 |
+
export USE_HF_DATASET=false
|
| 71 |
+
|
| 72 |
+
# Ensure local data files are present in data/ directory:
|
| 73 |
+
# - data/dataset.zarr/
|
| 74 |
+
# - data/annotations_filtered.zarr/
|
| 75 |
+
# - data/track_infection_annotation.csv
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
## Usage
|
| 79 |
+
|
| 80 |
+
### Exploring Embeddings
|
| 81 |
+
|
| 82 |
+
1. Select embedding dimensions from the X-axis and Y-axis dropdowns (e.g., PC1, PC2)
|
| 83 |
+
2. The plot will update to show all cells in that embedding space
|
| 84 |
+
3. Cells are colored by infection status:
|
| 85 |
+
- 🔴 Red: Infected
|
| 86 |
+
- 🟢 Green: Uninfected
|
| 87 |
+
- ⚪ Gray: Unknown
|
| 88 |
+
|
| 89 |
+
### Viewing Cell Images
|
| 90 |
+
|
| 91 |
+
1. Click on a cell in the embedding plot to identify a track of interest
2. Select a track from the dropdown menu
|
| 92 |
+
3. The image gallery will display microscopy images for that cell
|
| 93 |
+
4. Use the channel checkboxes to toggle different imaging channels
|
| 94 |
+
5. Adjust opacity sliders to control channel visibility
|
| 95 |
+
|
| 96 |
+
### Channel Information
|
| 97 |
+
|
| 98 |
+
- **Phase3D**: Phase contrast imaging showing cell morphology
|
| 99 |
+
- **GFP**: Green fluorescent protein channel
|
| 100 |
+
- **mCherry**: Red fluorescent protein channel
|
| 101 |
+
|
| 102 |
+
## Citation
|
| 103 |
+
|
| 104 |
+
If you use this visualization tool in your research, please cite:
|
| 105 |
+
|
| 106 |
+
```bibtex
|
| 107 |
+
@misc{hiratamiyasaki2025dynaclrcontrastivelearningcellular,
|
| 108 |
+
title={DynaCLR: Contrastive Learning of Cellular Dynamics with Temporal Regularization},
|
| 109 |
+
author={Eduardo Hirata-Miyasaki and Soorya Pradeep and Ziwen Liu and Alishba Imran and Taylla Milena Theodoro and Ivan E. Ivanov and Sudip Khadka and See-Chi Lee and Michelle Grunberg and Hunter Woosley and Madhura Bhave and Carolina Arias and Shalin B. Mehta},
|
| 110 |
+
year={2025},
|
| 111 |
+
eprint={2410.11281},
|
| 112 |
+
archivePrefix={arXiv},
|
| 113 |
+
primaryClass={cs.CV},
|
| 114 |
+
url={https://arxiv.org/abs/2410.11281},
|
| 115 |
+
}
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
## Related Projects
|
| 119 |
+
|
| 120 |
+
- [VisCy](https://github.com/mehta-lab/VisCy): Computer vision models for single-cell phenotyping
|
| 121 |
+
- [iohub](https://github.com/czbiohub-sf/iohub): Pythonic and parallelizable I/O for N-dimensional imaging data with OME metadata
|
| 122 |
+
|
| 123 |
+
## License
|
| 124 |
+
|
| 125 |
+
BSD-3-Clause
|
| 126 |
+
|
| 127 |
+
## Contact
|
| 128 |
+
|
| 129 |
+
For questions or issues, please open an issue on the [VisCy GitHub repository](https://github.com/mehta-lab/VisCy/issues).
|
app.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
"""
DynaCLR Visualization - Hugging Face Spaces Deployment

Interactive visualization of cell embeddings and infection status
with microscopy image viewing.

This is a self-contained deployment package for Hugging Face Spaces.
"""

from dynaclr_viz import create_app
import gradio as gr

if __name__ == "__main__":
    separator = "=" * 60
    print(separator)
    print("DynaCLR Visualization - Hugging Face Spaces")
    print(separator)
    print("Starting application...")
    print(separator)

    # Build the Gradio interface defined by the dynaclr_viz package,
    # then serve it with the HF Spaces default launch configuration.
    demo = create_app()
    demo.launch()
|
dynaclr_viz/README.md
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# DynaCLR Visualization Package
|
| 2 |
+
|
| 3 |
+
Modular package for visualizing cell embeddings and infection status with interactive microscopy image viewing.
|
| 4 |
+
|
| 5 |
+
## Structure
|
| 6 |
+
|
| 7 |
+
```
|
| 8 |
+
dynaclr_viz/
|
| 9 |
+
├── __init__.py # Package exports and public API
|
| 10 |
+
├── config.py # Configuration, paths, and constants
|
| 11 |
+
├── data.py # Data loading and state management
|
| 12 |
+
├── image.py # Image processing and extraction
|
| 13 |
+
├── plot.py # Plotly visualization functions
|
| 14 |
+
├── handlers.py # Gradio event handlers
|
| 15 |
+
└── ui.py # Gradio UI layout and components
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
## Module Responsibilities
|
| 19 |
+
|
| 20 |
+
### `config.py` - Configuration (~30 lines)
|
| 21 |
+
- Data file paths
|
| 22 |
+
- Visualization constants (colors, sizes, defaults)
|
| 23 |
+
- Plot settings
|
| 24 |
+
|
| 25 |
+
### `data.py` - Data Management (~130 lines)
|
| 26 |
+
- `load_ome_dataset()`: Load and cache OME-Zarr data
|
| 27 |
+
- `load_anndata()`: Load AnnData with infection annotation merging
|
| 28 |
+
- `initialize_data()`: Initialize all data and embeddings
|
| 29 |
+
- `get_embedding_data()`: Extract specific embedding dimensions
|
| 30 |
+
- `parse_track_selection()`: Parse dropdown selections
|
| 31 |
+
- Global state: `adata_demo`, `embedding_info`, `current_plot_data`
|
| 32 |
+
|
| 33 |
+
### `image.py` - Image Processing (~160 lines)
|
| 34 |
+
- `normalize_channel_image()`: Percentile-based contrast normalization
|
| 35 |
+
- `extract_track_images()`: Multi-channel image extraction and compositing
|
| 36 |
+
- Supports Phase3D, GFP, mCherry with opacity control
|
| 37 |
+
|
| 38 |
+
### `plot.py` - Visualization (~180 lines)
|
| 39 |
+
- `create_embedding_plot()`: Generate Plotly scatter plots
|
| 40 |
+
- Infection status coloring with discrete categories
|
| 41 |
+
- Track highlighting with FOV-specific filtering
|
| 42 |
+
- Interactive hover tooltips
|
| 43 |
+
|
| 44 |
+
### `handlers.py` - Event Logic (~230 lines)
|
| 45 |
+
- `update_plot_and_selector()`: Handle embedding axis changes
|
| 46 |
+
- `on_cell_selected()`: Handle track selection (full update)
|
| 47 |
+
- `update_channel_images()`: Handle channel changes (lightweight update)
|
| 48 |
+
|
| 49 |
+
### `ui.py` - User Interface (~220 lines)
|
| 50 |
+
- `create_app()`: Build complete Gradio interface
|
| 51 |
+
- Embedding controls and plot
|
| 52 |
+
- Track selector with accordion for channel settings
|
| 53 |
+
- Image gallery and track info display
|
| 54 |
+
- Event wiring for all interactions
|
| 55 |
+
|
| 56 |
+
## Usage
|
| 57 |
+
|
| 58 |
+
### Run the Application
|
| 59 |
+
```bash
|
| 60 |
+
python app.py
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
### Import as Library
|
| 64 |
+
```python
|
| 65 |
+
from dynaclr_viz import load_anndata, extract_track_images, create_embedding_plot
|
| 66 |
+
|
| 67 |
+
# Load data
|
| 68 |
+
adata, embeddings = load_anndata()
|
| 69 |
+
|
| 70 |
+
# Generate images for specific track
|
| 71 |
+
images = extract_track_images(
|
| 72 |
+
track_id=19,
|
| 73 |
+
fov_name="A/1/000000",
|
| 74 |
+
show_phase=True,
|
| 75 |
+
show_gfp=True
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
# Create plot
|
| 79 |
+
fig = create_embedding_plot("PC1", "PC2", highlight_track_id=19)
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
## Key Features
|
| 83 |
+
|
| 84 |
+
### Unique Cell Identification
|
| 85 |
+
- Cells uniquely identified by `(track_id, fov_name)` pair
|
| 86 |
+
- Dropdown shows: "Track 19 (A/1/000000)"
|
| 87 |
+
- Prevents ambiguity when same track_id appears in multiple FOVs
|
| 88 |
+
|
| 89 |
+
### Performance Optimized
|
| 90 |
+
- Separate event handlers for different update types
|
| 91 |
+
- Channel changes only update gallery (not plot)
|
| 92 |
+
- Eliminates redundant plot re-renders
|
| 93 |
+
|
| 94 |
+
### Clean Separation of Concerns
|
| 95 |
+
- Data layer independent of visualization
|
| 96 |
+
- Image processing reusable in batch scripts
|
| 97 |
+
- UI and handlers separated for testability
|
| 98 |
+
|
| 99 |
+
## Migration Notes
|
| 100 |
+
|
| 101 |
+
### Original File
|
| 102 |
+
- `new_dynaclr_demo.py`: 899 lines, monolithic
|
| 103 |
+
- All code in single file
|
| 104 |
+
|
| 105 |
+
### Refactored Structure
|
| 106 |
+
- 7 modules averaging ~150 lines each
|
| 107 |
+
- Clear boundaries and responsibilities
|
| 108 |
+
- Entry point: 16 lines
|
| 109 |
+
|
| 110 |
+
### Backward Compatibility
|
| 111 |
+
- Original file kept as `new_dynaclr_demo.py`
|
| 112 |
+
- New entry point: `app.py`
|
| 113 |
+
- Both work identically
|
| 114 |
+
|
| 115 |
+
## Development
|
| 116 |
+
|
| 117 |
+
### Adding New Features
|
| 118 |
+
1. **New embedding type**: Modify `data.load_anndata()`
|
| 119 |
+
2. **New channel**: Update `image.extract_track_images()`
|
| 120 |
+
3. **New visualization**: Add function to `plot.py`
|
| 121 |
+
4. **New UI control**: Add to `ui.create_app()`
|
| 122 |
+
|
| 123 |
+
### Testing Individual Modules
|
| 124 |
+
```python
|
| 125 |
+
# Test data loading
|
| 126 |
+
python -c "from dynaclr_viz.data import initialize_data; initialize_data()"
|
| 127 |
+
|
| 128 |
+
# Test image extraction
|
| 129 |
+
python -c "from dynaclr_viz.image import extract_track_images; ..."
|
| 130 |
+
|
| 131 |
+
# Test plotting
|
| 132 |
+
python -c "from dynaclr_viz.plot import create_embedding_plot; ..."
|
| 133 |
+
```
|
dynaclr_viz/__init__.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
DynaCLR Visualization Package
|
| 3 |
+
|
| 4 |
+
A modular package for visualizing cell embeddings and infection status
|
| 5 |
+
with interactive microscopy image viewing.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from .config import (
|
| 9 |
+
DATA_PATH,
|
| 10 |
+
OME_ZARR_PATH,
|
| 11 |
+
ANNDATA_PATH,
|
| 12 |
+
INFECTION_ANNOTATIONS_PATH,
|
| 13 |
+
INFECTION_COLOR_MAP,
|
| 14 |
+
DEFAULT_CROP_SIZE,
|
| 15 |
+
DEFAULT_CONTRAST,
|
| 16 |
+
DEFAULT_OPACITY,
|
| 17 |
+
)
|
| 18 |
+
from .data import (
|
| 19 |
+
load_ome_dataset,
|
| 20 |
+
load_anndata,
|
| 21 |
+
get_embedding_data,
|
| 22 |
+
get_all_embeddings,
|
| 23 |
+
get_fov_choices,
|
| 24 |
+
get_tracks_for_fov,
|
| 25 |
+
is_track_annotated,
|
| 26 |
+
)
|
| 27 |
+
from .image import extract_track_images, normalize_channel_image
|
| 28 |
+
from .plot import create_embedding_plot
|
| 29 |
+
from .handlers import (
|
| 30 |
+
update_plot_and_selector,
|
| 31 |
+
update_track_selector,
|
| 32 |
+
on_cell_selected,
|
| 33 |
+
update_channel_images,
|
| 34 |
+
)
|
| 35 |
+
from .ui import create_app
|
| 36 |
+
|
| 37 |
+
__all__ = [
|
| 38 |
+
# Config
|
| 39 |
+
"DATA_PATH",
|
| 40 |
+
"OME_ZARR_PATH",
|
| 41 |
+
"ANNDATA_PATH",
|
| 42 |
+
"INFECTION_ANNOTATIONS_PATH",
|
| 43 |
+
"INFECTION_COLOR_MAP",
|
| 44 |
+
"DEFAULT_CROP_SIZE",
|
| 45 |
+
"DEFAULT_CONTRAST",
|
| 46 |
+
"DEFAULT_OPACITY",
|
| 47 |
+
# Data
|
| 48 |
+
"load_ome_dataset",
|
| 49 |
+
"load_anndata",
|
| 50 |
+
"get_embedding_data",
|
| 51 |
+
"get_all_embeddings",
|
| 52 |
+
"get_fov_choices",
|
| 53 |
+
"get_tracks_for_fov",
|
| 54 |
+
"is_track_annotated",
|
| 55 |
+
# Image
|
| 56 |
+
"extract_track_images",
|
| 57 |
+
"normalize_channel_image",
|
| 58 |
+
# Plot
|
| 59 |
+
"create_embedding_plot",
|
| 60 |
+
# Handlers
|
| 61 |
+
"update_plot_and_selector",
|
| 62 |
+
"update_track_selector",
|
| 63 |
+
"on_cell_selected",
|
| 64 |
+
"update_channel_images",
|
| 65 |
+
# UI
|
| 66 |
+
"create_app",
|
| 67 |
+
]
|
dynaclr_viz/config.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Configuration and constants for DynaCLR visualization."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
# Data paths - relative to repository root
|
| 7 |
+
REPO_ROOT = Path(__file__).parent.parent
|
| 8 |
+
DATA_PATH = REPO_ROOT / "data"
|
| 9 |
+
|
| 10 |
+
# HuggingFace dataset configuration
|
| 11 |
+
HF_DATASET_REPO = "chanzuckerberg/DynaCLR-data"
|
| 12 |
+
USE_HF_DATASET = os.getenv("USE_HF_DATASET", "true").lower() == "true"
|
| 13 |
+
|
| 14 |
+
# Data file paths (will use HF dataset if USE_HF_DATASET=true)
|
| 15 |
+
OME_ZARR_PATH = DATA_PATH / "dataset.zarr"
|
| 16 |
+
ANNDATA_PATH = DATA_PATH / "annotations_filtered.zarr"
|
| 17 |
+
INFECTION_ANNOTATIONS_PATH = DATA_PATH / "track_infection_annotation.csv"
|
| 18 |
+
|
| 19 |
+
# Visualization constants - colorblind-accessible palette
|
| 20 |
+
INFECTION_COLOR_MAP = {
|
| 21 |
+
"infected": "#FF6B00", # Bright orange (colorblind-accessible)
|
| 22 |
+
"uninfected": "#00D9FF", # Bright cyan-blue (colorblind-accessible)
|
| 23 |
+
"unknown": "#9CA3AF", # Gray
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
# Image processing defaults
|
| 27 |
+
DEFAULT_CROP_SIZE = 160
|
| 28 |
+
DEFAULT_CONTRAST = (10, 90)
|
| 29 |
+
DEFAULT_OPACITY = 1.0
|
| 30 |
+
|
| 31 |
+
# Plot settings
|
| 32 |
+
PLOT_HEIGHT = 700
|
| 33 |
+
MARKER_SIZE_NORMAL = 6
|
| 34 |
+
MARKER_SIZE_HIGHLIGHTED = 10
|
| 35 |
+
MARKER_OPACITY_NORMAL = 0.9
|
| 36 |
+
MARKER_OPACITY_HIGHLIGHTED = 0.9
|
| 37 |
+
MARKER_OPACITY_UNKNOWN = 0.2
|
dynaclr_viz/data.py
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Data loading and management for DynaCLR visualization."""
|
| 2 |
+
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import anndata as ad
|
| 5 |
+
from iohub import open_ome_zarr
|
| 6 |
+
from huggingface_hub import snapshot_download
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
from .config import (
|
| 10 |
+
OME_ZARR_PATH,
|
| 11 |
+
ANNDATA_PATH,
|
| 12 |
+
INFECTION_ANNOTATIONS_PATH,
|
| 13 |
+
HF_DATASET_REPO,
|
| 14 |
+
USE_HF_DATASET,
|
| 15 |
+
DATA_PATH,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
# Global state for cached data, populated lazily by the loaders below.
ome_dataset = None  # cached OME-Zarr handle (set by load_ome_dataset)
adata_demo = None  # cached AnnData object
embedding_info = None  # mapping: obsm key -> component display names
all_embeddings = None  # flat list of all embedding dimension names
embedding_to_key_idx = None  # mapping: component name -> (obsm key, index)
current_plot_data = None  # state for the currently rendered plot
# Track selection caching
cached_fov_choices = None  # List of all FOV names
cached_tracks_by_fov = None  # Dict: {fov_name: [track_ids]}
# Set of (track_id, fov_name) with infected/uninfected cells
cached_annotated_tracks = None
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def download_dataset_from_hf():
    """Download dataset files from the HuggingFace dataset repository.

    Downloads the entire dataset repository to a local cache directory
    (files are reused on subsequent runs) and returns paths to the
    required files.

    Returns:
        tuple: (ome_zarr_path, anndata_path, infection_csv_path)

    Raises:
        FileNotFoundError: If an expected file is missing from the snapshot.
    """
    import os  # local import: this module does not import os at top level

    print(f"Downloading dataset from HuggingFace: {HF_DATASET_REPO}")

    # The README documents HF_TOKEN as required for the private dataset
    # repository; pass it through when set. A None token still works for
    # public repositories, so this stays backward compatible.
    token = os.getenv("HF_TOKEN")

    # Download the entire dataset repository. Files are cached locally
    # and reused on subsequent runs.
    cache_dir = snapshot_download(
        repo_id=HF_DATASET_REPO,
        repo_type="dataset",
        token=token,
        cache_dir=str(DATA_PATH.parent / ".hf_cache"),
    )

    cache_path = Path(cache_dir)
    print(f"Dataset cached to: {cache_path}")

    # Paths to the downloaded files inside the snapshot.
    ome_zarr_path = cache_path / "dataset.zarr"
    anndata_path = cache_path / "annotations_filtered.zarr"
    infection_csv_path = cache_path / "track_infection_annotation.csv"

    # Fail fast with a clear message if the snapshot is incomplete.
    for path, name in [
        (ome_zarr_path, "dataset.zarr"),
        (anndata_path, "annotations_filtered.zarr"),
        (infection_csv_path, "track_infection_annotation.csv"),
    ]:
        if not path.exists():
            raise FileNotFoundError(f"Expected file not found in dataset: {name}")

    return ome_zarr_path, anndata_path, infection_csv_path
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def get_data_paths():
    """Resolve the data file locations for this run.

    When ``USE_HF_DATASET`` is enabled the files are fetched (or reused
    from cache) via the HuggingFace dataset repository; otherwise the
    local paths from the configuration module are returned.

    Returns:
        tuple: (ome_zarr_path, anndata_path, infection_csv_path)
    """
    if not USE_HF_DATASET:
        return OME_ZARR_PATH, ANNDATA_PATH, INFECTION_ANNOTATIONS_PATH
    return download_dataset_from_hf()
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def load_ome_dataset():
    """Open the OME-Zarr dataset once and reuse the cached handle."""
    global ome_dataset
    if ome_dataset is not None:
        return ome_dataset
    # First call: resolve the path and open the store read-only.
    ome_zarr_path, _, _ = get_data_paths()
    print(f"Loading OME-Zarr dataset from: {ome_zarr_path}")
    ome_dataset = open_ome_zarr(ome_zarr_path, mode="r")
    return ome_dataset
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def load_anndata():
    """Load the AnnData store and derive embedding metadata.

    If ``infection_status`` is not already present in ``adata.obs`` (e.g.
    when not using a pre-filtered dataset), per-track infection annotations
    are merged in from the CSV file. Every embedding found in ``adata.obsm``
    is enumerated with human-readable component names.

    Returns:
        tuple: (adata, embeddings) where ``embeddings`` maps each obsm key
        to a list of component display names (e.g. "PC1", "Proj3").
    """
    _, anndata_path, infection_csv_path = get_data_paths()

    print(f"Loading AnnData from: {anndata_path}")
    adata = ad.read_zarr(anndata_path)

    if "infection_status" not in adata.obs.columns:
        # Merge on the composite key (fov_name, id): 'id' is only unique
        # within a single FOV, so neither column alone identifies a cell.
        print(f"Loading infection annotations from: {infection_csv_path}")
        annotations = pd.read_csv(infection_csv_path)

        print("Merging infection annotations with AnnData.obs...")
        adata.obs = adata.obs.merge(
            annotations[["fov_name", "id", "infection_status"]],
            on=["fov_name", "id"],
            how="left",
        )

        # Cells without a matching annotation default to 'unknown'.
        adata.obs["infection_status"] = adata.obs["infection_status"].fillna("unknown")

    # Report annotation coverage.
    n_total = len(adata.obs)
    n_with_annot = (adata.obs["infection_status"] != "unknown").sum()
    n_without_annot = (adata.obs["infection_status"] == "unknown").sum()
    print(
        f" - Cells with infection annotations: {n_with_annot} "
        f"({n_with_annot / n_total * 100:.1f}%)"
    )
    print(
        f" - Cells without annotations: {n_without_annot} "
        f"({n_without_annot / n_total * 100:.1f}%)"
    )

    # Name every embedding component found in obsm.
    embeddings = {}
    for key in adata.obsm.keys():
        n_components = adata.obsm[key].shape[1]
        if key == "X_pca":
            # PCA components: PC1, PC2, ...
            prefix = "PC"
        elif key == "X_projections":
            # Projection dimensions: Proj1, Proj2, ...
            prefix = "Proj"
        else:
            # Generic naming for any other embedding.
            prefix = f"{key}_"
        embeddings[key] = [f"{prefix}{i + 1}" for i in range(n_components)]

    print(f"Loaded {adata.shape[0]} cells with {adata.shape[1]} features")
    print(f"Available embeddings: {list(embeddings.keys())}")

    return adata, embeddings
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def initialize_data():
    """Load the AnnData, build the embedding index, and cache FOV/track lookups.

    Populates the module-level caches used by the dropdown helpers below.

    Returns:
        tuple: (adata, embedding_info dict, flat list of embedding names)
    """
    global adata_demo, embedding_info, all_embeddings, embedding_to_key_idx
    global cached_fov_choices, cached_tracks_by_fov, cached_annotated_tracks

    adata_demo, embedding_info = load_anndata()

    # Flatten per-obsm component names into one dropdown-friendly list while
    # remembering where each component lives: (obsm key, column index).
    all_embeddings = []
    embedding_to_key_idx = {}
    for obsm_key, names in embedding_info.items():
        for column, name in enumerate(names):
            all_embeddings.append(name)
            embedding_to_key_idx[name] = (obsm_key, column)

    print(f"Total embedding dimensions available: {len(all_embeddings)}")

    print("Pre-computing FOV and track selections...")

    print("Filtering tracks with infection annotations...")
    track_groups = adata_demo.obs.groupby(["track_id", "fov_name"])["infection_status"]

    # A (track_id, fov_name) pair is kept when any of its cells carries a
    # real infection label ("unknown" alone does not qualify).
    cached_annotated_tracks = set()
    for (track_id, fov_name), statuses in track_groups:
        if any(status in ("infected", "uninfected") for status in statuses):
            cached_annotated_tracks.add((int(track_id), fov_name))

    total_tracks = len(track_groups)
    annotated_count = len(cached_annotated_tracks)
    print(
        f"Filtered tracks: {annotated_count} / {total_tracks} have infection annotations "
        f"({annotated_count / total_tracks * 100:.1f}%)"
    )

    # Invert the annotated set into FOV -> sorted list of track IDs.
    cached_tracks_by_fov = {}
    for track_id, fov_name in cached_annotated_tracks:
        cached_tracks_by_fov.setdefault(fov_name, []).append(track_id)
    for tracks in cached_tracks_by_fov.values():
        tracks.sort()

    # Only FOVs that actually contain annotated tracks are offered.
    cached_fov_choices = sorted(cached_tracks_by_fov.keys())

    total_fovs = len(adata_demo.obs["fov_name"].unique())
    print(
        f"Cached annotated tracks for {len(cached_tracks_by_fov)} FOVs (out of {total_fovs} total FOVs)"
    )

    return adata_demo, embedding_info, all_embeddings
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def get_embedding_data(embedding_name):
    """Return the 1-D column of values for one named embedding component."""
    obsm_key, column = embedding_to_key_idx[embedding_name]
    return adata_demo.obsm[obsm_key][:, column]
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def get_all_embeddings():
    """Return the flat list of every available embedding component name."""
    return all_embeddings
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def get_fov_choices():
    """Return the pre-computed FOV dropdown choices (empty before init)."""
    if cached_fov_choices:
        return cached_fov_choices
    return []
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def get_tracks_for_fov(fov_name):
    """Return the sorted annotated track IDs available in one FOV.

    Args:
        fov_name: FOV name such as "A/1/000000".

    Returns:
        list: Track IDs with infection annotations; empty when the cache has
        not been built yet or the FOV has no annotated tracks.
    """
    cache = cached_tracks_by_fov
    return [] if cache is None else cache.get(fov_name, [])
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def is_track_annotated(track_id, fov_name):
    """Report whether a (track, FOV) pair carries infection annotations.

    Args:
        track_id: Track ID.
        fov_name: FOV name.

    Returns:
        bool: True when the track has at least one infected or uninfected
        cell; False when it does not or the cache is not yet built.
    """
    annotated = cached_annotated_tracks
    return annotated is not None and (int(track_id), fov_name) in annotated
|
dynaclr_viz/handlers.py
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Event handlers for Gradio interface."""
|
| 2 |
+
|
| 3 |
+
import gradio as gr
|
| 4 |
+
import traceback
|
| 5 |
+
|
| 6 |
+
from . import data
|
| 7 |
+
from .image import extract_track_images
|
| 8 |
+
from .plot import create_embedding_plot
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Preset tracks showing infection transitions.
# Each entry maps a human-readable dropdown label ("name") to the
# (track_id, fov_name) pair needed to select it; "description" summarizes
# the uninfected -> infected timeline shown in the UI.
# NOTE(review): these IDs are hard-coded for the bundled demo dataset —
# confirm they exist if the HF dataset revision changes.
PRESET_TRACKS = [
    {
        "name": "Track 152 (64 timepoints)",
        "track_id": 152,
        "fov_name": "A/2/000001",
        "description": "38 uninfected → 26 infected cells, transition at t=40",
    },
    {
        "name": "Track 114 (49 timepoints)",
        "track_id": 114,
        "fov_name": "A/2/001000",
        "description": "29 uninfected → 20 infected cells, transition at t=40",
    },
    {
        "name": "Track 182 (55 timepoints)",
        "track_id": 182,
        "fov_name": "A/2/000000",
        "description": "33 uninfected → 22 infected cells, transition at t=40",
    },
    {
        "name": "Track 135 (46 timepoints)",
        "track_id": 135,
        "fov_name": "A/2/000000",
        "description": "36 uninfected → 10 infected cells, starts at t=4",
    },
    {
        "name": "Track 107 (46 timepoints)",
        "track_id": 107,
        "fov_name": "A/2/000001",
        "description": "19 uninfected → 27 infected cells, transition at t=39",
    },
]
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def update_plot_and_selector(emb_x, emb_y):
    """Redraw the embedding scatter and (re)populate the FOV/track selectors.

    Args:
        emb_x: Name of the embedding component on the x-axis.
        emb_y: Name of the embedding component on the y-axis.

    Returns:
        tuple: (plotly figure, updated FOV dropdown, updated track dropdown)
    """
    figure = create_embedding_plot(emb_x, emb_y)

    # Default the selectors to the first FOV (and its tracks) when available.
    fovs = data.get_fov_choices()
    default_fov = fovs[0] if fovs else None
    tracks = data.get_tracks_for_fov(default_fov) if fovs else []

    return (
        figure,
        gr.Dropdown(choices=fovs, value=default_fov),
        gr.Dropdown(choices=tracks),
    )
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def update_track_selector(fov_name):
    """Refill the track dropdown with the annotated tracks of one FOV.

    Args:
        fov_name: Selected FOV name (may be empty/None).

    Returns:
        gr.Dropdown: Dropdown update with the filtered choices and no value.
    """
    tracks = data.get_tracks_for_fov(fov_name) if fov_name else []
    return gr.Dropdown(choices=tracks, value=None)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def apply_preset(preset_name):
    """Apply one of the PRESET_TRACKS selections to the FOV/track dropdowns.

    Args:
        preset_name: Display name of the preset to apply.

    Returns:
        tuple: (FOV dropdown update, track dropdown update); no-op updates
        when the name is empty or unknown.
    """
    preset = None
    if preset_name:
        # PRESET_TRACKS is tiny, so a linear scan by display name is fine.
        preset = next(
            (entry for entry in PRESET_TRACKS if entry["name"] == preset_name),
            None,
        )

    if preset is None:
        return gr.Dropdown(), gr.Dropdown()

    track_choices = data.get_tracks_for_fov(preset["fov_name"])

    return (
        gr.Dropdown(value=preset["fov_name"]),
        gr.Dropdown(choices=track_choices, value=preset["track_id"]),
    )
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def build_channel_status_text(show_phase, show_gfp, show_mcherry):
    """Compose the markdown line describing which channels are enabled.

    Args:
        show_phase: Phase3D enabled flag.
        show_gfp: GFP enabled flag.
        show_mcherry: mCherry enabled flag.

    Returns:
        str: Channel-status markdown, or a warning when nothing is enabled.
    """
    labels = (
        (show_phase, "🔬 **Phase3D** (structure/morphology)"),
        (show_gfp, "🟢 **GFP** (green fluorescence)"),
        (show_mcherry, "🟣 **mCherry** (red/magenta fluorescence)"),
    )
    enabled = [text for flag, text in labels if flag]

    if not enabled:
        return "⚠️ **No channels enabled** - Please enable at least one channel in Channel Settings"

    return " · ".join(enabled)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def update_channel_images(
    fov_name,
    track_id,
    show_phase,
    show_gfp,
    show_mcherry,
    phase_min,
    phase_max,
    phase_opacity,
    gfp_min,
    gfp_max,
    gfp_opacity,
    mcherry_min,
    mcherry_max,
    mcherry_opacity,
):
    """Refresh the image gallery and channel-status line after a settings change.

    Args:
        fov_name: Selected FOV name.
        track_id: Selected track ID.
        show_phase, show_gfp, show_mcherry: Channel enable flags.
        phase_min, phase_max, phase_opacity: Phase contrast limits and opacity.
        gfp_min, gfp_max, gfp_opacity: GFP contrast limits and opacity.
        mcherry_min, mcherry_max, mcherry_opacity: mCherry contrast limits and opacity.

    Returns:
        tuple: (gallery images, channel status markdown)
    """
    status_text = build_channel_status_text(show_phase, show_gfp, show_mcherry)

    # No selection yet, or every channel disabled: nothing to render.
    if not fov_name or track_id is None:
        return [], status_text
    if not (show_phase or show_gfp or show_mcherry):
        return [], status_text

    render_kwargs = dict(
        crop_size=160,
        show_phase=show_phase,
        show_gfp=show_gfp,
        show_mcherry=show_mcherry,
        phase_min=phase_min,
        phase_max=phase_max,
        phase_opacity=phase_opacity,
        gfp_min=gfp_min,
        gfp_max=gfp_max,
        gfp_opacity=gfp_opacity,
        mcherry_min=mcherry_min,
        mcherry_max=mcherry_max,
        mcherry_opacity=mcherry_opacity,
    )

    try:
        # Re-extract the crops with the current channel settings.
        gallery = extract_track_images(track_id, fov_name, **render_kwargs)
        return gallery, status_text
    except Exception as e:
        # Best-effort UI refresh: log and fall back to an empty gallery.
        print(f"Error in update_channel_images: {e}")
        return [], status_text
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def on_cell_selected(
    fov_name,
    track_id,
    show_phase,
    show_gfp,
    show_mcherry,
    phase_min,
    phase_max,
    phase_opacity,
    gfp_min,
    gfp_max,
    gfp_opacity,
    mcherry_min,
    mcherry_max,
    mcherry_opacity,
    embedding_x,
    embedding_y,
):
    """Handle cell selection - updates gallery, info, and highlighted plot.

    Main selection handler: validates the selection, extracts the per-timepoint
    crops, builds a metadata summary, and redraws the embedding plot with the
    chosen track highlighted. Every exit path returns the same 3-tuple shape so
    the Gradio outputs stay consistent.

    Args:
        fov_name: Selected FOV name
        track_id: Selected track ID
        show_phase, show_gfp, show_mcherry: Channel flags
        phase_min, phase_max, phase_opacity: Phase settings
        gfp_min, gfp_max, gfp_opacity: GFP settings
        mcherry_min, mcherry_max, mcherry_opacity: mCherry settings
        embedding_x, embedding_y: Current embedding axes

    Returns:
        tuple: (gallery_images, info_text, highlighted_plot)
    """
    # data.current_plot_data is set by create_embedding_plot; None means the
    # scatter has never been drawn, so there is nothing to highlight yet.
    if not fov_name or track_id is None or data.current_plot_data is None:
        # No selection - return to normal plot without highlighting
        normal_fig = create_embedding_plot(
            embedding_x, embedding_y, highlight_track_id=None
        )
        return (
            [],
            "Select a FOV and track to view the timeline",
            normal_fig,
        )

    try:
        # Get all cells in this specific track within this specific FOV
        # (track_id alone is not unique across FOVs).
        track_cells = data.adata_demo.obs[
            (data.adata_demo.obs["track_id"] == track_id)
            & (data.adata_demo.obs["fov_name"] == fov_name)
        ]

        if len(track_cells) == 0:
            # Stale selection (e.g. FOV changed after track was picked).
            normal_fig = create_embedding_plot(
                embedding_x, embedding_y, highlight_track_id=None
            )
            return (
                [],
                f"No cells found for track {track_id} in FOV {fov_name}",
                normal_fig,
            )

        # Build channel list for display
        active_channels = []
        if show_phase:
            active_channels.append("Phase3D")
        if show_gfp:
            active_channels.append("GFP (green)")
        if show_mcherry:
            active_channels.append("mCherry (periwinkle)")

        if not active_channels:
            # Create highlighted plot without images — the track is still
            # emphasized even though no crops can be rendered.
            highlighted_fig = create_embedding_plot(
                embedding_x,
                embedding_y,
                highlight_track_id=track_id,
                highlight_fov_name=fov_name,
            )
            return (
                [],
                "⚠️ Please select at least one channel to display",
                highlighted_fig,
            )

        # Extract multi-channel images for all timepoints
        print(
            f"Extracting images for track {track_id} in FOV {fov_name} "
            f"({len(track_cells)} timepoints) with channels: {active_channels}..."
        )
        images = extract_track_images(
            track_id,
            fov_name,
            crop_size=160,
            show_phase=show_phase,
            show_gfp=show_gfp,
            show_mcherry=show_mcherry,
            phase_min=phase_min,
            phase_max=phase_max,
            phase_opacity=phase_opacity,
            gfp_min=gfp_min,
            gfp_max=gfp_max,
            gfp_opacity=gfp_opacity,
            mcherry_min=mcherry_min,
            mcherry_max=mcherry_max,
            mcherry_opacity=mcherry_opacity,
        )

        if len(images) == 0:
            # Extraction failed for every timepoint; keep the highlight so
            # the user still sees which track they picked.
            highlighted_fig = create_embedding_plot(
                embedding_x,
                embedding_y,
                highlight_track_id=track_id,
                highlight_fov_name=fov_name,
            )
            return [], f"Failed to load images for track {track_id}", highlighted_fig

        # Create updated plot with highlighted track in specific FOV
        highlighted_fig = create_embedding_plot(
            embedding_x,
            embedding_y,
            highlight_track_id=track_id,
            highlight_fov_name=fov_name,
        )

        # Create metadata info text
        t_min, t_max = track_cells["t"].min(), track_cells["t"].max()
        channels_text = ", ".join(active_channels)

        # Get infection status for this track (a track may transition, so
        # there can be more than one status).
        track_infection_statuses = track_cells["infection_status"].unique()
        infection_text = (
            ", ".join(track_infection_statuses)
            if len(track_infection_statuses) > 0
            else "unknown"
        )

        info_text = f"""
**Track:** {track_id} | **FOV:** {fov_name}
**Duration:** T{t_min}→T{t_max} ({len(track_cells)} obs) | **Infection:** {infection_text}
**Channels:** {channels_text}
"""

        print(f"Successfully loaded {len(images)} images for track {track_id}")
        return images, info_text, highlighted_fig

    except Exception as e:
        # Top-level UI boundary: log the full traceback and fall back to a
        # non-highlighted plot rather than crashing the Gradio callback.
        print(f"Error in on_cell_selected: {e}")
        traceback.print_exc()
        normal_fig = create_embedding_plot(
            embedding_x, embedding_y, highlight_track_id=None
        )
        return [], f"Error loading track images: {str(e)}", normal_fig
|
dynaclr_viz/image.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Image processing and extraction for microscopy data."""
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from PIL import Image
|
| 5 |
+
from skimage.exposure import rescale_intensity
|
| 6 |
+
|
| 7 |
+
from .config import DEFAULT_CROP_SIZE
|
| 8 |
+
from . import data
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def normalize_channel_image(channel_array, contrast_min=1, contrast_max=99):
    """Normalize channel image for display using percentile-based clipping.

    Values are clipped to the [contrast_min, contrast_max] percentiles and
    linearly rescaled to 0-255. The rescaling is done directly with NumPy
    (replacing the previous skimage.rescale_intensity call, which computed
    the same linear map); a flat image (zero dynamic range) now explicitly
    maps to all zeros instead of relying on library-internal handling.

    Args:
        channel_array: Raw channel data (any numeric ndarray)
        contrast_min: Minimum percentile for clipping (0-100)
        contrast_max: Maximum percentile for clipping (0-100)

    Returns:
        Normalized uint8 array (0-255)
    """
    p_min = np.percentile(channel_array, contrast_min)
    p_max = np.percentile(channel_array, contrast_max)
    clipped = np.clip(channel_array, p_min, p_max)

    # Guard degenerate dynamic range (constant crop): avoid division by zero.
    if p_max == p_min:
        return np.zeros_like(clipped, dtype=np.uint8)

    # Linear map [p_min, p_max] -> [0, 255]; clipped values already lie in range.
    scaled = (clipped - p_min) / (p_max - p_min) * 255.0
    return scaled.astype(np.uint8)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def extract_track_images(
    track_id,
    fov_name,
    crop_size=DEFAULT_CROP_SIZE,
    show_phase=True,
    show_gfp=False,
    show_mcherry=False,
    phase_min=10,
    phase_max=90,
    gfp_min=10,
    gfp_max=90,
    mcherry_min=10,
    mcherry_max=90,
    phase_opacity=1.0,
    gfp_opacity=1.0,
    mcherry_opacity=1.0,
):
    """Extract multi-channel crops for all timepoints in a track within a specific FOV.

    For each timepoint of the track, a square crop around the cell centroid is
    read from the OME-Zarr store, each enabled channel is percentile-normalized,
    tinted, and additively composited into one RGB image.

    Args:
        track_id: Track ID to extract images for
        fov_name: Field of view name to filter by
        crop_size: Size of the square crop around cell centroid
        show_phase: Show Phase3D channel (grayscale)
        show_gfp: Show GFP channel (green)
        show_mcherry: Show mCherry channel (periwinkle)
        phase_min, phase_max: Contrast limits for Phase (percentile 0-100)
        gfp_min, gfp_max: Contrast limits for GFP (percentile 0-100)
        mcherry_min, mcherry_max: Contrast limits for mCherry (percentile 0-100)
        phase_opacity: Opacity for Phase channel (0-1)
        gfp_opacity: Opacity for GFP channel (0-1)
        mcherry_opacity: Opacity for mCherry channel (0-1)

    Returns:
        List of tuples (PIL.Image, label_text); timepoints that fail to load
        are skipped (a warning is printed), so the list may be shorter than
        the track.
    """
    # Query AnnData for all cells in this track within this specific FOV
    # (track_id alone is not unique across FOVs).
    track_cells = data.adata_demo.obs[
        (data.adata_demo.obs["track_id"] == track_id)
        & (data.adata_demo.obs["fov_name"] == fov_name)
    ]
    track_cells = track_cells.sort_values("t")  # Sort chronologically

    # Load OME-Zarr dataset (cached after first call)
    dataset = data.load_ome_dataset()

    images = []
    for _, cell in track_cells.iterrows():
        try:
            fov = cell["fov_name"]
            t = int(cell["t"])
            y_center = int(cell["y"])
            x_center = int(cell["x"])

            # Access position; "0" below is the full-resolution pyramid level.
            position = dataset[fov]

            # Crop boundaries with boundary handling: clamp to image extent,
            # so edge cells yield smaller-than-crop_size crops.
            half_crop = crop_size // 2
            y_min = max(0, y_center - half_crop)
            y_max = min(position["0"].shape[3], y_center + half_crop)  # Shape is TCZYX
            x_min = max(0, x_center - half_crop)
            x_max = min(position["0"].shape[4], x_center + half_crop)

            # Initialize composite as black RGB image (float32 so additive
            # blending can exceed 255 before the final clip).
            composite = np.zeros((y_max - y_min, x_max - x_min, 3), dtype=np.float32)

            # Extract and composite channels with opacity control.
            # NOTE(review): channel indices assume Zarr order Phase=0, GFP=1,
            # mCherry=2 and a single Z slice (Z index 0) — confirm against the
            # dataset's acquisition metadata.
            if show_phase:
                # Phase channel (C=0) - display as grayscale base
                phase_data = position["0"][t, 0, 0, y_min:y_max, x_min:x_max]
                normalized_phase = normalize_channel_image(
                    phase_data, phase_min, phase_max
                ).astype(np.float32)

                # Apply opacity
                normalized_phase = normalized_phase * phase_opacity

                # If showing fluorescence channels, dim the phase to not overwhelm
                if show_gfp or show_mcherry:
                    normalized_phase = normalized_phase * 0.4

                # Convert grayscale to RGB by setting all channels equal
                composite[:, :, 0] += normalized_phase
                composite[:, :, 1] += normalized_phase
                composite[:, :, 2] += normalized_phase

            if show_gfp:
                # GFP channel (C=1) - green overlay with opacity
                gfp_data = position["0"][t, 1, 0, y_min:y_max, x_min:x_max]
                normalized_gfp = normalize_channel_image(
                    gfp_data, gfp_min, gfp_max
                ).astype(np.float32)

                # Apply green color with opacity
                composite[:, :, 1] += normalized_gfp * gfp_opacity

            if show_mcherry:
                # mCherry channel (C=2) - periwinkle overlay with opacity
                mcherry_data = position["0"][t, 2, 0, y_min:y_max, x_min:x_max]
                normalized_mcherry = normalize_channel_image(
                    mcherry_data, mcherry_min, mcherry_max
                ).astype(np.float32)

                # Apply periwinkle color with opacity (R=43%, G=31%, B=98%)
                composite[:, :, 0] += normalized_mcherry * 0.43 * mcherry_opacity
                composite[:, :, 1] += normalized_mcherry * 0.31 * mcherry_opacity
                composite[:, :, 2] += normalized_mcherry * 0.98 * mcherry_opacity

            # Clip and convert to uint8 (additive blending may overshoot 255)
            composite = np.clip(composite, 0, 255).astype(np.uint8)

            # Convert to PIL Image
            img = Image.fromarray(composite, mode="RGB")

            # Create label with timepoint info
            label = f"T={t}"
            images.append((img, label))

        except Exception as e:
            # Best-effort: a single bad timepoint should not abort the track.
            print(
                f"Warning: Failed to load image for track {track_id}, t={cell['t']}: {e}"
            )
            continue

    return images
|
dynaclr_viz/plot.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Plotly visualization functions for cell embedding plots."""
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import plotly.graph_objects as go
|
| 6 |
+
|
| 7 |
+
from .config import (
|
| 8 |
+
INFECTION_COLOR_MAP,
|
| 9 |
+
PLOT_HEIGHT,
|
| 10 |
+
MARKER_SIZE_NORMAL,
|
| 11 |
+
MARKER_SIZE_HIGHLIGHTED,
|
| 12 |
+
MARKER_OPACITY_NORMAL,
|
| 13 |
+
MARKER_OPACITY_UNKNOWN,
|
| 14 |
+
)
|
| 15 |
+
from . import data
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _format_hover_text(row, embedding_x, embedding_y):
    """Build the HTML hover label for one plotted cell (shared by both trace styles)."""
    return (
        f"ID: {row['id']}<br>"
        f"Track: {row['track_id']}<br>"
        f"Time: {row['timepoint']}<br>"
        f"FOV: {row['fov_name']}<br>"
        f"Infection: {row['infection_status']}<br>"
        f"Position: ({row['x_pos']}, {row['y_pos']})<br>"
        f"{embedding_x}: {row['x']:.2f}<br>"
        f"{embedding_y}: {row['y']:.2f}"
    )


def create_embedding_plot(
    embedding_x="PC1",
    embedding_y="PC2",
    highlight_track_id=None,
    highlight_fov_name=None,
):
    """Create interactive Plotly scatter plot of embeddings with infection status coloring.

    When a track is highlighted, all other cells are drawn as a dim gray
    background layer and the track is drawn on top as a time-ordered
    markers+lines trajectory. The hover-label construction previously
    duplicated in both branches is factored into _format_hover_text, and the
    unused local n_points was removed.

    Args:
        embedding_x: X-axis embedding name
        embedding_y: Y-axis embedding name
        highlight_track_id: If provided, highlight this specific track
        highlight_fov_name: If provided with track_id, highlight only this FOV

    Returns:
        plotly.graph_objects.Figure: Interactive Plotly figure

    Side effects:
        Stores the assembled plot DataFrame in data.current_plot_data for
        later selection handlers.
    """
    # Get embedding coordinates
    x_data = data.get_embedding_data(embedding_x)
    y_data = data.get_embedding_data(embedding_y)

    # Get metadata for coloring and hover
    obs_data = data.adata_demo.obs

    # Map each observation to its color
    color_values = obs_data["infection_status"].map(INFECTION_COLOR_MAP).values

    # Map infection status to opacity (unknown cells more transparent)
    opacity_values = (
        obs_data["infection_status"]
        .map(
            {
                "infected": MARKER_OPACITY_NORMAL,
                "uninfected": MARKER_OPACITY_NORMAL,
                "unknown": MARKER_OPACITY_UNKNOWN,
            }
        )
        .values
    )

    # Create DataFrame for plotting
    plot_data = pd.DataFrame(
        {
            "x": x_data,
            "y": y_data,
            "color": color_values,
            "opacity": opacity_values,
            "id": obs_data["id"].values,
            "track_id": obs_data["track_id"].values,
            "timepoint": obs_data["t"].values,
            "fov_name": obs_data["fov_name"].values,
            "x_pos": obs_data["x"].values,
            "y_pos": obs_data["y"].values,
            "infection_status": obs_data["infection_status"].values,
        }
    )

    # Store for potential future interactions
    data.current_plot_data = plot_data

    fig = go.Figure()

    if highlight_track_id is not None:
        # Create filter for the specific track (and FOV if specified)
        if highlight_fov_name is not None:
            highlight_mask = (plot_data["track_id"] == highlight_track_id) & (
                plot_data["fov_name"] == highlight_fov_name
            )
        else:
            # Highlight all cells with this track_id (across all FOVs)
            highlight_mask = plot_data["track_id"] == highlight_track_id

        # Background: all cells NOT in the highlighted set, drawn dim and
        # non-interactive so the trajectory stands out.
        bg_data = plot_data[~highlight_mask]
        if len(bg_data) > 0:
            fig.add_trace(
                go.Scattergl(
                    x=bg_data["x"],
                    y=bg_data["y"],
                    mode="markers",
                    marker=dict(
                        size=3,
                        color="lightgray",
                        opacity=0.2,
                        line=dict(width=0),
                    ),
                    hoverinfo="skip",
                    showlegend=False,
                )
            )

        # Foreground: selected track, sorted by timepoint so the connecting
        # line follows the temporal trajectory.
        fg_data = plot_data[highlight_mask].copy()
        if len(fg_data) > 0:
            fg_data = fg_data.sort_values("timepoint").reset_index(drop=True)

            fig.add_trace(
                go.Scattergl(
                    x=fg_data["x"],
                    y=fg_data["y"],
                    mode="markers+lines",
                    marker=dict(
                        size=MARKER_SIZE_HIGHLIGHTED,
                        color=fg_data["color"],
                        opacity=0.9,
                        line=dict(width=1, color="white"),
                    ),
                    line=dict(width=2, color="rgba(255, 255, 255, 0.5)"),
                    text=[
                        _format_hover_text(row, embedding_x, embedding_y)
                        for _, row in fg_data.iterrows()
                    ],
                    hovertemplate="%{text}<extra></extra>",
                    showlegend=False,
                )
            )
    else:
        # Standard view: all cells with normal coloring and variable opacity
        fig.add_trace(
            go.Scattergl(
                x=plot_data["x"],
                y=plot_data["y"],
                mode="markers",
                marker=dict(
                    size=MARKER_SIZE_NORMAL,
                    color=plot_data["color"],
                    opacity=plot_data["opacity"],  # Use variable opacity per point
                    line=dict(width=0),
                ),
                text=[
                    _format_hover_text(row, embedding_x, embedding_y)
                    for _, row in plot_data.iterrows()
                ],
                hovertemplate="%{text}<extra></extra>",
            )
        )

    # Create title
    title_text = f"Cell Embedding Visualization: {embedding_x} vs {embedding_y}"
    if highlight_track_id is not None:
        if highlight_fov_name is not None:
            title_text += (
                f" (Highlighting Track {highlight_track_id} - {highlight_fov_name})"
            )
        else:
            title_text += f" (Highlighting Track {highlight_track_id})"

    fig.update_layout(
        title=title_text,
        xaxis_title=embedding_x,
        yaxis_title=embedding_y,
        hovermode="closest",
        height=PLOT_HEIGHT,
        showlegend=False,
        template="plotly_dark",
    )

    print(f"Created plot with {len(plot_data)} cells, colored by infection status")

    return fig
|
dynaclr_viz/ui.py
ADDED
|
@@ -0,0 +1,267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Gradio UI components and layout for DynaCLR visualization."""
|
| 2 |
+
|
| 3 |
+
import gradio as gr
|
| 4 |
+
|
| 5 |
+
from . import data
|
| 6 |
+
from .handlers import (
|
| 7 |
+
on_cell_selected,
|
| 8 |
+
update_channel_images,
|
| 9 |
+
update_plot_and_selector,
|
| 10 |
+
update_track_selector,
|
| 11 |
+
apply_preset,
|
| 12 |
+
build_channel_status_text,
|
| 13 |
+
PRESET_TRACKS,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def create_app():
    """Create and configure the Gradio application.

    Builds the full DynaCLR visualization UI: an embedding scatter plot with
    selectable X/Y axes, a track inspector (preset and manual FOV/track
    selection), per-channel contrast/opacity controls, and a timeline image
    gallery. All interactivity is wired to the callbacks in ``.handlers``.

    Returns:
        gr.Blocks: Configured Gradio demo, ready for ``demo.launch()``.
    """
    # Initialize data first.
    # NOTE(review): `embeddings` is not referenced again in this function —
    # presumably kept for tuple-unpacking compatibility with
    # data.initialize_data(); verify against that function's return contract.
    adata, embeddings, all_emb = data.initialize_data()

    with gr.Blocks(title="Cell Embedding Visualization") as demo:
        # --- Header / citation banner -------------------------------------
        gr.Markdown("# 🔬 DynaCLR: Cell Embedding Visualization")
        gr.Markdown(
            f"Exploring **{adata.shape[0]:,} cells** colored by infection status"
        )
        gr.Markdown(
            '**Paper:** Hirata-Miyasaki E., Pradeep S., Liu Z., et al. "DynaCLR: Contrastive Learning of Cellular Dynamics with Temporal Regularization." '
            "[arXiv:2410.11281](https://arxiv.org/abs/2410.11281) (2025)\n\n"
            "**GitHub Repository:** [mehta-lab/VisCy](https://github.com/mehta-lab/viscy)"
        )
        gr.Markdown("---")

        # --- Embedding plot section ---------------------------------------
        gr.Markdown("## Embedding Plot")
        with gr.Row():
            # Axis choices come from the embedding names discovered at load
            # time; defaults assume "PC1"/"PC2" exist in `all_emb`.
            embedding_x = gr.Dropdown(
                choices=all_emb, value="PC1", label="X-axis", scale=1
            )
            embedding_y = gr.Dropdown(
                choices=all_emb, value="PC2", label="Y-axis", scale=1
            )

        scatter_plot = gr.Plot(label="Embedding Scatter Plot")

        gr.Markdown("**Legend:** 🟠 Infected · 🔵 Uninfected · ⚪ Unknown")

        # --- Track viewer section -----------------------------------------
        gr.Markdown("---")
        gr.Markdown("## Track Inspector")
        gr.Markdown("Select a FOV and track to view timeline and microscopy images")

        # Presets section: curated tracks defined in handlers.PRESET_TRACKS.
        gr.Markdown("### 🎯 Featured Tracks: Infection Transitions")
        gr.Markdown("*Quick access to tracks showing dynamic infection state changes*")

        preset_selector = gr.Dropdown(
            choices=[p["name"] for p in PRESET_TRACKS],
            label="Select a Preset Track",
            info="These tracks show cells transitioning from uninfected to infected states",
            value=None,
            scale=2,
        )

        with gr.Accordion("📖 Preset Details", open=False):
            # One markdown bullet group per preset (name, description, FOV).
            preset_info_md = "\n\n".join(
                [
                    f"**{p['name']}**\n- {p['description']}\n- FOV: `{p['fov_name']}`"
                    for p in PRESET_TRACKS
                ]
            )
            gr.Markdown(preset_info_md)

        gr.Markdown("### Manual Selection")

        # Two-level selector: FOV → Track. Choices start empty and are
        # populated by update_plot_and_selector on demo.load (wired below).
        with gr.Row():
            fov_selector = gr.Dropdown(
                choices=[],
                label="Field of View (FOV)",
                info="Select imaging field of view",
                filterable=True,
                scale=1,
            )
            track_selector = gr.Dropdown(
                choices=[],
                label="Track ID",
                info="Annotated tracks only (filtered: has infected/uninfected cells)",
                filterable=True,
                scale=1,
            )

        # --- Channel Settings Accordion -----------------------------------
        # Each channel gets an enable toggle plus Min %/Max % percentile
        # contrast sliders and an opacity slider.
        # NOTE(review): in the original layout the opacity sliders may sit
        # inside or just below each gr.Row(); reconstructed here as inside —
        # confirm against the rendered app.
        with gr.Accordion("Channel Settings", open=False):
            gr.Markdown("*Adjust contrast and opacity for each microscopy channel*")

            # Phase3D channel
            with gr.Group():
                gr.Markdown("**Phase3D**")
                show_phase = gr.Checkbox(label="Enable Phase3D", value=True)
                with gr.Row():
                    phase_contrast = gr.Slider(
                        minimum=0, maximum=100, value=10, step=0.1, label="Min %"
                    )
                    phase_max_slider = gr.Slider(
                        minimum=0, maximum=100, value=90, step=0.1, label="Max %"
                    )
                    phase_opacity = gr.Slider(
                        minimum=0, maximum=1, value=1.0, step=0.05, label="Opacity"
                    )

            # GFP channel
            with gr.Group():
                gr.Markdown("**GFP (Green)**")
                show_gfp = gr.Checkbox(label="Enable GFP", value=True)
                with gr.Row():
                    gfp_contrast = gr.Slider(
                        minimum=0, maximum=100, value=10, step=0.1, label="Min %"
                    )
                    gfp_max_slider = gr.Slider(
                        minimum=0, maximum=100, value=90, step=0.1, label="Max %"
                    )
                    gfp_opacity = gr.Slider(
                        minimum=0, maximum=1, value=1.0, step=0.05, label="Opacity"
                    )

            # mCherry channel
            with gr.Group():
                gr.Markdown("**mCherry (Periwinkle)**")
                show_mcherry = gr.Checkbox(label="Enable mCherry", value=True)
                with gr.Row():
                    mcherry_contrast = gr.Slider(
                        minimum=0, maximum=100, value=10, step=0.1, label="Min %"
                    )
                    mcherry_max_slider = gr.Slider(
                        minimum=0, maximum=100, value=90, step=0.1, label="Max %"
                    )
                    mcherry_opacity = gr.Slider(
                        minimum=0, maximum=1, value=1.0, step=0.05, label="Opacity"
                    )

        # Channel Status Display - always visible
        gr.Markdown("### Currently Displayed Channels")
        channel_status = gr.Markdown(
            value="🔬 **Phase3D** (structure/morphology) · 🟢 **GFP** (green fluorescence) · 🟣 **mCherry** (red/magenta fluorescence)",
            elem_classes=["channel-status-info"],
        )

        # Image gallery: one thumbnail per timepoint of the selected track.
        lineage_gallery = gr.Gallery(
            label="Track Timeline",
            show_label=True,
            columns=8,
            rows=2,
            object_fit="contain",
            height=400,
        )

        # --- Event wiring -------------------------------------------------
        # Changing either embedding axis re-renders the plot and re-seeds
        # both cascading selectors.
        for component in [embedding_x, embedding_y]:
            component.change(
                fn=update_plot_and_selector,
                inputs=[embedding_x, embedding_y],
                outputs=[scatter_plot, fov_selector, track_selector],
            )

        # Connect preset selector to FOV and track selectors.
        # .select fires only on explicit user choice, so setting both
        # dropdowns here does not trigger intermediate cascade events.
        preset_selector.select(
            fn=apply_preset,
            inputs=[preset_selector],
            outputs=[fov_selector, track_selector],
        )

        # Connect FOV selector to track selector (cascading).
        # Only triggers on manual FOV selection, not on preset updates.
        fov_selector.select(
            fn=update_track_selector,
            inputs=[fov_selector],
            outputs=[track_selector],
        )

        # Inputs for cell selection (full update: gallery + info + plot).
        # Order must match the parameter order of handlers.on_cell_selected.
        cell_selection_inputs = [
            fov_selector,
            track_selector,
            show_phase,
            show_gfp,
            show_mcherry,
            phase_contrast,
            phase_max_slider,
            phase_opacity,
            gfp_contrast,
            gfp_max_slider,
            gfp_opacity,
            mcherry_contrast,
            mcherry_max_slider,
            mcherry_opacity,
            embedding_x,
            embedding_y,
        ]

        # Inputs for channel updates (lightweight: same list minus the two
        # embedding axes). Order must match handlers.update_channel_images.
        channel_only_inputs = [
            fov_selector,
            track_selector,
            show_phase,
            show_gfp,
            show_mcherry,
            phase_contrast,
            phase_max_slider,
            phase_opacity,
            gfp_contrast,
            gfp_max_slider,
            gfp_opacity,
            mcherry_contrast,
            mcherry_max_slider,
            mcherry_opacity,
        ]

        # Info display for track metadata (rendered below the gallery,
        # since Blocks places components in creation order).
        lineage_info = gr.Markdown(
            value="Select a FOV and track to view the timeline",
            elem_classes=["track-info"],
        )

        # Selecting a track performs the full update: timeline gallery,
        # metadata text, and plot re-render with the track highlighted.
        track_selector.change(
            fn=on_cell_selected,
            inputs=cell_selection_inputs,
            outputs=[lineage_gallery, lineage_info, scatter_plot],
        )

        # Any channel toggle/slider change re-renders only the gallery and
        # the channel-status line — the scatter plot is left untouched.
        for control in [
            show_phase,
            show_gfp,
            show_mcherry,
            phase_contrast,
            phase_max_slider,
            phase_opacity,
            gfp_contrast,
            gfp_max_slider,
            gfp_opacity,
            mcherry_contrast,
            mcherry_max_slider,
            mcherry_opacity,
        ]:
            control.change(
                fn=update_channel_images,
                inputs=channel_only_inputs,
                outputs=[lineage_gallery, channel_status],
            )

        # Initialize plot and selectors on load (same callback as the
        # axis-change handlers above).
        demo.load(
            fn=update_plot_and_selector,
            inputs=[embedding_x, embedding_y],
            outputs=[scatter_plot, fov_selector, track_selector],
        )

    return demo
|
requirements.txt
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core dependencies for DynaCLR Visualization on Hugging Face Spaces
|
| 2 |
+
|
| 3 |
+
# Gradio for the web interface
|
| 4 |
+
gradio==6.0.2
|
| 5 |
+
|
| 6 |
+
# Data handling and visualization
|
| 7 |
+
plotly>=6.3.0
|
| 8 |
+
pandas>=2.0.0
|
| 9 |
+
numpy>=1.24.0
|
| 10 |
+
|
| 11 |
+
# AnnData for single-cell analysis
|
| 12 |
+
anndata>=0.10.0
|
| 13 |
+
|
| 14 |
+
# OME-Zarr and image I/O
|
| 15 |
+
iohub[tensorstore]>=0.3.0a2
|
| 16 |
+
|
| 17 |
+
# Image processing
|
| 18 |
+
scikit-image>=0.22.0
|
| 19 |
+
|
| 20 |
+
# Zarr for data storage
|
| 21 |
+
zarr>=2.17.0
|
| 22 |
+
|
| 23 |
+
# HuggingFace Hub for dataset loading
|
| 24 |
+
huggingface_hub>=1.1.7
|
| 25 |
+
|
| 26 |
+
# Additional dependencies for data handling
|
| 27 |
+
xarray<=2025.9.0
|
| 28 |
+
datashader>=0.18.2
|
| 29 |
+
matplotlib>=3.10.0
|