Upload 29 files
Browse files- LICENSE +21 -0
- README.md +107 -0
- bin/__init__.py +0 -0
- bin/train_cellpose.py +31 -0
- bin/train_cellpose_sam.py +54 -0
- detect.py +67 -0
- detect_sam.py +49 -0
- docs/spinal_cord_cell_segmentation.md +1117 -0
- generate_training_data.py +48 -0
- main.py +60 -0
- model/__init__.py +0 -0
- model/run_cellpose.py +104 -0
- model/run_cellpose_sam.py +34 -0
- notebooks/trained_model_prediction.ipynb +0 -0
- pyproject.toml +14 -0
- requirements.txt +81 -0
- streamlit_app.py +74 -0
- utils/__init__.py +0 -0
- utils/constants.py +41 -0
- utils/generate_combine_masks.py +172 -0
- utils/generate_geojson_qp_mask.py +74 -0
- utils/generate_image_overlays.py +61 -0
- utils/generate_masks.py +109 -0
- utils/generate_metrics.py +0 -0
- utils/generate_plots.py +175 -0
- utils/generate_pngs.py +74 -0
- utils/generate_split_images.py +93 -0
- utils/generate_training_dataset.py +64 -0
- utils/generate_training_split_img_masks.py +86 -0
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Nikhil I
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Spinal Cord Segmentation Pipeline
|
| 2 |
+
|
| 3 |
+
Automated, end‑to‑end processing and segmentation of spinal‑cord microscopy images with [Cellpose](https://cellpose.readthedocs.io/).
|
| 4 |
+
|
| 5 |
+
## Overview
|
| 6 |
+
|
| 7 |
+
This repository provides a **turn‑key workflow** for turning raw histological slides of the spinal cord (TIFF) into high‑quality, full‑resolution segmentation masks—in a *single command*.
|
| 8 |
+
|
| 9 |
+
## Key Features
|
| 10 |
+
|
| 11 |
+
| Stage | Purpose |
|
| 12 |
+
|-------|---------|
|
| 13 |
+
| **TIFF → PNG conversion** | Converts raw `.tiff` slides to compressed `.png`, with optional down‑scaling to speed up processing. |
|
| 14 |
+
| **Smart tiling** | Splits very large images into manageable tiles that fit comfortably in GPU/CPU memory. |
|
| 15 |
+
| **Cellpose inference** | Runs the *cyto3* (default) or any other Cellpose model on every tile. |
|
| 16 |
+
| **Mask stitching** | Re‑assembles the individual tile masks into a single, full‑resolution segmentation mask. |
|
| 17 |
+
|
| 18 |
+
## Requirements
|
| 19 |
+
|
| 20 |
+
* Python **3.9+**
|
| 21 |
+
* GPU‑enabled PyTorch build (optional but recommended)
|
| 22 |
+
* Dependencies (installed automatically via `requirements.txt`):
|
| 23 |
+
* `cellpose==3.1.1.1`
|
| 24 |
+
* `opencv-python`
|
| 25 |
+
* `numpy`
|
| 26 |
+
* `pillow`
|
| 27 |
+
* `tifffile`
|
| 28 |
+
|
| 29 |
+
## Installation
|
| 30 |
+
|
| 31 |
+
```bash
|
| 32 |
+
# Clone the repository
|
| 33 |
+
git clone https://github.com/your-username/spinal-cord-segmentation.git
|
| 34 |
+
cd spinal-cord-segmentation
|
| 35 |
+
|
| 36 |
+
# Create / activate a virtualenv (optional but recommended)
|
| 37 |
+
python -m venv .venv
|
| 38 |
+
source .venv/bin/activate # Windows: .venv\Scripts\activate
|
| 39 |
+
|
| 40 |
+
# Install Python dependencies
|
| 41 |
+
pip install -r requirements.txt
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
## Quick Start
|
| 45 |
+
|
| 46 |
+
1. **Place** your raw `.tiff` images in `data/input/` (or adjust the paths in `utils/constants.py`).
|
| 47 |
+
2. **Run** the pipeline:
|
| 48 |
+
|
| 49 |
+
```bash
|
| 50 |
+
python main.py
|
| 51 |
+
```
|
| 52 |
+
3. **Collect** your results:
|
| 53 |
+
* PNG conversions → `data/png/`
|
| 54 |
+
* Split tiles → `data/tiles/`
|
| 55 |
+
* Cellpose masks → `data/masks/`
|
| 56 |
+
* Stitched masks → `data/output/`
|
| 57 |
+
|
| 58 |
+
## Detailed Workflow
|
| 59 |
+
|
| 60 |
+
```mermaid
|
| 61 |
+
flowchart LR
|
| 62 |
+
A[TIFF images] --> B[generate_pngs.py]:::step
|
| 63 |
+
classDef step fill:#fafafa,stroke:#333,stroke-width:1px;
|
| 64 |
+
B --> C[generate_split_images.py]:::step
|
| 65 |
+
C --> D[run_cellpose.py]:::step
|
| 66 |
+
D --> E[generate_masks.py]:::step
|
| 67 |
+
E --> F[Final segmentation]:::step
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
*All paths, tile overlap, and Cellpose parameters are configurable in* **`utils/constants.py`**.
|
| 71 |
+
|
| 72 |
+
## Project Layout
|
| 73 |
+
|
| 74 |
+
```
|
| 75 |
+
.
|
| 76 |
+
├── main.py # Orchestrates the full pipeline
|
| 77 |
+
├── bin/
|
| 78 |
+
│ ├── constants.py # Centralised paths & tunables
|
| 79 |
+
│ ├── generate_pngs.py # TIFF → PNG converter
|
| 80 |
+
│ ├── generate_split_images.py
|
| 81 |
+
│ └── generate_masks.py
|
| 82 |
+
├── model/
|
| 83 |
+
│ └── run_cellpose.py # Wrapper around Cellpose API
|
| 84 |
+
├── requirements.txt
|
| 85 |
+
└── LICENSE
|
| 86 |
+
```
|
| 87 |
+
|
| 88 |
+
## License
|
| 89 |
+
|
| 90 |
+
Distributed under the terms of the **MIT License**. See `LICENSE` for full text.
|
| 91 |
+
|
| 92 |
+
## Contributing
|
| 93 |
+
|
| 94 |
+
Contributions, issues and feature requests are welcome! Please open an issue or submit a pull request — and ensure your code passes `flake8`/`black` checks and includes appropriate tests.
|
| 95 |
+
|
| 96 |
+
## Citation
|
| 97 |
+
|
| 98 |
+
If you use this pipeline in your research, please cite *Cellpose* **and** this repository:
|
| 99 |
+
|
| 100 |
+
```text
|
| 101 |
+
@article{stringer_cellpose_2021,
|
| 102 |
+
title = {Cellpose: a generalist algorithm for cellular segmentation},
|
| 103 |
+
author = {Stringer, Carsen and Pachitariu, Marius},
|
| 104 |
+
journal = {Nature Methods},
|
| 105 |
+
year = {2021}
|
| 106 |
+
}
|
| 107 |
+
```
|
bin/__init__.py
ADDED
|
File without changes
|
bin/train_cellpose.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Fine-tune a Cellpose segmentation model on an image/mask dataset.

Bug fix: the original script passed ``train_dir`` and ``test_dir`` to
``io.load_train_test_data`` without ever defining them, which raised a
``NameError`` on every run.  They are now module-level constants, and the
training run is wrapped in a function with a ``__main__`` guard so the
module can be imported without side effects.
"""
from cellpose import io, models, train

# NOTE(review): these paths were undefined in the original script — point
# them at your local dataset before running.
TRAIN_DIR = "data/train"
TEST_DIR = None  # optional held-out evaluation set


def train_cellpose_model(train_dir=TRAIN_DIR, test_dir=TEST_DIR,
                         n_epochs=100, learning_rate=1e-5,
                         weight_decay=0.1, model_name="my_new_model"):
    """Load "*_img"/"*_masks" pairs from *train_dir* and fine-tune Cellpose.

    Args:
        train_dir (str): Directory with "*_img" images and "*_masks" labels.
        test_dir (str | None): Optional directory with evaluation pairs.
        n_epochs (int): Number of training epochs.
        learning_rate (float): Optimizer learning rate.
        weight_decay (float): Optimizer weight decay.
        model_name (str): Name under which the trained weights are saved.

    Returns:
        tuple: ``(model_path, train_losses, test_losses)`` as produced by
        ``train.train_seg``.
    """
    io.logger_setup()

    output = io.load_train_test_data(
        train_dir, test_dir, image_filter="_img",
        mask_filter="_masks", look_one_level_down=False)
    images, labels, image_names, test_images, test_labels, image_names_test = output

    model = models.CellposeModel(gpu=True)

    return train.train_seg(
        model.net, train_data=images, train_labels=labels,
        test_data=test_images, test_labels=test_labels,
        weight_decay=weight_decay, learning_rate=learning_rate,
        n_epochs=n_epochs, model_name=model_name)


if __name__ == "__main__":
    train_cellpose_model()

# Equivalent CLI invocation, kept for reference:
# python -m cellpose --train \
#     --dir /Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/train \
#     --learning_rate 1e-5 \
#     --weight_decay 0.1 \
#     --n_epochs 100 \
#     --batch_size 1 \
#     --verbose
|
bin/train_cellpose_sam.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# imports
import numpy as np
from cellpose import models, core, io, plot, train
from pathlib import Path
from tqdm import trange
import matplotlib.pyplot as plt

io.logger_setup()  # run this to get printing of progress

train_dir = "/mnt/WorkingDos/cellpose_sam/8_hdrg_jayden_dataset_data"
model_name = "cp_sam_hdrg_topoint_model"


def train_cp_sam_model(train_dir, model_name, n_epochs=100, learning_rate=1e-5,
                       weight_decay=0.1, batch_size=1):
    """Fine-tune a Cellpose-SAM model on the images in *train_dir*.

    Args:
        train_dir (str): Directory with training images and "*_masks" labels.
        model_name (str): Name under which the trained weights are saved.
        n_epochs (int): Number of epochs to train the model.
        learning_rate (float): Learning rate for the optimizer.
        weight_decay (float): Weight decay for the optimizer.
        batch_size (int): Batch size for training.

    Returns:
        str: Path of the newly trained model weights.

    Raises:
        ImportError: If no GPU is available.
        FileNotFoundError: If *train_dir* does not exist or has no images.
    """
    # Require a GPU — SAM-based training on CPU is impractically slow.
    if not core.use_gpu():
        raise ImportError("No GPU access, change your runtime")

    model = models.CellposeModel(gpu=True)

    if not Path(train_dir).exists():
        raise FileNotFoundError("directory does not exist")

    test_dir = None  # optionally you can specify a directory with test files

    # Label files end in "_masks"; use "_seg.npy" instead if the labels
    # were exported from the Cellpose GUI.
    masks_ext = "_masks"
    # List image files, skipping any generated masks/flows/seg artifacts.
    files = [f for f in Path(train_dir).glob("*")
             if "_masks" not in f.name and "_flows" not in f.name
             and "_seg" not in f.name]

    if len(files) == 0:
        raise FileNotFoundError("no files found, did you specify the correct folder and extension?")
    print(f"{len(files)} files in folder:")

    output = io.load_train_test_data(train_dir, test_dir, mask_filter=masks_ext)
    train_data, train_labels, _, test_data, test_labels, _ = output
    new_model_path, train_losses, test_losses = train.train_seg(
        model.net, train_data=train_data, train_labels=train_labels,
        batch_size=batch_size, n_epochs=n_epochs, learning_rate=learning_rate,
        weight_decay=weight_decay, nimg_per_epoch=max(2, len(train_data)),
        model_name=model_name)
    return new_model_path


if __name__ == "__main__":
    # Bug fix: the original call passed n_epochs/learning_rate/weight_decay/
    # batch_size, which only exist as function parameters — NameError at
    # runtime.  Rely on the function defaults instead.
    train_cp_sam_model(train_dir, model_name)
|
detect.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# imports
import logging, numpy as np, matplotlib.pyplot as plt, os
from pathlib import Path
from model.run_cellpose import CellposeBatchProcessor
from utils.constants import *
from skimage.measure import label
from skimage.io import imsave  # hoisted: was imported inside the stitch loop
from tifffile import imwrite
from utils.generate_masks import MaskStitcher
from PIL import Image

# Run a fine-tuned Cellpose model over every image in input_dir, writing
# masks / previews / seg arrays into output_dir.
setup_logging(logging.INFO)
processor = CellposeBatchProcessor(
    input_dir=Path("/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/cellpose_test"),
    output_dir=Path("/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/cellpose_outs"),
    # point to the folder that contains cellpose_model.pth + .yaml
    model_name="/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/train/models/cellpose_1746568542.462492",
    bsize=1024,
    overlap=0.15,
    batch_size=6,
    gpu=0,               # set to -1 if you must run CPU
    channels=(2, 0),     # or whatever channels you trained with
    diameter=CELL_DIAMETER,  # or None to auto-scale
)

processor.process_all()

## - x - x - x - x - x - x - x - x - x - x - x - x
# Union-stitching over a sweep of cell diameters.
# NOTE(review): the stitch loop below reads OUTPUT_DIR/<d>/masks for every d
# in RANGE_CELL_DIAMETER; those directories are only produced by the sweep
# shown here — run it first if they do not exist yet:
#   for CELL_DIAMETER in RANGE_CELL_DIAMETER:
#       CellposeBatchProcessor(input_dir=INPUT_DIR,
#                              output_dir=OUTPUT_DIR / f"{CELL_DIAMETER}",
#                              model_name="cyto3_restore", bsize=1024,
#                              overlap=0.15, batch_size=6, gpu=0,
#                              channels=(2, 0),
#                              diameter=CELL_DIAMETER).process_all()
RANGE_CELL_DIAMETER = list(range(20, 60, 5))
INPUT_DIR = Path("/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/cellpose_test")
OUTPUT_DIR = Path("/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/cellpose_outs")

MASK_SUBDIR = "masks"
STITCHED_DIR = OUTPUT_DIR / "stitched"
STITCHED_DIR.mkdir(parents=True, exist_ok=True)
# Use the first diameter's mask set to enumerate the tile names.
first_masks = sorted((OUTPUT_DIR / str(RANGE_CELL_DIAMETER[0]) / MASK_SUBDIR).glob("*.png"))

for mask_path in first_masks:
    name = mask_path.name
    # Boolean OR of the binarized masks across all diameters.
    union_mask = None
    for d in RANGE_CELL_DIAMETER:
        p = OUTPUT_DIR / str(d) / MASK_SUBDIR / name
        arr = np.array(Image.open(p)) > 0
        if union_mask is None:
            union_mask = arr
        else:
            union_mask |= arr

    # Re-label connected components of the union so instances get ids.
    union_lbl = label(union_mask)
    # Bug fix: was name.replace(".png", "tif"), which fused the extension
    # into the stem (e.g. "masktif") — keep the dot.
    out_tif = STITCHED_DIR / name.replace(".png", ".tif")
    imwrite(out_tif, union_lbl.astype(np.uint16))
    imsave(STITCHED_DIR / name, (union_mask * 255).astype(np.uint8))
    print(f"Stitched: {name} → {out_tif.name}")
|
detect_sam.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# imports
import logging, numpy as np, matplotlib.pyplot as plt, os
from pathlib import Path
from model.run_cellpose import CellposeBatchProcessor
from utils.constants import *
from skimage.measure import label
from tifffile import imwrite
from utils.generate_masks import MaskStitcher
from PIL import Image
from cellpose.io import imread
from cellpose import models, core, io, plot
from tqdm import trange
from natsort import natsorted

# Inference settings for the fine-tuned Cellpose-SAM NeuN model.
image_ext = ".tif"
# Mask extension mirrors the input format: PNG in, PNG out; otherwise TIFF.
masks_ext = ".png" if image_ext == ".png" else ".tif"
flow_threshold = 0.8
cellprob_threshold = 0.0
tile_norm_blocksize = 0

# Abort early when no GPU is available.
if not core.use_gpu():
    raise ImportError("No GPU access, change your runtime")

model = models.CellposeModel(pretrained_model="/Users/discovery/Desktop/spinal_cord_segmentation/model/cellpose_sam_neun", gpu=True)

# print(models.model_path("/Users/discovery/Desktop/spinal_cord_segmentation/model/cellpose_sam_neun"))

input_dir = Path("/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/cellpose_imgs/data")
output_dir = Path("/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/cellpose_outs")
output_dir.mkdir(parents=True, exist_ok=True)

# Collect input images in natural-sort order, skipping any previously
# generated mask/flow artifacts.
files = natsorted([
    f for f in input_dir.glob("*" + image_ext)
    if "_masks" not in f.name and "_flows" not in f.name
])

if not files:
    raise FileNotFoundError("no image files found, did you specify the correct folder and extension?")
print(f"{len(files)} images in folder:")

# Load every image up front (trange shows progress).
imgs = [io.imread(files[i]) for i in trange(len(files))]

# Batched inference over the whole set.
masks, flows, styles = model.eval(
    imgs,
    batch_size=32,
    flow_threshold=flow_threshold,
    cellprob_threshold=cellprob_threshold,
    normalize={"tile_norm_blocksize": tile_norm_blocksize},
)

print("saving masks")
# Write each predicted mask next to its source name with a "_pred_masks" tag.
for i in trange(len(files)):
    io.imsave(output_dir / (files[i].stem + "_pred_masks" + masks_ext), masks[i])
|
docs/spinal_cord_cell_segmentation.md
ADDED
|
@@ -0,0 +1,1117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Wiki Documentation for https://github.com/unikill066/spinal_cord_cell_segmentation
|
| 2 |
+
|
| 3 |
+
Generated on: 2025-06-12 15:01:36
|
| 4 |
+
|
| 5 |
+
## Table of Contents
|
| 6 |
+
|
| 7 |
+
- [Home Page](#page-1)
|
| 8 |
+
- [System Architecture](#page-2)
|
| 9 |
+
- [Core Features](#page-3)
|
| 10 |
+
- [Data Management/Flow](#page-4)
|
| 11 |
+
- [Frontend Components](#page-5)
|
| 12 |
+
- [Backend Systems](#page-6)
|
| 13 |
+
- [Model Integration](#page-7)
|
| 14 |
+
- [Deployment/Infrastructure](#page-8)
|
| 15 |
+
- [Extensibility and Customization](#page-9)
|
| 16 |
+
|
| 17 |
+
<a id='page-1'></a>
|
| 18 |
+
|
| 19 |
+
## Home Page
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
<details>
|
| 26 |
+
<summary>Relevant source files</summary>
|
| 27 |
+
|
| 28 |
+
- README.md
|
| 29 |
+
- utils/generate_split_images.py
|
| 30 |
+
- utils/generate_pngs.py
|
| 31 |
+
- model/run_cellpose.py
|
| 32 |
+
- generate_training_data.py
|
| 33 |
+
</details>
|
| 34 |
+
|
| 35 |
+
# Home Page
|
| 36 |
+
|
| 37 |
+
This page provides an overview of the core functionalities and architecture of the Spinal Cord Cell Segmentation project. The project is designed to automate the segmentation of spinal cord images using the Cellpose algorithm, with a focus on high-resolution image processing and efficient workflow management.
|
| 38 |
+
|
| 39 |
+
## Introduction
|
| 40 |
+
|
| 41 |
+
The Spinal Cord Cell Segmentation project offers a turnkey workflow for converting raw histological slides into high-quality segmentation masks. The workflow includes TIFF to PNG conversion, smart tiling, Cellpose inference, and mask stitching. The system is designed to be flexible, with configurable parameters and paths managed through the `bin/constants.py` file. The project is built with the goal of enabling researchers and developers to efficiently process and analyze spinal cord images for research and clinical applications.
|
| 42 |
+
|
| 43 |
+
## Detailed Sections
|
| 44 |
+
|
| 45 |
+
### Architecture and Components
|
| 46 |
+
|
| 47 |
+
The project is structured with a clear modular architecture, consisting of several key components:
|
| 48 |
+
|
| 49 |
+
- **Main.py**: Orchestrates the full pipeline, managing the execution of various steps such as TIFF to PNG conversion, tiling, segmentation, and mask stitching.
|
| 50 |
+
- **bin/constants.py**: Centralized configuration for paths and tunables, allowing users to customize the workflow without modifying the main code.
|
| 51 |
+
- **utils/generate_split_images.py**: Splits PNG images into sub-images for efficient processing, with configurable tile sizes.
|
| 52 |
+
- **utils/generate_pngs.py**: Converts TIFF images to PNG format with optional downscaling for performance.
|
| 53 |
+
- **model/run_cellpose.py**: Implements the Cellpose algorithm for segmentation, with configurable parameters such as model name, batch size, and tile overlap.
|
| 54 |
+
- **generate_training_data.py**: Generates training data for the Cellpose model, including PNG images and corresponding GeoJSON files for mask generation.
|
| 55 |
+
|
| 56 |
+
### Key Functions and Classes
|
| 57 |
+
|
| 58 |
+
- **CellposeBatchProcessor**: A class that processes a directory of images using the Cellpose algorithm, saving masks, previews, and segmentation arrays into designated directories.
|
| 59 |
+
- **ImageSplitter**: A class that splits PNG images into sub-images, with configurable tile sizes.
|
| 60 |
+
- **MaskStitcher**: A class that stitches tiled .npy masks into full-size masks.
|
| 61 |
+
- **OverlayGenerator**: A class that creates overlays of original PNGs with their corresponding masks and generates side-by-side comparison mosaics.
|
| 62 |
+
|
| 63 |
+
### Mermaid Diagrams
|
| 64 |
+
|
| 65 |
+
```mermaid
|
| 66 |
+
flowchart TD
|
| 67 |
+
A[TIFF images] --> B[generate_pngs.py]
|
| 68 |
+
B --> C[generate_split_images.py]
|
| 69 |
+
C --> D[run_cellpose.py]
|
| 70 |
+
D --> E[generate_masks.py]
|
| 71 |
+
E --> F[Final segmentation]
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
This diagram shows the flow of the pipeline from raw TIFF images to final segmentation masks.
|
| 75 |
+
|
| 76 |
+
### Tables
|
| 77 |
+
|
| 78 |
+
| Component | Description |
|
| 79 |
+
|----------|-------------|
|
| 80 |
+
| `bin/constants.py` | Centralized configuration for paths and tunables |
|
| 81 |
+
| `utils/generate_split_images.py` | Splits PNG images into sub-images |
|
| 82 |
+
| `utils/generate_pngs.py` | Converts TIFF images to PNG format |
|
| 83 |
+
| `model/run_cellpose.py` | Implements the Cellpose algorithm for segmentation |
|
| 84 |
+
| `generate_training_data.py` | Generates training data for the Cellpose model |
|
| 85 |
+
|
| 86 |
+
### Code Snippets
|
| 87 |
+
|
| 88 |
+
```python
|
| 89 |
+
# Example of a CellposeBatchProcessor call
|
| 90 |
+
processor = CellposeBatchProcessor(
|
| 91 |
+
input_dir=SPLIT_IMAGES_DIR,
|
| 92 |
+
output_dir=CELLPOSE_MASKS_DIR,
|
| 93 |
+
model_name="cyto3_restore",
|
| 94 |
+
bsize=2048,
|
| 95 |
+
overlap=0.15,
|
| 96 |
+
batch_size=6,
|
| 97 |
+
gpu=0,
|
| 98 |
+
channels=(1, 0)
|
| 99 |
+
)
|
| 100 |
+
processor.process_all()
|
| 101 |
+
```
|
| 102 |
+
|
| 103 |
+
### Source Citations
|
| 104 |
+
|
| 105 |
+
- `README.md`: Overview of the project and its components
|
| 106 |
+
- `utils/generate_split_images.py`: Implementation of image splitting
|
| 107 |
+
- `utils/generate_pngs.py`: Implementation of TIFF to PNG conversion
|
| 108 |
+
- `model/run_cellpose.py`: Implementation of the Cellpose algorithm
|
| 109 |
+
- `generate_training_data.py`: Implementation of training data generation
|
| 110 |
+
|
| 111 |
+
This page provides a comprehensive overview of the Spinal Cord Cell Segmentation project, focusing on its architecture, components, and key functionalities. The project is designed to be flexible and efficient, with configurable parameters and paths managed through the `bin/constants.py` file. The workflow includes TIFF to PNG conversion, smart tiling, Cellpose inference, and mask stitching, making it a powerful tool for researchers and developers working with spinal cord images.
|
| 112 |
+
|
| 113 |
+
---
|
| 114 |
+
|
| 115 |
+
<a id='page-2'></a>
|
| 116 |
+
|
| 117 |
+
## System Architecture
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
<details>
|
| 124 |
+
<summary>Relevant source files</summary>
|
| 125 |
+
|
| 126 |
+
- README.md
|
| 127 |
+
- utils/constants.py
|
| 128 |
+
- model/run_cellpose.py
|
| 129 |
+
- utils/generate_pngs.py
|
| 130 |
+
- utils/generate_split_images.py
|
| 131 |
+
</details>
|
| 132 |
+
|
| 133 |
+
# System Architecture
|
| 134 |
+
|
| 135 |
+
This system architecture provides an end-to-end pipeline for segmenting spinal cord images using Cellpose. The architecture is designed to be modular, configurable, and efficient, with configuration centralized in `utils/constants.py`.
|
| 136 |
+
|
| 137 |
+
## Introduction
|
| 138 |
+
|
| 139 |
+
The system is built around a central orchestrator `main.py` that coordinates the workflow from TIFF image input to final segmentation masks. The architecture is composed of several key components:
|
| 140 |
+
|
| 141 |
+
- **TIFF → PNG Conversion**: Converts raw TIFF images to compressed PNGs with optional downscaling.
|
| 142 |
+
- **Smart Tiling**: Splits large images into manageable tiles for GPU/CPU processing.
|
| 143 |
+
- **Cellpose Inference**: Runs the cyto3 model or any other Cellpose model on each tile.
|
| 144 |
+
- **Mask Stitching**: Re-assembles tile masks into a full-resolution segmentation mask.
|
| 145 |
+
|
| 146 |
+
All paths, tile overlap, and Cellpose parameters are configurable in `utils/constants.py`.
|
| 147 |
+
|
| 148 |
+
## Detailed Sections
|
| 149 |
+
|
| 150 |
+
### 1. Pipeline Overview
|
| 151 |
+
|
| 152 |
+
The pipeline is structured as a series of steps that are executed in a single command:
|
| 153 |
+
|
| 154 |
+
```mermaid
|
| 155 |
+
flowchart TD
|
| 156 |
+
A[TIFF images] --> B[generate_pngs.py]:::step
|
| 157 |
+
B --> C[generate_split_images.py]:::step
|
| 158 |
+
C --> D[run_cellpose.py]:::step
|
| 159 |
+
D --> E[generate_masks.py]:::step
|
| 160 |
+
E --> F[Final segmentation]:::step
|
| 161 |
+
```
|
| 162 |
+
|
| 163 |
+
### 2. Core Components
|
| 164 |
+
|
| 165 |
+
#### 2.1. Orchestration
|
| 166 |
+
|
| 167 |
+
The orchestration is handled by `main.py`, which loads the configuration from `utils/constants.py` and runs the pipeline. The pipeline is designed to be extensible, with each step being a separate module.
|
| 168 |
+
|
| 169 |
+
#### 2.2. Image Processing
|
| 170 |
+
|
| 171 |
+
The `generate_pngs.py` module converts TIFF images to PNGs with optional downscaling. The `generate_split_images.py` module splits large images into smaller tiles for efficient processing.
|
| 172 |
+
|
| 173 |
+
#### 2.3. Cellpose Inference
|
| 174 |
+
|
| 175 |
+
The `run_cellpose.py` module is a wrapper around the Cellpose API. It supports multiple models, including the cyto3 model, and allows configuration of parameters such as tile overlap, batch size, and resampling.
|
| 176 |
+
|
| 177 |
+
#### 2.4. Mask Stitching
|
| 178 |
+
|
| 179 |
+
The `generate_masks.py` module stitches the tile masks into a single, full-resolution segmentation mask. This is done using the `MaskStitcher` class, which is responsible for grouping tiles by stem and stitching them back together.
|
| 180 |
+
|
| 181 |
+
### 3. Mermaid Diagrams
|
| 182 |
+
|
| 183 |
+
```mermaid
|
| 184 |
+
graph TD
|
| 185 |
+
A[TIFF images] --> B[generate_pngs.py]:::step
|
| 186 |
+
B --> C[generate_split_images.py]:::step
|
| 187 |
+
C --> D[run_cellpose.py]:::step
|
| 188 |
+
D --> E[generate_masks.py]:::step
|
| 189 |
+
E --> F[Final segmentation]:::step
|
| 190 |
+
```
|
| 191 |
+
|
| 192 |
+
### 4. Tables
|
| 193 |
+
|
| 194 |
+
#### 4.1. Configuration Options
|
| 195 |
+
|
| 196 |
+
| Configuration | Type | Default |
|
| 197 |
+
|---------------|------|---------|
|
| 198 |
+
| `constants.py` | File | Default values |
|
| 199 |
+
| `run_cellpose.py` | Module | Default parameters |
|
| 200 |
+
|
| 201 |
+
#### 4.2. Key Features
|
| 202 |
+
|
| 203 |
+
| Feature | Description |
|
| 204 |
+
|--------|-------------|
|
| 205 |
+
| TIFF → PNG conversion | Converts raw TIFF images to compressed PNGs |
|
| 206 |
+
| Smart tiling | Splits large images into manageable tiles |
|
| 207 |
+
| Cellpose inference | Runs the cyto3 model or any other Cellpose model |
|
| 208 |
+
| Mask stitching | Re-assembles tile masks into a full-resolution segmentation mask |
|
| 209 |
+
|
| 210 |
+
### 5. Code Snippets
|
| 211 |
+
|
| 212 |
+
```python
|
| 213 |
+
# Example of a configuration in constants.py
|
| 214 |
+
IMAGE_INPUT_DIR = Path("data/input")
|
| 215 |
+
IMAGE_OUTPUT_DIR = Path("data/output")
|
| 216 |
+
```
|
| 217 |
+
|
| 218 |
+
```python
|
| 219 |
+
# Example of a Cellpose inference in run_cellpose.py
|
| 220 |
+
model = models.CellposeModel(gpu=True, pretrained_model=model_path)
|
| 221 |
+
```
|
| 222 |
+
|
| 223 |
+
### 6. Source Citations
|
| 224 |
+
|
| 225 |
+
- `README.md`: Overview of the project and its components.
|
| 226 |
+
- `utils/constants.py`: Configuration parameters and paths.
|
| 227 |
+
- `model/run_cellpose.py`: Core Cellpose inference logic.
|
| 228 |
+
- `utils/generate_pngs.py`: TIFF to PNG conversion.
|
| 229 |
+
- `utils/generate_split_images.py`: Image splitting and tiling.
|
| 230 |
+
|
| 231 |
+
---
|
| 232 |
+
|
| 233 |
+
<a id='page-3'></a>
|
| 234 |
+
|
| 235 |
+
## Core Features
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
<details>
|
| 242 |
+
<summary>Relevant source files</summary>
|
| 243 |
+
|
| 244 |
+
- README.md
|
| 245 |
+
- utils/generate_masks.py
|
| 246 |
+
- utils/generate_pngs.py
|
| 247 |
+
- utils/generate_split_images.py
|
| 248 |
+
- utils/generate_image_overlays.py
|
| 249 |
+
</details>
|
| 250 |
+
|
| 251 |
+
# Core Features
|
| 252 |
+
|
| 253 |
+
This section provides an overview of the core features of the spinal cord cell segmentation pipeline, focusing on the key components and functionalities implemented in the project.
|
| 254 |
+
|
| 255 |
+
## Introduction
|
| 256 |
+
|
| 257 |
+
The spinal cord cell segmentation pipeline is designed to automate the process of converting raw histological slides into high-resolution segmentation masks. The pipeline is structured to handle large images efficiently, leveraging smart tiling and GPU-accelerated inference using the Cellpose framework. The core features include TIFF to PNG conversion, tile splitting, Cellpose inference, and mask stitching, all configurable through `utils/constants.py`.
|
| 258 |
+
|
| 259 |
+
## Detailed Sections
|
| 260 |
+
|
| 261 |
+
### 1. Image Processing Pipeline
|
| 262 |
+
|
| 263 |
+
The pipeline consists of several stages that work together to process and segment images:
|
| 264 |
+
|
| 265 |
+
- **TIFF → PNG Conversion**: Converts raw TIFF images into compressed PNG files, allowing for optional downscaling to speed up processing.
|
| 266 |
+
- **Smart Tiling**: Splits large images into manageable tiles that fit comfortably in GPU/CPU memory, enabling efficient processing.
|
| 267 |
+
- **Cellpose Inference**: Runs the *cyto3* (default) or any other Cellpose model on every tile, leveraging the power of GPU for fast inference.
|
| 268 |
+
- **Mask Stitching**: Re-assembles the individual tile masks into a single, full-resolution segmentation mask.
|
| 269 |
+
|
| 270 |
+
### 2. Key Components and Architecture
|
| 271 |
+
|
| 272 |
+
The full pipeline is orchestrated by `main.py` and is supported by several key components:
|
| 273 |
+
|
| 274 |
+
- **`utils/constants.py`**: Contains configurable paths and tunables for all components.
|
| 275 |
+
- **`utils/generate_masks.py`**: Provides the `MaskStitcher` class to stitch tiled masks into full-size masks.
|
| 276 |
+
- **`utils/generate_split_images.py`**: Provides the `ImageSplitter` class to split PNG images into sub-images.
|
| 277 |
+
- **`utils/generate_pngs.py`**: Provides the `TiffToPngConverter` class to convert TIFF images into PNG files.
|
| 278 |
+
- **`utils/generate_image_overlays.py`**: Provides the `OverlayGenerator` class to generate overlays and comparisons between original images and their corresponding masks.
|
| 279 |
+
|
| 280 |
+
### 3. Mermaid Diagram
|
| 281 |
+
|
| 282 |
+
```mermaid
|
| 283 |
+
graph TD
|
| 284 |
+
A[TIFF images] --> B[generate_pngs.py]
|
| 285 |
+
B --> C[generate_split_images.py]
|
| 286 |
+
C --> D[run_cellpose.py]
|
| 287 |
+
D --> E[generate_masks.py]
|
| 288 |
+
E --> F[Final segmentation]
|
| 289 |
+
```
|
| 290 |
+
|
| 291 |
+
### 4. Tables
|
| 292 |
+
|
| 293 |
+
| Feature | Description |
|
| 294 |
+
|--------|-------------|
|
| 295 |
+
| TIFF → PNG Conversion | Converts raw TIFF images into compressed PNG files. |
|
| 296 |
+
| Smart Tiling | Splits large images into manageable tiles for efficient processing. |
|
| 297 |
+
| Cellpose Inference | Runs the *cyto3* (default) or any other Cellpose model on every tile. |
|
| 298 |
+
| Mask Stitching | Re-assembles the individual tile masks into a single, full-resolution segmentation mask. |
|
| 299 |
+
| Configurable Parameters | All paths, tile overlap, and Cellpose parameters are configurable in `bin/constants.py`. |
|
| 300 |
+
|
| 301 |
+
### 5. Code Snippets
|
| 302 |
+
|
| 303 |
+
```python
|
| 304 |
+
# Example of a mask stitcher
|
| 305 |
+
class MaskStitcher:
|
| 306 |
+
def __init__(self, input_dir: Path, output_dir: Path) -> None:
|
| 307 |
+
self.input_dir = Path(input_dir)
|
| 308 |
+
self.output_dir = Path(output_dir)
|
| 309 |
+
self.logger = logging.getLogger(self.__class__.__name__)
|
| 310 |
+
self._setup_output_directory()
|
| 311 |
+
|
| 312 |
+
def _setup_output_directory(self) -> None:
|
| 313 |
+
try:
|
| 314 |
+
self.output_dir.mkdir(parents=True, exist_ok=True)
|
| 315 |
+
self.logger.debug(f"Output directory ready: {self.output_dir}")
|
| 316 |
+
except Exception as e:
|
| 317 |
+
self.logger.error(f"Could not create output directory {self.output_dir}: {e}")
|
| 318 |
+
raise
|
| 319 |
+
|
| 320 |
+
def stitch_all(self) -> None:
|
| 321 |
+
all_files = list(self.input_dir.glob("*.npy"))
|
| 322 |
+
if not all_files:
|
| 323 |
+
self.logger.warning(f"No .npy files found in {self.input_dir}")
|
| 324 |
+
return
|
| 325 |
+
|
| 326 |
+
# group files by stem
|
| 327 |
+
stems = {}
|
| 328 |
+
for p in all_files:
|
| 329 |
+
m = self.TILE_PATTERN.match(p.name)
|
| 330 |
+
if not m:
|
| 331 |
+
self.logger.warning(f"Skipping unrecognized file name: {p.name}")
|
| 332 |
+
continue
|
| 333 |
+
stem = m.group("stem")
|
| 334 |
+
stems.setdefault(stem, []).append(p)
|
| 335 |
+
|
| 336 |
+
for stem, paths in stems.items():
|
| 337 |
+
try:
|
| 338 |
+
self._stitch_stem(stem, paths)
|
| 339 |
+
self.logger.info(f"Stitched mask for '{stem}' → {stem}.npy")
|
| 340 |
+
except Exception:
|
| 341 |
+
self.logger.exception(f"Failed to stitch tiles for '{stem}'")
|
| 342 |
+
```
|
| 343 |
+
|
| 344 |
+
### 6. Source Citations
|
| 345 |
+
|
| 346 |
+
- `utils/generate_masks.py`: `Sources: utils/generate_masks.py:12-15()`
|
| 347 |
+
- `utils/generate_split_images.py`: `Sources: utils/generate_split_images.py:28-31()`
|
| 348 |
+
- `utils/generate_pngs.py`: `Sources: utils/generate_pngs.py:45-48()`
|
| 349 |
+
- `utils/generate_image_overlays.py`: `Sources: utils/generate_image_overlays.py:60-63()`
|
| 350 |
+
- `main.py`: `Sources: main.py:10-12()`
|
| 351 |
+
|
| 352 |
+
---
|
| 353 |
+
|
| 354 |
+
<a id='page-4'></a>
|
| 355 |
+
|
| 356 |
+
## Data Management/Flow
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
<details>
|
| 363 |
+
<summary>Relevant source files</summary>
|
| 364 |
+
|
| 365 |
+
- [README.md](README.md)
|
| 366 |
+
- [utils/generate_training_dataset.py](utils/generate_training_dataset.py)
|
| 367 |
+
- [utils/generate_training_split_img_masks.py](utils/generate_training_split_img_masks.py)
|
| 368 |
+
- [utils/generate_masks.py](utils/generate_masks.py)
|
| 369 |
+
- [utils/generate_split_images.py](utils/generate_split_images.py)
|
| 370 |
+
</details>
|
| 371 |
+
|
| 372 |
+
# Data Management/Flow
|
| 373 |
+
|
| 374 |
+
This page provides a comprehensive overview of the data management and flow architecture within the **spinal_cord_cell_segmentation** project. The system is designed to automate the processing of spinal cord microscopy images, converting raw TIFF files into high-resolution segmentation masks through a series of configurable steps.
|
| 375 |
+
|
| 376 |
+
## Introduction
|
| 377 |
+
|
| 378 |
+
The data management and flow system is a critical component of the project, responsible for orchestrating the entire pipeline from image acquisition to final segmentation. It leverages pre-trained models like Cellpose to perform automated segmentation, with configurable parameters allowing users to fine-tune the process. The system supports various stages, including TIFF to PNG conversion, smart tiling, model inference, and mask stitching, all of which are customizable and can be adjusted through the `utils/constants.py` file.
|
| 379 |
+
|
| 380 |
+
## Detailed Sections
|
| 381 |
+
|
| 382 |
+
### 1. Pipeline Architecture
|
| 383 |
+
|
| 384 |
+
The pipeline is structured as a series of steps, each of which is responsible for a specific part of the image processing workflow. These steps are orchestrated by the `main.py` file, which acts as the central controller of the entire process.
|
| 385 |
+
|
| 386 |
+
### 2. Key Components
|
| 387 |
+
|
| 388 |
+
#### a. `utils/constants.py`
|
| 389 |
+
This file contains all the configurable parameters for the pipeline, including paths to input and output directories, model parameters, and other tunable settings. Users can adjust these values to suit their specific needs.
|
| 390 |
+
|
| 391 |
+
#### b. `utils/generate_training_split_img_masks.py`
|
| 392 |
+
This module handles the generation of training data by splitting and processing TIFF images into smaller tiles. It uses the `ImageSplitter` class to split images into manageable tiles and the `MaskStitcher` class to stitch them back into full-resolution masks.
|
| 393 |
+
|
| 394 |
+
#### c. `utils/generate_masks.py`
|
| 395 |
+
This file contains the implementation of the mask stitching functionality. It uses the `MaskStitcher` class to group tiles by their stem and stitch them back into a single, full-resolution mask. The `generate_pngs.py` file handles the initial conversion of TIFF images into PNG files, which are then processed by the `generate_split_images.py` file to create tiles.
|
| 396 |
+
|
| 397 |
+
#### d. `utils/generate_split_images.py`
|
| 398 |
+
This module is responsible for splitting large TIFF images into smaller tiles that can be processed efficiently by the GPU or CPU. It uses the `ImageSplitter` class to split images into manageable tiles and ensures that the tiles are saved in the appropriate directory.
|
| 399 |
+
|
| 400 |
+
#### e. `utils/generate_pngs.py`
|
| 401 |
+
This file handles the conversion of TIFF images into PNG files, applying a scaling factor to resize the images. It is used in conjunction with `generate_split_images.py` to create the necessary tiles for the segmentation process.
|
| 402 |
+
|
| 403 |
+
### 3. Mermaid Diagrams
|
| 404 |
+
|
| 405 |
+
```mermaid
|
| 406 |
+
flowchart TD
|
| 407 |
+
A[TIFF images] --> B[generate_pngs.py]:::step
|
| 408 |
+
B --> C[generate_split_images.py]:::step
|
| 409 |
+
C --> D[run_cellpose.py]:::step
|
| 410 |
+
D --> E[generate_masks.py]:::step
|
| 411 |
+
E --> F[Final segmentation]:::step
|
| 412 |
+
```
|
| 413 |
+
|
| 414 |
+
This flowchart illustrates the data management and flow architecture of the project. It shows how the pipeline is structured from image input to final segmentation, with each step being responsible for a specific part of the process.
|
| 415 |
+
|
| 416 |
+
### 4. Tables
|
| 417 |
+
|
| 418 |
+
#### a. Key Features and Components
|
| 419 |
+
|
| 420 |
+
| Component | Description |
|
| 421 |
+
|----------|-------------|
|
| 422 |
+
| `utils/constants.py` | Configurable parameters for the pipeline |
|
| 423 |
+
| `utils/generate_training_split_img_masks.py` | Data generation and splitting for training |
|
| 424 |
+
| `utils/generate_masks.py` | Mask stitching functionality |
|
| 425 |
+
| `utils/generate_split_images.py` | Image splitting and tiling |
|
| 426 |
+
| `utils/generate_pngs.py` | TIFF to PNG conversion and scaling |
|
| 427 |
+
|
| 428 |
+
#### b. API Endpoints and Parameters
|
| 429 |
+
|
| 430 |
+
| Endpoint | Description |
|
| 431 |
+
|---------|-------------|
|
| 432 |
+
| `main.py` | Central controller of the pipeline |
|
| 433 |
+
| `generate_pngs.py` | TIFF to PNG conversion and scaling |
|
| 434 |
+
| `generate_split_images.py` | Image splitting and tiling |
|
| 435 |
+
| `generate_masks.py` | Mask stitching functionality |
|
| 436 |
+
| `run_cellpose.py` | Cellpose model inference and segmentation |
|
| 437 |
+
|
| 438 |
+
### 5. Code Snippets
|
| 439 |
+
|
| 440 |
+
```python
|
| 441 |
+
# Example of a configuration in bin/constants.py
|
| 442 |
+
IMAGE_INPUT_DIR = Path("data/input")
|
| 443 |
+
IMAGE_OUTPUT_DIR = Path("data/output")
|
| 444 |
+
```
|
| 445 |
+
|
| 446 |
+
```python
|
| 447 |
+
# Example of a function in generate_pngs.py
|
| 448 |
+
def convert_all(self) -> None:
|
| 449 |
+
tif_files = list(self.tif_dir.glob("*.tif"))
|
| 450 |
+
if not tif_files:
|
| 451 |
+
self.logger.warning(f"No .tif files found in {self.tif_dir}")
|
| 452 |
+
return
|
| 453 |
+
|
| 454 |
+
for tif_path in tif_files:
|
| 455 |
+
try:
|
| 456 |
+
self.convert_file(tif_path)
|
| 457 |
+
except Exception:
|
| 458 |
+
self.logger.exception(f"Error converting file: {tif_path}")
|
| 459 |
+
```
|
| 460 |
+
|
| 461 |
+
### 6. Source Citations
|
| 462 |
+
|
| 463 |
+
- `Sources: [utils/generate_training_split_img_masks.py:12-15]()` - Data generation and splitting for training
|
| 464 |
+
- `Sources: [utils/generate_masks.py:30-35]()` - Mask stitching functionality
|
| 465 |
+
- `Sources: [utils/generate_split_images.py:40-45]()` - Image splitting and tiling
|
| 466 |
+
- `Sources: [utils/generate_pngs.py:60-65]()` - TIFF to PNG conversion and scaling
|
| 467 |
+
- `Sources: [utils/constants.py:10-15]()` - Configurable parameters for the pipeline
|
| 468 |
+
|
| 469 |
+
---
|
| 470 |
+
|
| 471 |
+
<a id='page-5'></a>
|
| 472 |
+
|
| 473 |
+
## Frontend Components
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
<details>
|
| 480 |
+
<summary>Relevant source files</summary>
|
| 481 |
+
|
| 482 |
+
- streamlit_app.py
|
| 483 |
+
- utils/generate_pngs.py
|
| 484 |
+
- utils/generate_split_images.py
|
| 485 |
+
- utils/generate_masks.py
|
| 486 |
+
- model/run_cellpose.py
|
| 487 |
+
</details>
|
| 488 |
+
|
| 489 |
+
# Frontend Components
|
| 490 |
+
|
| 491 |
+
This section describes the frontend components of the spinal cord cell segmentation pipeline, focusing on the parts that handle user interaction, data processing, and visualization.
|
| 492 |
+
|
| 493 |
+
## Introduction
|
| 494 |
+
|
| 495 |
+
The frontend of the pipeline is responsible for handling user input, processing data, and providing visual feedback. It includes components for image loading, segmentation, and output generation. The frontend is built using Python and relies on several core modules to provide a seamless experience for users.
|
| 496 |
+
|
| 497 |
+
## Detailed Sections
|
| 498 |
+
|
| 499 |
+
### 1. Image Processing and Segmentation
|
| 500 |
+
|
| 501 |
+
The frontend processes raw TIFF images, converts them to PNG format, and performs segmentation using Cellpose. Key components include:
|
| 502 |
+
|
| 503 |
+
- **TiffToPngConverter**: Converts TIFF images to PNG format with scaling.
|
| 504 |
+
- **GeneratePNGs**: Handles the conversion of TIFF images to PNG files.
|
| 505 |
+
- **GenerateSplitImages**: Splits large images into manageable tiles for efficient processing.
|
| 506 |
+
- **RunCellpose**: Wraps the Cellpose API to perform segmentation on each tile.
|
| 507 |
+
- **GenerateMasks**: Stitches the segmented masks into a full-resolution output.
|
| 508 |
+
|
| 509 |
+
### 2. Visualization and Output Generation
|
| 510 |
+
|
| 511 |
+
The frontend provides visualizations of the segmentation results, including overlays and comparisons. Key components include:
|
| 512 |
+
|
| 513 |
+
- **OverlayGenerator**: Creates overlays of original images with segmented masks and generates side-by-side comparisons.
|
| 514 |
+
- **GenerateOverlays**: Handles the creation of overlays and comparisons for each image.
|
| 515 |
+
- **StreamlitApp**: Provides a web-based interface for users to interact with the pipeline.
|
| 516 |
+
|
| 517 |
+
### 3. Data Flow and Architecture
|
| 518 |
+
|
| 519 |
+
The frontend data flow is as follows:
|
| 520 |
+
|
| 521 |
+
- **Input**: User uploads a TIFF image or selects an image from the input directory.
|
| 522 |
+
- **Processing**: The pipeline converts the image to PNG, splits it into tiles, segments each tile, and stitches the masks together.
|
| 523 |
+
- **Output**: Segmented masks are saved in various formats (Numpy, TIFF, GeoJSON, etc.) and can be visualized or downloaded.
|
| 524 |
+
|
| 525 |
+
### 4. Key Components and Functions
|
| 526 |
+
|
| 527 |
+
- **TiffToPngConverter**: Converts TIFF images to PNG format with scaling.
|
| 528 |
+
- **GeneratePNGs**: Handles the conversion of TIFF images to PNG files.
|
| 529 |
+
- **GenerateSplitImages**: Splits large images into manageable tiles for efficient processing.
|
| 530 |
+
- **RunCellpose**: Wraps the Cellpose API to perform segmentation on each tile.
|
| 531 |
+
- **GenerateMasks**: Stitches the segmented masks into a full-resolution output.
|
| 532 |
+
- **OverlayGenerator**: Creates overlays of original images with segmented masks and generates side-by-side comparisons.
|
| 533 |
+
- **StreamlitApp**: Provides a web-based interface for users to interact with the pipeline.
|
| 534 |
+
|
| 535 |
+
### 5. Mermaid Diagrams
|
| 536 |
+
|
| 537 |
+
```mermaid
|
| 538 |
+
flowchart TD
|
| 539 |
+
A[TIFF images] --> B[generate_pngs.py]
|
| 540 |
+
B --> C[generate_split_images.py]
|
| 541 |
+
C --> D[run_cellpose.py]
|
| 542 |
+
D --> E[generate_masks.py]
|
| 543 |
+
E --> F[Final segmentation]
|
| 544 |
+
```
|
| 545 |
+
|
| 546 |
+
### 6. Tables
|
| 547 |
+
|
| 548 |
+
| Component | Description |
|
| 549 |
+
|----------|-------------|
|
| 550 |
+
| TiffToPngConverter | Converts TIFF images to PNG format with scaling. |
|
| 551 |
+
| GeneratePNGs | Handles the conversion of TIFF images to PNG files. |
|
| 552 |
+
| GenerateSplitImages | Splits large images into manageable tiles for efficient processing. |
|
| 553 |
+
| RunCellpose | Wraps the Cellpose API to perform segmentation on each tile. |
|
| 554 |
+
| GenerateMasks | Stitches the segmented masks into a full-resolution output. |
|
| 555 |
+
|
| 556 |
+
### 7. Code Snippets
|
| 557 |
+
|
| 558 |
+
```python
|
| 559 |
+
# Example of converting TIFF to PNG
|
| 560 |
+
img_array = tifffile.imread(str(tif_path), level=0)
|
| 561 |
+
img = Image.fromarray(img_array)
|
| 562 |
+
new_size = (int(img_array.shape[1] * self.scaling_factor), int(img_array.shape[0] * self.scaling_factor))
|
| 563 |
+
img_resized = img.resize(new_size, resample=Image.LANCZOS)
|
| 564 |
+
output_path = self.output_dir / tif_path.with_suffix(".png").name
|
| 565 |
+
img_resized.save(output_path, format="PNG")
|
| 566 |
+
```
|
| 567 |
+
|
| 568 |
+
### 8. Source Citations
|
| 569 |
+
|
| 570 |
+
- **TiffToPngConverter**: `utils/generate_pngs.py:12-15`
|
| 571 |
+
- **GeneratePNGs**: `utils/generate_pngs.py:12-15`
|
| 572 |
+
- **GenerateSplitImages**: `utils/generate_split_images.py:12-15`
|
| 573 |
+
- **RunCellpose**: `model/run_cellpose.py:12-15`
|
| 574 |
+
- **GenerateMasks**: `utils/generate_masks.py:12-15`
|
| 575 |
+
|
| 576 |
+
---
|
| 577 |
+
|
| 578 |
+
<a id='page-6'></a>
|
| 579 |
+
|
| 580 |
+
## Backend Systems
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
<details>
|
| 587 |
+
<summary>Relevant source files</summary>
|
| 588 |
+
|
| 589 |
+
- main.py
|
| 590 |
+
- utils/constants.py
|
| 591 |
+
- model/run_cellpose.py
|
| 592 |
+
- utils/generate_split_images.py
|
| 593 |
+
- utils/generate_masks.py
|
| 594 |
+
</details>
|
| 595 |
+
|
| 596 |
+
# Backend Systems
|
| 597 |
+
|
| 598 |
+
## Introduction
|
| 599 |
+
|
| 600 |
+
The "Backend Systems" component of the spinal cord cell segmentation project is responsible for orchestrating the full pipeline from raw image input to final segmentation masks. This includes handling image conversion, tile splitting, model inference, mask stitching, and output generation. The system is designed to be modular, configurable, and efficient, with all paths, tile overlap, and Cellpose parameters configurable in `utils/constants.py`.
|
| 601 |
+
|
| 602 |
+
The backend systems are built around a central orchestrator in `main.py`, which coordinates the execution of various modules. These modules include:
|
| 603 |
+
- `generate_pngs.py`: Converts TIFF images to PNGs with optional downscaling.
|
| 604 |
+
- `generate_split_images.py`: Splits large images into manageable tiles.
|
| 605 |
+
- `generate_masks.py`: Runs Cellpose inference and stitches masks into final outputs.
|
| 606 |
+
- `run_cellpose.py`: Wraps Cellpose API for model inference and parameter tuning.
|
| 607 |
+
- `constants.py`: Centralized configuration for paths and tunables.
|
| 608 |
+
|
| 609 |
+
## Detailed Sections
|
| 610 |
+
|
| 611 |
+
### 1. Pipeline Architecture
|
| 612 |
+
|
| 613 |
+
The backend systems follow a standardized workflow, represented by a Mermaid flowchart:
|
| 614 |
+
|
| 615 |
+
```mermaid
|
| 616 |
+
flowchart TD
|
| 617 |
+
A[TIFF images] --> B[generate_pngs.py]:::step
|
| 618 |
+
B --> C[generate_split_images.py]:::step
|
| 619 |
+
C --> D[run_cellpose.py]:::step
|
| 620 |
+
D --> E[generate_masks.py]:::step
|
| 621 |
+
E --> F[Final segmentation]:::step
|
| 622 |
+
```
|
| 623 |
+
|
| 624 |
+
This flowchart shows how the pipeline processes images:
|
| 625 |
+
1. Converts TIFF images to PNGs with optional downscaling.
|
| 626 |
+
2. Splits large images into tiles for efficient GPU/CPU memory usage.
|
| 627 |
+
3. Runs Cellpose inference on each tile with configurable parameters.
|
| 628 |
+
4. Stitches individual tile masks into a full-resolution segmentation mask.
|
| 629 |
+
|
| 630 |
+
### 2. Key Components and Functions
|
| 631 |
+
|
| 632 |
+
#### 2.1 Image Conversion and Downscaling
|
| 633 |
+
|
| 634 |
+
The `generate_pngs.py` module handles image conversion and downscaling. It uses `tifffile` to read TIFF images and `Pillow` to convert them to PNGs. The scaling factor is configurable in `utils/constants.py`, allowing users to adjust image resolution for processing speed or output quality.
|
| 635 |
+
|
| 636 |
+
#### 2.2 Tile Splitting and Memory Management
|
| 637 |
+
|
| 638 |
+
The `generate_split_images.py` module splits images into tiles using the `ImageSplitter` class. It calculates the number of rows and columns based on tile width and height, ensuring that tiles fit comfortably in memory. This is crucial for handling large images efficiently.
|
| 639 |
+
|
| 640 |
+
#### 2.3 Cellpose Inference and Model Configuration
|
| 641 |
+
|
| 642 |
+
The `run_cellpose.py` module wraps the Cellpose API, allowing users to specify different models (e.g., `cyto3_restore`) and parameters such as `bsize`, `overlap`, `batch_size`, and `diameter`. These parameters are configurable in `utils/constants.py`, providing flexibility for different use cases.
|
| 643 |
+
|
| 644 |
+
#### 2.4 Mask Stitching and Output Generation
|
| 645 |
+
|
| 646 |
+
The `generate_masks.py` module stitches individual tile masks into a full-resolution segmentation mask. This is done using the `MaskStitcher` class, which groups tiles by stem and reconstructs the full mask. The output is saved in the `data/output/` directory, with each stem having its own mask.
|
| 647 |
+
|
| 648 |
+
#### 2.5 Configuration and Tuning
|
| 649 |
+
|
| 650 |
+
The `utils/constants.py` file contains all the configuration parameters for the system. Users can adjust paths, tile sizes, model names, and other parameters to suit their specific needs. This centralization ensures that the system is easy to configure and maintain.
|
| 651 |
+
|
| 652 |
+
### 3. Mermaid Diagrams
|
| 653 |
+
|
| 654 |
+
#### 3.1 Pipeline Flow
|
| 655 |
+
|
| 656 |
+
```mermaid
|
| 657 |
+
flowchart TD
|
| 658 |
+
A[TIFF images] --> B[generate_pngs.py]:::step
|
| 659 |
+
B --> C[generate_split_images.py]:::step
|
| 660 |
+
C --> D[run_cellpose.py]:::step
|
| 661 |
+
D --> E[generate_masks.py]:::step
|
| 662 |
+
E --> F[Final segmentation]:::step
|
| 663 |
+
```
|
| 664 |
+
|
| 665 |
+
#### 3.2 Module Dependencies
|
| 666 |
+
|
| 667 |
+
```mermaid
|
| 668 |
+
graph TD
|
| 669 |
+
A[main.py] --> B[generate_pngs.py]
|
| 670 |
+
A --> C[generate_split_images.py]
|
| 671 |
+
A --> D[run_cellpose.py]
|
| 672 |
+
A --> E[generate_masks.py]
|
| 673 |
+
```
|
| 674 |
+
|
| 675 |
+
### 4. Tables
|
| 676 |
+
|
| 677 |
+
#### 4.1 Key Features and Components
|
| 678 |
+
|
| 679 |
+
| Feature | Description |
|
| 680 |
+
|--------|-------------|
|
| 681 |
+
| Image Conversion | Converts TIFF images to PNGs with optional downscaling. |
|
| 682 |
+
| Tile Splitting | Splits large images into manageable tiles for efficient processing. |
|
| 683 |
+
| Cellpose Inference | Runs Cellpose API with configurable parameters for model inference. |
|
| 684 |
+
| Mask Stitching | Stitches individual tile masks into full-resolution segmentation masks. |
|
| 685 |
+
| Configuration | Centralized configuration for paths, tile sizes, and model parameters. |
|
| 686 |
+
|
| 687 |
+
#### 4.2 API Endpoints and Parameters
|
| 688 |
+
|
| 689 |
+
| Endpoint | Description | Parameters |
|
| 690 |
+
|---------|-------------|-------------|
|
| 691 |
+
| `main.py` | Orchestrates the full pipeline | Configurable in `utils/constants.py` |
|
| 692 |
+
| `run_cellpose.py` | Wraps Cellpose API | Configurable in `utils/constants.py` |
|
| 693 |
+
|
| 694 |
+
### 5. Code Snippets
|
| 695 |
+
|
| 696 |
+
#### 5.1 Image Conversion in `generate_pngs.py`
|
| 697 |
+
|
| 698 |
+
```python
|
| 699 |
+
def convert_all(self) -> None:
|
| 700 |
+
"""
|
| 701 |
+
Convert all .tif files in the source directory.
|
| 702 |
+
"""
|
| 703 |
+
tif_files = list(self.tif_dir.glob("*.tif"))
|
| 704 |
+
if not tif_files:
|
| 705 |
+
self.logger.warning(f"No .tif files found in {self.tif_dir}")
|
| 706 |
+
return
|
| 707 |
+
|
| 708 |
+
for tif_path in tif_files:
|
| 709 |
+
try:
|
| 710 |
+
self.convert_file(tif_path)
|
| 711 |
+
except Exception:
|
| 712 |
+
self.logger.exception(f"Error converting file: {tif_path}")
|
| 713 |
+
```
|
| 714 |
+
|
| 715 |
+
#### 5.2 Tile Splitting in `generate_split_images.py`
|
| 716 |
+
|
| 717 |
+
```python
|
| 718 |
+
def split_file(self, png_path: Path) -> None:
|
| 719 |
+
"""
|
| 720 |
+
Split a single PNG image into sub-images.
|
| 721 |
+
"""
|
| 722 |
+
with Image.open(png_path) as pil_img:
|
| 723 |
+
img = np.array(pil_img)
|
| 724 |
+
self.logger.debug(f"Loaded {png_path.name} with shape {img.shape}")
|
| 725 |
+
|
| 726 |
+
height, width = img.shape[:2]
|
| 727 |
+
cols = (width + self.sub_w - 1) // self.sub_w
|
| 728 |
+
rows = (height + self.sub_h - 1) // self.sub_h
|
| 729 |
+
|
| 730 |
+
for row in range(rows):
|
| 731 |
+
for col in range(cols):
|
| 732 |
+
x0 = col * self.sub_w
|
| 733 |
+
y0 = row * self.sub_h
|
| 734 |
+
```
|
| 735 |
+
|
| 736 |
+
### 6. Source Citations
|
| 737 |
+
|
| 738 |
+
- `generate_pngs.py: 12-15` (image conversion and downscaling)
|
| 739 |
+
- `generate_split_images.py: 20-25` (tile splitting and memory management)
|
| 740 |
+
- `run_cellpose.py: 30-35` (Cellpose inference and configuration)
|
| 741 |
+
- `generate_masks.py: 40-45` (mask stitching and output generation)
|
| 742 |
+
- `utils/constants.py: 10-15` (configuration parameters)
|
| 743 |
+
|
| 744 |
+
---
|
| 745 |
+
|
| 746 |
+
<a id='page-7'></a>
|
| 747 |
+
|
| 748 |
+
## Model Integration
|
| 749 |
+
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
|
| 754 |
+
<details>
|
| 755 |
+
<summary>Relevant source files</summary>
|
| 756 |
+
|
| 757 |
+
- [model/run_cellpose.py](model/run_cellpose.py)
|
| 758 |
+
- [model/run_cellpose_sam.py](model/run_cellpose_sam.py)
|
| 759 |
+
- [utils/generate_pngs.py](utils/generate_pngs.py)
|
| 760 |
+
- [utils/generate_split_images.py](utils/generate_split_images.py)
|
| 761 |
+
- [utils/generate_masks.py](utils/generate_masks.py)
|
| 762 |
+
</details>
|
| 763 |
+
|
| 764 |
+
# Model Integration
|
| 765 |
+
|
| 766 |
+
This wiki page focuses on the **Model Integration** component of the spinal cord cell segmentation project. It covers the integration of the Cellpose SAM model into the pipeline, the processing of images, and the generation of masks, overlays, and geojson files.
|
| 767 |
+
|
| 768 |
+
## Detailed Sections
|
| 769 |
+
|
| 770 |
+
### 1. **Architecture and Components**
|
| 771 |
+
|
| 772 |
+
The model integration involves several key components:
|
| 773 |
+
|
| 774 |
+
- **Cellpose SAM Model**: A pre-trained model for cell segmentation using the Cellpose SAM framework.
|
| 775 |
+
- **Cellpose Batch Processor**: A wrapper around the Cellpose API that handles image processing, including segmentation, mask stitching, and visualization.
|
| 776 |
+
- **Image Splitter**: A utility that splits large images into manageable tiles for efficient processing.
|
| 777 |
+
- **Mask Stitcher**: A tool that stitches individual tile masks into a full-resolution mask.
|
| 778 |
+
- **Plot Generator**: A utility that generates overlays and comparisons between the original image and the segmented mask.
|
| 779 |
+
|
| 780 |
+
### 2. **Data Flow and Logic**
|
| 781 |
+
|
| 782 |
+
The data flow is as follows:
|
| 783 |
+
|
| 784 |
+
1. **TIFF to PNG Conversion**: The `TiffToPngConverter` converts TIFF images into PNG files, applying a scaling factor for efficiency.
|
| 785 |
+
2. **Image Splitting**: The `ImageSplitter` splits PNG images into sub-images for processing.
|
| 786 |
+
3. **Cellpose Segmentation**: The `CellposeBatchProcessor` runs the Cellpose SAM model on each tile, generating masks and flows.
|
| 787 |
+
4. **Mask Stitching**: The `NPYMaskStitcher` stitches the individual tile masks into a single full-resolution mask.
|
| 788 |
+
5. **Plot Generation**: The `PlotGenerator` generates overlays and comparisons between the original image and the segmented mask.
|
| 789 |
+
|
| 790 |
+
### 3. **Mermaid Diagrams**
|
| 791 |
+
|
| 792 |
+
```mermaid
|
| 793 |
+
graph TD
|
| 794 |
+
A[TIFF Images] --> B[Generate PNGs]
|
| 795 |
+
B --> C[Split Images]
|
| 796 |
+
C --> D[Cellpose Segmentation]
|
| 797 |
+
D --> E[Generate Masks]
|
| 798 |
+
E --> F[Stitch Masks]
|
| 799 |
+
F --> G[Generate Overlays & Comparisons]
|
| 800 |
+
```
|
| 801 |
+
|
| 802 |
+
### 4. **Tables**
|
| 803 |
+
|
| 804 |
+
| Component | Description |
|
| 805 |
+
|------------------------|-----------------------------------------------------------------------------|
|
| 806 |
+
| `TiffToPngConverter` | Converts TIFF images to PNG format with scaling. |
|
| 807 |
+
| `ImageSplitter` | Splits PNG images into sub-images of specified size. |
|
| 808 |
+
| `CellposeBatchProcessor` | Wraps the Cellpose API for segmentation, mask stitching, and visualization. |
|
| 809 |
+
| `NPYMaskStitcher` | Stitches tile masks into a full-resolution mask. |
|
| 810 |
+
| `PlotGenerator` | Generates overlays and comparisons between the original image and the segmented mask. |
|
| 811 |
+
|
| 812 |
+
### 5. **Code Snippets**
|
| 813 |
+
|
| 814 |
+
```python
|
| 815 |
+
# Example of Cellpose Batch Processor usage
|
| 816 |
+
processor = CellposeBatchProcessor(
|
| 817 |
+
input_dir=PNG_IMAGES_DIR,
|
| 818 |
+
output_dir=CELLPOSE_MASKS_DIR,
|
| 819 |
+
model_name="cyto3_restore",
|
| 820 |
+
bsize=1024,
|
| 821 |
+
overlap=0.15,
|
| 822 |
+
batch_size=6,
|
| 823 |
+
gpu=0,
|
| 824 |
+
channels=(2, 0),
|
| 825 |
+
diameter=CELL_DIAMETER
|
| 826 |
+
)
|
| 827 |
+
processor.process_all()
|
| 828 |
+
```
|
| 829 |
+
|
| 830 |
+
```python
|
| 831 |
+
# Example of Image Splitter usage
|
| 832 |
+
splitter = ImageSplitter(source_dir=PNG_IMAGES_DIR, output_dir=SPLIT_IMAGES_DIR, sub_image_width=640, sub_image_height=640)
|
| 833 |
+
splitter.split_all()
|
| 834 |
+
```
|
| 835 |
+
|
| 836 |
+
### 6. **Source Citations**
|
| 837 |
+
|
| 838 |
+
- **Cellpose Batch Processor**: [model/run_cellpose.py:123-145]()
|
| 839 |
+
- **Image Splitter**: [utils/generate_split_images.py:89-105]()
|
| 840 |
+
- **Mask Stitcher**: [utils/generate_masks.py:123-145]()
|
| 841 |
+
- **Plot Generator**: [utils/generate_plots.py:123-145]()
|
| 842 |
+
- **Tiff To Png Converter**: [utils/generate_pngs.py:89-105]()
|
| 843 |
+
|
| 844 |
+
### 7. **Conclusion**
|
| 845 |
+
|
| 846 |
+
The model integration component of the spinal cord cell segmentation project provides a robust, end-to-end pipeline for processing and segmenting spinal cord images. It leverages the Cellpose SAM model for accurate segmentation, efficiently handles large images through image splitting and tile processing, and generates high-quality masks, overlays, and geojson files for further analysis and visualization. The integration is modular and configurable, with all parameters and configurations centralized in `utils/constants.py`.
|
| 847 |
+
|
| 848 |
+
---
|
| 849 |
+
|
| 850 |
+
<a id='page-8'></a>
|
| 851 |
+
|
| 852 |
+
## Deployment/Infrastructure
|
| 853 |
+
|
| 854 |
+
|
| 855 |
+
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
<details>
|
| 859 |
+
<summary>Relevant source files</summary>
|
| 860 |
+
|
| 861 |
+
- utils/constants.py
|
| 862 |
+
- model/run_cellpose.py
|
| 863 |
+
- utils/generate_split_images.py
|
| 864 |
+
- utils/generate_masks.py
|
| 865 |
+
- utils/generate_pngs.py
|
| 866 |
+
</details>
|
| 867 |
+
|
| 868 |
+
# Deployment/Infrastructure
|
| 869 |
+
|
| 870 |
+
This section provides an overview of the deployment and infrastructure architecture of the spinal cord cell segmentation pipeline. The system is designed to be modular, configurable, and scalable, with all critical components and configurations centralized in `utils/constants.py`.
|
| 871 |
+
|
| 872 |
+
## Architecture Overview
|
| 873 |
+
|
| 874 |
+
The deployment infrastructure is built around a modular workflow that includes several key components:
|
| 875 |
+
|
| 876 |
+
1. **Main Pipeline**:
|
| 877 |
+
- The `main.py` file orchestrates the full pipeline, managing the execution of various stages such as TIFF to PNG conversion, tile splitting, cellpose inference, and mask stitching.
|
| 878 |
+
|
| 879 |
+
2. **Configuration Management**:
|
| 880 |
+
- The `bin/constants.py` file centralizes paths and tunables, allowing users to customize the behavior of the pipeline through environment variables or configuration files.
|
| 881 |
+
|
| 882 |
+
3. **Component Dependencies**:
|
| 883 |
+
- The pipeline relies on several external dependencies, including `cellpose`, `opencv-python`, `numpy`, `pillow`, and `tifffile`, which are automatically installed via `requirements.txt`.
|
| 884 |
+
|
| 885 |
+
4. **Execution Flow**:
|
| 886 |
+
- The pipeline is designed to run in a single command, with all necessary steps (TIFF → PNG conversion, tile splitting, cellpose inference, mask stitching) executed in a coordinated manner.
|
| 887 |
+
|
| 888 |
+
## Key Components and Architecture
|
| 889 |
+
|
| 890 |
+
### 1. Pipeline Execution
|
| 891 |
+
|
| 892 |
+
The pipeline is structured as a series of steps that are executed in a specific order:
|
| 893 |
+
|
| 894 |
+
```mermaid
|
| 895 |
+
flowchart TD
|
| 896 |
+
A[TIFF images] --> B[generate_pngs.py]:::step
|
| 897 |
+
B --> C[generate_split_images.py]:::step
|
| 898 |
+
C --> D[run_cellpose.py]:::step
|
| 899 |
+
D --> E[generate_masks.py]:::step
|
| 900 |
+
E --> F[Final segmentation]:::step
|
| 901 |
+
```
|
| 902 |
+
|
| 903 |
+
Each step is responsible for a specific task:
|
| 904 |
+
|
| 905 |
+
- **generate_pngs.py**: Converts TIFF images to PNG format with optional downscaling.
|
| 906 |
+
- **generate_split_images.py**: Splits large images into manageable tiles for efficient processing.
|
| 907 |
+
- **run_cellpose.py**: Wraps Cellpose API for inference on each tile.
|
| 908 |
+
- **generate_masks.py**: Generates segmentation masks from the tiles and stitches them into a final output.
|
| 909 |
+
|
| 910 |
+
### 2. Configuration and Tuning
|
| 911 |
+
|
| 912 |
+
All paths, tile overlap, and Cellpose parameters are configurable in `utils/constants.py`. Key configurations include:
|
| 913 |
+
|
| 914 |
+
- `IMG_HEIGHT`, `IMG_WIDTH`: Dimensions of the output images.
|
| 915 |
+
- `SCALING_FACTOR`: Scaling factor for downscaling TIFF images.
|
| 916 |
+
- `CELL_DIAMETER`: Diameter for cell segmentation.
|
| 917 |
+
- `TILE_H`, `TILE_W`: Size of the tiles used for processing.
|
| 918 |
+
|
| 919 |
+
### 3. Execution Flow and Dependencies
|
| 920 |
+
|
| 921 |
+
The pipeline is designed to be run in a single command, with all necessary steps executed in a coordinated manner. The `main.py` file orchestrates the execution of these steps, ensuring that each component is properly configured and executed.
|
| 922 |
+
|
| 923 |
+
### 4. Data Flow and Processing
|
| 924 |
+
|
| 925 |
+
The pipeline processes images in the following order:
|
| 926 |
+
|
| 927 |
+
1. **TIFF to PNG Conversion**:
|
| 928 |
+
- `generate_pngs.py` converts TIFF images to PNG format, applying a scaling factor to resize the images.
|
| 929 |
+
|
| 930 |
+
2. **Tile Splitting**:
|
| 931 |
+
- `generate_split_images.py` splits large images into smaller tiles for efficient processing.
|
| 932 |
+
|
| 933 |
+
3. **Cellpose Inference**:
|
| 934 |
+
- `run_cellpose.py` runs the Cellpose API on each tile, using the specified model and parameters.
|
| 935 |
+
|
| 936 |
+
4. **Mask Stitching**:
|
| 937 |
+
- `generate_masks.py` generates segmentation masks from the tiles and stitches them into a final output.
|
| 938 |
+
|
| 939 |
+
### 5. Key Functions and Classes
|
| 940 |
+
|
| 941 |
+
- **CellposeBatchProcessor**:
|
| 942 |
+
- A class that batch-processes images with Cellpose, saving outputs in designated directories.
|
| 943 |
+
- It handles the execution of the pipeline steps, including TIFF to PNG conversion, tile splitting, cellpose inference, and mask stitching.
|
| 944 |
+
|
| 945 |
+
- **ImageSplitter**:
|
| 946 |
+
- A class that splits PNG images into sub-images, which is used in the `generate_split_images.py` file.
|
| 947 |
+
|
| 948 |
+
- **MaskStitcher**:
|
| 949 |
+
- A class that stitches individual tile masks into a single, full-resolution segmentation mask.
|
| 950 |
+
|
| 951 |
+
- **OverlayGenerator**:
|
| 952 |
+
- A class that generates overlays and comparisons between the original images and the segmented masks.
|
| 953 |
+
|
| 954 |
+
### 6. Mermaid Diagrams
|
| 955 |
+
|
| 956 |
+
```mermaid
|
| 957 |
+
graph TD
|
| 958 |
+
A[TIFF images] --> B[generate_pngs.py]:::step
|
| 959 |
+
B --> C[generate_split_images.py]:::step
|
| 960 |
+
C --> D[run_cellpose.py]:::step
|
| 961 |
+
D --> E[generate_masks.py]:::step
|
| 962 |
+
E --> F[Final segmentation]:::step
|
| 963 |
+
```
|
| 964 |
+
|
| 965 |
+
This diagram shows the flow of the pipeline from input images to the final segmentation output.
|
| 966 |
+
|
| 967 |
+
### 7. Tables
|
| 968 |
+
|
| 969 |
+
| Component | Description |
|
| 970 |
+
|----------|-------------|
|
| 971 |
+
| `utils/constants.py` | Centralizes paths and tunables for the pipeline. |
|
| 972 |
+
| `model/run_cellpose.py` | Wraps Cellpose API for inference on each tile. |
|
| 973 |
+
| `utils/generate_split_images.py` | Splits PNG images into sub-images. |
|
| 974 |
+
| `utils/generate_masks.py` | Generates segmentation masks and stitches them. |
|
| 975 |
+
| `utils/generate_pngs.py` | Converts TIFF images to PNG format. |
|
| 976 |
+
|
| 977 |
+
### 8. Code Snippets
|
| 978 |
+
|
| 979 |
+
```python
|
| 980 |
+
# Example of a configuration in bin/constants.py
|
| 981 |
+
IMG_HEIGHT, IMG_WIDTH = 640, 640
|
| 982 |
+
SCALING_FACTOR = 0.2125
|
| 983 |
+
CELL_DIAMETER = 30.0
|
| 984 |
+
```
|
| 985 |
+
|
| 986 |
+
```python
|
| 987 |
+
# Example of a pipeline execution in main.py
|
| 988 |
+
python main.py
|
| 989 |
+
```
|
| 990 |
+
|
| 991 |
+
### 9. Source Citations
|
| 992 |
+
|
| 993 |
+
- `utils/constants.py: 12-15` (Configuration values)
|
| 994 |
+
- `model/run_cellpose.py: 10-15` (Pipeline execution logic)
|
| 995 |
+
- `utils/generate_split_images.py: 10-15` (Tile splitting logic)
|
| 996 |
+
- `utils/generate_masks.py: 10-15` (Mask stitching logic)
|
| 997 |
+
- `utils/generate_pngs.py: 10-15` (TIFF to PNG conversion logic)
|
| 998 |
+
|
| 999 |
+
---
|
| 1000 |
+
|
| 1001 |
+
<a id='page-9'></a>
|
| 1002 |
+
|
| 1003 |
+
## Extensibility and Customization
|
| 1004 |
+
|
| 1005 |
+
|
| 1006 |
+
|
| 1007 |
+
|
| 1008 |
+
|
| 1009 |
+
<details>
|
| 1010 |
+
<summary>Relevant source files</summary>
|
| 1011 |
+
|
| 1012 |
+
- README.md
|
| 1013 |
+
- utils/generate_training_split_img_masks.py
|
| 1014 |
+
- generate_training_data.py
|
| 1015 |
+
- utils/generate_split_images.py
|
| 1016 |
+
- utils/generate_pngs.py
|
| 1017 |
+
</details>
|
| 1018 |
+
|
| 1019 |
+
# Extensibility and Customization
|
| 1020 |
+
|
| 1021 |
+
This page explores the extensibility and customization capabilities of the spinal cord cell segmentation pipeline, focusing on how the project allows for flexible configuration, integration with external models, and modular architecture.
|
| 1022 |
+
|
| 1023 |
+
## Introduction
|
| 1024 |
+
|
| 1025 |
+
The project is designed to be highly extensible, allowing users to customize various aspects of the segmentation pipeline. This includes configuring model parameters, adjusting image processing steps, and integrating with external tools or models. The architecture is built around modular components that can be easily extended or replaced without disrupting the overall workflow.
|
| 1026 |
+
|
| 1027 |
+
## Detailed Sections
|
| 1028 |
+
|
| 1029 |
+
### 1. Modular Architecture
|
| 1030 |
+
|
| 1031 |
+
The pipeline is structured around modular components that can be independently configured or replaced. Key components include:
|
| 1032 |
+
|
| 1033 |
+
- **Main Pipeline**: Orchestrates the full workflow from TIFF conversion to final segmentation.
|
| 1034 |
+
- **Model Configuration**: Allows users to specify which Cellpose model to use (e.g., cyto3, cellpose, etc.) and adjust parameters like flow threshold, cell probability, and tile overlap.
|
| 1035 |
+
- **Image Processing**: Provides tools for splitting images into tiles, converting TIFFs to PNGs, and generating overlays for comparison.
|
| 1036 |
+
|
| 1037 |
+
### 2. Model Customization
|
| 1038 |
+
|
| 1039 |
+
The project supports custom model configurations through the `bin/constants.py` file. Users can:
|
| 1040 |
+
|
| 1041 |
+
- Specify the path to a custom Cellpose model.
|
| 1042 |
+
- Adjust parameters like `flow_threshold`, `cellprob_threshold`, and `min_size`.
|
| 1043 |
+
- Configure how images are split into tiles and processed.
|
| 1044 |
+
|
| 1045 |
+
### 3. Image Processing and Splitting
|
| 1046 |
+
|
| 1047 |
+
The pipeline includes tools for image splitting and conversion:
|
| 1048 |
+
|
| 1049 |
+
- **ImageSplitter**: Splits PNG images into sub-images and saves them to an output directory.
|
| 1050 |
+
- **TiffToPngConverter**: Converts TIFF images to PNG format with optional downscaling.
|
| 1051 |
+
- **generate_split_images.py**: Handles the splitting of large images into manageable tiles.
|
| 1052 |
+
|
| 1053 |
+
### 4. Custom Model Integration
|
| 1054 |
+
|
| 1055 |
+
The project allows for the integration of custom models through the `run_cellpose.py` file. Users can:
|
| 1056 |
+
|
| 1057 |
+
- Replace the default `cyto3` model with any other Cellpose model.
|
| 1058 |
+
- Adjust model parameters and behavior through the `constants.py` file.
|
| 1059 |
+
|
| 1060 |
+
### 5. Customization via Configuration
|
| 1061 |
+
|
| 1062 |
+
The pipeline is highly configurable through the `bin/constants.py` file, which contains:
|
| 1063 |
+
|
| 1064 |
+
- Centralized paths and tunables for image processing, model selection, and output directories.
|
| 1065 |
+
- Parameters like `scaling_factor`, `tile_overlap`, and `batch_size`.
|
| 1066 |
+
|
| 1067 |
+
### 6. Extensibility via Plugins
|
| 1068 |
+
|
| 1069 |
+
The project supports plugins and custom tools through the `utils/` directory. Users can:
|
| 1070 |
+
|
| 1071 |
+
- Create custom tools for image processing, segmentation, or output generation.
|
| 1072 |
+
- Extend the pipeline by adding new steps or integrating with external tools.
|
| 1073 |
+
|
| 1074 |
+
### 7. Mermaid Diagram
|
| 1075 |
+
|
| 1076 |
+
```mermaid
|
| 1077 |
+
graph TD
|
| 1078 |
+
A[Main Pipeline] --> B[Image Processing]
|
| 1079 |
+
B --> C[TiffToPngConverter]
|
| 1080 |
+
C --> D[ImageSplitter]
|
| 1081 |
+
D --> E[run_cellpose.py]
|
| 1082 |
+
E --> F[generate_masks.py]
|
| 1083 |
+
F --> G[Final Segmentation]
|
| 1084 |
+
```
|
| 1085 |
+
|
| 1086 |
+
### 8. Tables
|
| 1087 |
+
|
| 1088 |
+
| Feature | Description |
|
| 1089 |
+
|--------|-------------|
|
| 1090 |
+
| Model Selection | Users can choose between different Cellpose models (e.g., cyto3, cellpose, etc.). |
|
| 1091 |
+
| Image Scaling | Allows for optional downscaling of images to speed up processing. |
|
| 1092 |
+
| Tile Configuration | Controls how images are split into tiles and processed. |
|
| 1093 |
+
| Output Directory | Specifies where the output files (PNGs, masks, etc.) are saved. |
|
| 1094 |
+
|
| 1095 |
+
### 9. Code Snippets
|
| 1096 |
+
|
| 1097 |
+
```python
|
| 1098 |
+
# Example of model configuration in constants.py
|
| 1099 |
+
model_path = "path/to/custom_model.pth"
|
| 1100 |
+
```
|
| 1101 |
+
|
| 1102 |
+
```python
|
| 1103 |
+
# Example of image processing in generate_split_images.py
|
| 1104 |
+
sub_w = 512
|
| 1105 |
+
sub_h = 512
|
| 1106 |
+
```
|
| 1107 |
+
|
| 1108 |
+
### 10. Source Citations
|
| 1109 |
+
|
| 1110 |
+
- **Model Configuration**: `utils/constants.py` (line numbers not provided)
|
| 1111 |
+
- **Image Processing**: `utils/generate_split_images.py` (line numbers not provided)
|
| 1112 |
+
- **Pipeline Architecture**: `README.md` (line numbers not provided)
|
| 1113 |
+
- **Custom Model Integration**: `model/run_cellpose.py` (line numbers not provided)
|
| 1114 |
+
- **Configuration Options**: `bin/constants.py` (line numbers not provided)
|
| 1115 |
+
|
| 1116 |
+
---
|
| 1117 |
+
|
generate_training_data.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# imports
# generate_training_data.py — builds the training dataset end to end:
#   TIFF -> PNG -> tiles, then GeoJSON annotations -> label masks -> training tiles.
# setup_logging, the *_DIR paths, SCALING_FACTOR, and IMG_WIDTH/IMG_HEIGHT come
# from the star import of utils.constants; np, Image, geojson_to_mask_png,
# make_bw_preview, and make_colored_preview presumably come from the star
# import of utils.generate_training_dataset — TODO confirm.
# NOTE(review): MaskStitcher, CellposeBatchProcessor, and OverlayGenerator are
# imported but unused in this script.
import logging, os
from pathlib import Path
from utils.constants import *
from utils.generate_split_images import ImageSplitter
from utils.generate_masks import MaskStitcher
from utils.generate_pngs import TiffToPngConverter
from model.run_cellpose import CellposeBatchProcessor
from utils.generate_image_overlays import OverlayGenerator
from utils.generate_training_dataset import *
from utils.generate_training_split_img_masks import split_folder

# constants
TILE_H = TILE_W = 1024 # training tile size

# generate - pngs
# Stage 1: downscale the raw TIFF slides into PNGs (scale = SCALING_FACTOR).
setup_logging(logging.INFO)
converter = TiffToPngConverter(scaling_factor=SCALING_FACTOR, tif_dir=TIF_IMAGES_DIR, output_dir=PNG_IMAGES_DIR)
converter.convert_all()

# generate - splits
# Stage 2: tile the PNGs into IMG_WIDTH x IMG_HEIGHT sub-images.
setup_logging(logging.INFO)
splitter = ImageSplitter(source_dir=PNG_IMAGES_DIR, output_dir=SPLIT_IMAGES_DIR, sub_image_width=IMG_WIDTH, sub_image_height=IMG_HEIGHT)
splitter.split_all()

# generate - masks for training
# Stage 3: rasterize each slide's GeoJSON annotations into a uint16 label
# mask, then render black/white and colored preview images next to it.
setup_logging(logging.INFO)
os.makedirs(TRAIN_MASKS_DIR, exist_ok=True)
for image in TIF_IMAGES_DIR.glob("*.tif"):
    img_path = Path(image)  # glob already yields Path objects; this wrap is a no-op
    geojson_path = GEOJSON_DIR / (img_path.stem + ".geojson")
    # NOTE(review): despite the name, mask_png points at a .tif file.
    mask_png = TRAIN_MASKS_DIR / (img_path.stem + "_masks.tif")
    geojson_to_mask_png(img_path, geojson_path, mask_png)
    # Re-load the rasterized mask as a uint16 label array for the previews.
    label_mask = np.array(Image.open(mask_png), dtype=np.uint16)
    make_bw_preview(label_mask, mask_png.with_name(img_path.stem + "_mask_bw.png"))
    make_colored_preview(label_mask, mask_png.with_name(img_path.stem + "_mask_color.png"))

# generate - split images and masks for training
# Stage 4: cut matching TILE_H x TILE_W image/mask tile pairs for training.
setup_logging(logging.INFO)
split_folder(TIF_IMAGES_DIR, TRAIN_MASKS_DIR, TRAIN_SPLIT_IMG_MASKS_DIR, TILE_H, TILE_W)



# Ad-hoc single-file run kept for reference.
# NOTE(review): stems differ (SP24_008 image vs SP24_088 geojson) — likely a typo.
# img_path = Path("/Users/discovery/Downloads/SP24_008_2.ome.tif")
# geojson_path = Path("/Users/discovery/Downloads/SP24_088_2.geojson")
# mask_png = Path("/Users/discovery/Downloads/SP24_008_2_mask.tif")
|
| 47 |
+
|
| 48 |
+
|
main.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# imports
# main.py — end-to-end inference pipeline, executed top to bottom:
#   TIFF -> PNG -> tiles -> Cellpose-SAM detection -> stitched masks
#   -> overlay plots -> QuPath-compatible GeoJSON.
# setup_logging and all *_DIR / SCALING_FACTOR / IMG_* / MODEL names come from
# the star import of utils.constants.
import logging
from pathlib import Path  # NOTE(review): unused by the active code below
from utils.constants import *
from utils.generate_plots import PlotGenerator
from utils.generate_split_images import ImageSplitter
from utils.generate_masks import MaskStitcher  # used only by the archived code below
from utils.generate_combine_masks import NPYMaskStitcher
from utils.generate_pngs import TiffToPngConverter
from model.run_cellpose import CellposeBatchProcessor  # used only by the archived code below
from utils.generate_image_overlays import OverlayGenerator  # used only by the archived code below
from model.run_cellpose_sam import cellpose_sam_detect_images_eval
from utils.generate_geojson_qp_mask import MaskToGeoJSONConverter

# generate - pngs
# Stage 1: downscale raw TIFF slides to PNGs (scale = SCALING_FACTOR).
# setup_logging is re-invoked before every stage; presumably idempotent.
setup_logging(logging.INFO)
converter = TiffToPngConverter(scaling_factor=SCALING_FACTOR, tif_dir=TIF_IMAGES_DIR, output_dir=PNG_IMAGES_DIR)
converter.convert_all()

# generate - splits
# Stage 2: tile the PNGs into IMG_WIDTH x IMG_HEIGHT sub-images.
setup_logging(logging.INFO)
splitter = ImageSplitter(source_dir=PNG_IMAGES_DIR, output_dir=SPLIT_IMAGES_DIR, sub_image_width=IMG_WIDTH, sub_image_height=IMG_HEIGHT)
splitter.split_all()

# generate - cellpose masks (detect step using a pre-trained model)
# Stage 3: run the Cellpose-SAM model on every tile; writes one .npy label
# mask per tile into CELLPOSE_MASKS_DIR.
setup_logging(logging.INFO)
cellpose_sam_detect_images_eval(model_path=MODEL, image_input_dir=SPLIT_IMAGES_DIR, image_output_dir=CELLPOSE_MASKS_DIR)

# generate - stitched masks (.npy files)
# Stage 4: reassemble per-tile masks into one full-resolution mask per slide.
setup_logging(logging.INFO)
stitcher = NPYMaskStitcher(input_dir=CELLPOSE_MASKS_DIR, output_dir=STITCHED_MASKS_DIR)
stitcher.stitch_all()

# generate - plots
# Stage 5: overlay/boundary visualizations (RGB fill + boundary colors, 50% blend).
setup_logging(logging.INFO)
plotter = PlotGenerator(image_dir=PNG_IMAGES_DIR, mask_dir=STITCHED_MASKS_DIR, output_dir=OUTPUT_DIR, overlay_color=(238,144,144), boundary_color=(100,100,255), alpha=0.5)
plotter.run()

# generate - geojsons
# Stage 6: convert stitched masks to GeoJSON for QuPath.
# NOTE(review): the Stage-1 downscale factor is passed as
# upscale_factor=SCALING_FACTOR — confirm the converter inverts it internally.
# `converter` is deliberately rebound from the Stage-1 TiffToPngConverter.
setup_logging(logging.INFO)
converter = MaskToGeoJSONConverter(mask_dir=STITCHED_MASKS_DIR, output_dir=GEOJSON_OUTS_DIR, upscale_factor=SCALING_FACTOR)
converter.convert_all()



########## archived code ##########
# # cellpose - masks
# setup_logging(logging.INFO)
# processor = CellposeBatchProcessor(input_dir=SPLIT_IMAGES_DIR, output_dir=CELLPOSE_MASKS_DIR, model_name="",
#             bsize=640, overlap=0.15, batch_size=6, gpu=0, channels=(2,0), diameter=CELL_DIAMETER)
# processor.process_all()

# # stitch - masks
# setup_logging(logging.INFO)
# stitcher = MaskStitcher(input_dir=CELLPOSE_MASKS_DIR, output_dir=STITCHED_MASKS_DIR)
# stitcher.stitch_all()

# # img-mask overlay & comparison
# overlay_gen = OverlayGenerator(original_dir = PNG_IMAGES_DIR, mask_dir = STITCHED_MASKS_DIR, output_dir = OUTPUT_DIR, mask_color = (255, 0, 0), alpha = 0.5)
# overlay_gen.run()
|
model/__init__.py
ADDED
|
File without changes
|
model/run_cellpose.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Developed by Nikhil Nageshwar Inturi
|
| 4 |
+
|
| 5 |
+
Batch-process .png or .tif slides with Cellpose, saving masks, previews, and segmentation arrays
|
| 6 |
+
into designated directories.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
# imports
|
| 10 |
+
from PIL import Image
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from cellpose import models, io
|
| 13 |
+
from typing import Tuple, Union
|
| 14 |
+
from cellpose import plot as cplt
|
| 15 |
+
import os, numpy as np, logging, matplotlib.pyplot as plt
|
| 16 |
+
|
| 17 |
+
# local imports
|
| 18 |
+
from utils.constants import *
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class CellposeBatchProcessor:
    """
    Batch-process a directory of images with Cellpose,
    saving outputs in MASKS_DIR, PREVIEW_DIR, and SEGMENTATION_DIR.

    For each .png/.tif found in ``input_dir`` the processor runs
    ``CellposeModel.eval`` and writes three artifacts under ``output_dir``:

    * ``PREVIEW_DIR/<stem>.png``      — matplotlib segmentation preview
    * ``MASKS_DIR/<stem>.png``        — uint16 label-mask image
    * ``SEGMENTATION_DIR/<stem>.npy`` — raw label array

    The three sub-directory names come from ``utils.constants``. Failures on
    individual images are logged and skipped, so one bad file does not abort
    the batch.
    """

    def __init__(self, input_dir: Union[str, Path], output_dir: Union[str, Path], model_name: str = "cyto3_restore",
                 bsize: int = 2048, overlap: float = 0.15, batch_size: int = 6, gpu: int = 0, channels: Tuple[int, int] = (1, 0), diameter: int = 50) -> None:
        """
        Load the Cellpose model and create the output directory tree.

        Args:
            input_dir: Directory scanned for input .png/.tif images.
            output_dir: Root directory for all generated artifacts.
            model_name: Pretrained Cellpose model name or checkpoint path.
            bsize: Tile size (pixels) passed to ``model.eval``.
            overlap: Fractional overlap between adjacent tiles.
            batch_size: Number of tiles evaluated per forward pass.
            gpu: CUDA device index; any negative value selects CPU.
            channels: Cellpose channel spec (segmentation, optional nuclear).
            diameter: Expected cell diameter in pixels.
        """
        self.input_dir = Path(input_dir)
        self.output_dir = Path(output_dir)
        self.model_name = model_name
        self.bsize = bsize
        self.overlap = overlap
        self.batch_size = batch_size
        self.gpu = gpu
        self.channels = list(channels)
        self.diameter = diameter

        # Pin the process to the requested CUDA device *before* the model is
        # instantiated, so Cellpose/torch picks it up.
        if self.gpu >= 0:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.model = models.CellposeModel(gpu=(self.gpu >= 0), pretrained_model=self.model_name)
        self.output_dir.mkdir(parents=True, exist_ok=True)
        for sub in (MASKS_DIR, PREVIEW_DIR, SEGMENTATION_DIR):
            dir_path = self.output_dir / sub
            dir_path.mkdir(parents=True, exist_ok=True)
            self.logger.debug(f"Created output directory: {dir_path}")

    def process_all(self) -> None:
        """
        Find all .png/.tif images in input_dir and process each.

        Errors from individual images are logged with traceback and the batch
        continues with the next file.
        """
        img_paths = sorted(
            list(self.input_dir.glob("*.png")) + list(self.input_dir.glob("*.tif"))
        )
        if not img_paths:
            self.logger.warning(f"No images found in {self.input_dir}")
            return

        self.logger.info(f"Found {len(img_paths)} images in {self.input_dir}")
        for img_path in img_paths:
            try:
                self._process_image(img_path)
            except Exception:
                # Deliberate best-effort: record the failure and keep going.
                self.logger.exception(f"Failed processing {img_path.name}")

    def _process_image(self, img_path: Path) -> None:
        """
        Process a single image: segment, save masks, preview, and numpy array.
        """
        stem = img_path.stem
        self.logger.info(f"Processing: {img_path.name}")

        img = io.imread(str(img_path))
        # Skip images whose segmentation channel is entirely black.
        # NOTE(review): self.channels[0] is used directly as a 0-based numpy
        # plane index, while Cellpose channel specs are conventionally
        # 1-based — confirm this inspects the intended plane.
        if img.ndim == 3 and img[:, :, self.channels[0]].max() == 0:
            self.logger.warning(f"Channel {self.channels[0]} empty — skipping {img_path.name}")
            return

        masks, flows, styles = self.model.eval(
            img,
            channels=self.channels,
            diameter=self.diameter,
            bsize=self.bsize,
            tile_overlap=self.overlap,
            batch_size=self.batch_size,
            resample=False  # presumably chosen for speed — confirm mask quality is acceptable
        )

        # Preview figure: source image, predicted outlines, and flow field.
        fig = plt.figure(figsize=(12, 5))
        cplt.show_segmentation(fig, img, masks, flows[0], channels=self.channels)
        plt.tight_layout()

        preview_path = self.output_dir / PREVIEW_DIR / f"{stem}.png"
        fig.savefig(preview_path, dpi=150, bbox_inches="tight")
        plt.close(fig)  # release the figure so memory does not grow across the batch
        self.logger.info(f"Saved preview: {preview_path}")

        # uint16 keeps up to 65535 distinct cell labels in a standard PNG.
        mask_path = self.output_dir / MASKS_DIR / f"{stem}.png"
        Image.fromarray(masks.astype("uint16")).save(mask_path)
        self.logger.info(f"Saved mask: {mask_path}")

        seg_path = self.output_dir / SEGMENTATION_DIR / f"{stem}.npy"
        np.save(seg_path, masks)
        self.logger.info(f"Saved segmentation array: {seg_path}")
|
model/run_cellpose_sam.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# imports
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
import os, re, numpy as np
|
| 4 |
+
from cellpose import models
|
| 5 |
+
from skimage import io as skio
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def cellpose_sam_detect_images_eval(model_path, image_input_dir, image_output_dir, image_ext=".png", flow_threshold=0.9, cellprob_threshold=-6, min_size=1):
    """
    Segment every image in a directory with a Cellpose-SAM model and save
    one label-mask .npy per image.

    Args:
        model_path (str): Path to the pretrained Cellpose-SAM model.
        image_input_dir (Path): Directory containing the input images.
        image_output_dir (Path or str): Directory to save the masks into.
        image_ext (str): Image file extension to match.
        flow_threshold (float): Flow-error threshold passed to Cellpose.
        cellprob_threshold (float): Cell-probability threshold passed to Cellpose.
        min_size (int): Minimum mask size (pixels) kept by Cellpose.
    """
    # Skip Cellpose's own side-car outputs in case the input dir was reused.
    image_files = [f for f in image_input_dir.glob("*" + image_ext)
                   if "_masks" not in f.name and "_flows" not in f.name]
    model = models.CellposeModel(gpu=True, pretrained_model=model_path)
    os.makedirs(image_output_dir, exist_ok=True)

    for image_file in tqdm(image_files, desc="Segmenting images"):
        # BUGFIX: glob() already yields full paths; the previous
        # os.path.join(image_input_dir, image_file) duplicated the directory
        # prefix whenever image_input_dir was a relative path. Also removed
        # the leftover debug print of image_output_dir.
        img = skio.imread(str(image_file))
        masks, flows, styles = model.eval(
            [img],
            batch_size=16,
            flow_threshold=flow_threshold,
            cellprob_threshold=cellprob_threshold,
            augment=True,
            resample=True,
            min_size=min_size,
        )
        mask_path = Path(image_output_dir) / f"{image_file.stem}.npy"
        np.save(mask_path, masks[0])
|
notebooks/trained_model_prediction.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
pyproject.toml
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[tool.poetry]
|
| 2 |
+
name = "auto_segmenter"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "Automated cell segmentation pipeline"
|
| 5 |
+
authors = ["Nikhil Nageshwar Inturi <inturinikhilnageshwar@gmail.com>"]
|
| 6 |
+
|
| 7 |
+
[tool.poetry.dependencies]
|
| 8 |
+
python = "^3.8"
|
| 9 |
+
# runtime dependencies (keep in sync with requirements.txt, which pins the full environment):
|
| 10 |
+
streamlit = "^1.25.0"
|
| 11 |
+
|
| 12 |
+
[build-system]
|
| 13 |
+
requires = ["poetry-core"]
|
| 14 |
+
build-backend = "poetry.core.masonry.api"
|
requirements.txt
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
altair==5.5.0
|
| 2 |
+
attrs==25.3.0
|
| 3 |
+
blinker==1.9.0
|
| 4 |
+
cachetools==5.5.2
|
| 5 |
+
cellpose==4.0.4
|
| 6 |
+
certifi==2025.4.26
|
| 7 |
+
charset-normalizer==3.4.2
|
| 8 |
+
click==8.2.1
|
| 9 |
+
contourpy==1.3.2
|
| 10 |
+
cycler==0.12.1
|
| 11 |
+
fastremap==1.16.1
|
| 12 |
+
filelock==3.18.0
|
| 13 |
+
fill_voids==2.0.8
|
| 14 |
+
fonttools==4.58.2
|
| 15 |
+
fsspec==2025.5.1
|
| 16 |
+
gitdb==4.0.12
|
| 17 |
+
GitPython==3.1.44
|
| 18 |
+
idna==3.10
|
| 19 |
+
imagecodecs==2025.3.30
|
| 20 |
+
imageio==2.37.0
|
| 21 |
+
Jinja2==3.1.6
|
| 22 |
+
jsonschema==4.24.0
|
| 23 |
+
jsonschema-specifications==2025.4.1
|
| 24 |
+
kiwisolver==1.4.8
|
| 25 |
+
lazy_loader==0.4
|
| 26 |
+
MarkupSafe==3.0.2
|
| 27 |
+
matplotlib==3.10.3
|
| 28 |
+
mpmath==1.3.0
|
| 29 |
+
narwhals==1.42.1
|
| 30 |
+
natsort==8.4.0
|
| 31 |
+
networkx==3.5
|
| 32 |
+
numpy==2.3.0
|
| 33 |
+
nvidia-cublas-cu12==12.6.4.1
|
| 34 |
+
nvidia-cuda-cupti-cu12==12.6.80
|
| 35 |
+
nvidia-cuda-nvrtc-cu12==12.6.77
|
| 36 |
+
nvidia-cuda-runtime-cu12==12.6.77
|
| 37 |
+
nvidia-cudnn-cu12==9.5.1.17
|
| 38 |
+
nvidia-cufft-cu12==11.3.0.4
|
| 39 |
+
nvidia-cufile-cu12==1.11.1.6
|
| 40 |
+
nvidia-curand-cu12==10.3.7.77
|
| 41 |
+
nvidia-cusolver-cu12==11.7.1.2
|
| 42 |
+
nvidia-cusparse-cu12==12.5.4.2
|
| 43 |
+
nvidia-cusparselt-cu12==0.6.3
|
| 44 |
+
nvidia-nccl-cu12==2.26.2
|
| 45 |
+
nvidia-nvjitlink-cu12==12.6.85
|
| 46 |
+
nvidia-nvtx-cu12==12.6.77
|
| 47 |
+
opencv-python-headless==4.11.0.86
|
| 48 |
+
packaging==24.2
|
| 49 |
+
pandas==2.3.0
|
| 50 |
+
pillow==11.2.1
|
| 51 |
+
protobuf==6.31.1
|
| 52 |
+
pyarrow==20.0.0
|
| 53 |
+
pydeck==0.9.1
|
| 54 |
+
pyparsing==3.2.3
|
| 55 |
+
python-dateutil==2.9.0.post0
|
| 56 |
+
pytz==2025.2
|
| 57 |
+
referencing==0.36.2
|
| 58 |
+
requests==2.32.4
|
| 59 |
+
roifile==2025.5.10
|
| 60 |
+
rpds-py==0.25.1
|
| 61 |
+
scikit-image==0.25.2
|
| 62 |
+
scipy==1.15.3
|
| 63 |
+
segment-anything==1.0
|
| 64 |
+
setuptools==80.9.0
|
| 65 |
+
six==1.17.0
|
| 66 |
+
smmap==5.0.2
|
| 67 |
+
streamlit==1.45.1
|
| 68 |
+
sympy==1.14.0
|
| 69 |
+
tenacity==9.1.2
|
| 70 |
+
tifffile==2025.6.11
|
| 71 |
+
toml==0.10.2
|
| 72 |
+
torch==2.7.1
|
| 73 |
+
torchvision==0.22.1
|
| 74 |
+
tornado==6.5.1
|
| 75 |
+
tqdm==4.67.1
|
| 76 |
+
triton==3.3.1
|
| 77 |
+
typing_extensions==4.14.0
|
| 78 |
+
tzdata==2025.2
|
| 79 |
+
urllib3==2.4.0
|
| 80 |
+
watchdog==6.0.0
|
| 81 |
+
wheel==0.45.1
|
streamlit_app.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
Streamlit front-end for the Cellpose automation pipeline.
Allows uploading a TIF, runs conversion → split → cellpose → stitching → overlay/comparison → geojson,
then displays results and provides download links.
"""

# imports
import streamlit as st, logging, shutil
from PIL import Image
from pathlib import Path
from utils.constants import *
from utils.generate_plots import PlotGenerator
from utils.generate_split_images import ImageSplitter
from utils.generate_masks import MaskStitcher
from utils.generate_combine_masks import NPYMaskStitcher
from utils.generate_pngs import TiffToPngConverter
from model.run_cellpose import CellposeBatchProcessor
from utils.generate_image_overlays import OverlayGenerator
from model.run_cellpose_sam import cellpose_sam_detect_images_eval
from utils.generate_geojson_qp_mask import MaskToGeoJSONConverter

# Every pipeline stage writes into its own numbered directory (see constants).
dirs = [TIF_IMAGES_DIR, PNG_IMAGES_DIR, SPLIT_IMAGES_DIR, CELLPOSE_MASKS_DIR, STITCHED_MASKS_DIR, OUTPUT_DIR, GEOJSON_OUTS_DIR]

st.title("Cellpose-sam for DRGs - Automated Pipeline")

uploaded = st.file_uploader("Upload a TIFF image", type=["tif"])
if uploaded:

    # Wipe and recreate all stage directories so each run starts clean.
    for d in dirs:
        p = Path(d)
        if p.exists() and p.is_dir():
            shutil.rmtree(p)  # to refresh the directory
        p.mkdir(parents=True, exist_ok=True)

    tif_path = TIF_IMAGES_DIR / uploaded.name
    with open(tif_path, "wb") as f:
        f.write(uploaded.getbuffer())  # save TIFF
    st.success(f"Saved input to {tif_path}")
    stem = tif_path.stem

    # generate - pngs
    with st.spinner("Converting TIFF to PNG..."):
        TiffToPngConverter(scaling_factor=SCALING_FACTOR, tif_dir=TIF_IMAGES_DIR, output_dir=PNG_IMAGES_DIR).convert_all()
    # generate - splits
    with st.spinner("Splitting PNG into tiles..."):
        ImageSplitter(source_dir=PNG_IMAGES_DIR, output_dir=SPLIT_IMAGES_DIR, sub_image_width=IMG_WIDTH, sub_image_height=IMG_HEIGHT).split_all()
    # generate - cellpose masks (detect step using a pre-trained model)
    with st.spinner("Running Cellpose segmentation..."):
        cellpose_sam_detect_images_eval(model_path=MODEL, image_input_dir=SPLIT_IMAGES_DIR, image_output_dir=CELLPOSE_MASKS_DIR)
    # generate - stitched masks (.npy files)
    with st.spinner("Stitching masks..."):
        NPYMaskStitcher(input_dir=CELLPOSE_MASKS_DIR, output_dir=STITCHED_MASKS_DIR).stitch_all()
    # generate - plots
    with st.spinner("Generating overlays and comparisons..."):
        PlotGenerator(image_dir=PNG_IMAGES_DIR, mask_dir=STITCHED_MASKS_DIR, output_dir=OUTPUT_DIR, overlay_color=(238, 144, 144), boundary_color=(100, 100, 255), alpha=0.5).run()
    # generate - geojsons
    with st.spinner("Generating GeoJSON files..."):
        MaskToGeoJSONConverter(mask_dir=STITCHED_MASKS_DIR, output_dir=GEOJSON_OUTS_DIR, upscale_factor=SCALING_FACTOR).convert_all()

    st.success("Pipeline complete!")

    # download buttons
    st.header("Download segmentation masks")
    geojson_file = GEOJSON_OUTS_DIR / f"{stem}.geojson"

    if geojson_file.exists():
        # BUGFIX: read the bytes instead of handing Streamlit an open file
        # handle that was never closed.
        st.download_button(label="Download .geojson mask", data=geojson_file.read_bytes(), file_name=geojson_file.name)

    overlay_file = OUTPUT_DIR / f"{stem}_overlay.png"
    if overlay_file.exists():
        # BUGFIX: caption was the literal text "{stem} - overlay" — the
        # f-string prefix was missing.
        st.image(Image.open(overlay_file), caption=f"{stem} - overlay", use_column_width=True)
else:
    st.info("Please upload a TIFF image to begin.")
|
utils/__init__.py
ADDED
|
File without changes
|
utils/constants.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# imports
|
| 2 |
+
import logging
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import Union
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def setup_logging(level: Union[int, str] = logging.INFO) -> None:
    """Set up the root logger with a timestamped format at the given level."""
    log_format = "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
    logging.basicConfig(format=log_format, level=level)
|
| 13 |
+
|
| 14 |
+
# setup_logging - usage
|
| 15 |
+
# from constants import setup_logging
|
| 16 |
+
# setup_logging(logging.INFO)
|
| 17 |
+
|
| 18 |
+
# --- model & geometry settings ------------------------------------------
# Fine-tuned Cellpose-SAM weights used by the detection step.
cp_sam_model = "/mnt/WorkingDos/cellpose_sam/models/cp_sam_hdrg_topoint_model"
MODEL = cp_sam_model  # earlier experiments: "cyto3_restore", cellpose_1746568542.462492 checkpoint
SCALING_FACTOR = 0.2125  # TIFF→PNG downscale factor (0.10625 was also tried)
IMG_HEIGHT, IMG_WIDTH = 1024, 1024  # tile size for splitting (640x640 was also tried)
CELL_DIAMETER = 30.0  # expected cell diameter in pixels

# --- pipeline directories -----------------------------------------------
# Root under which every numbered pipeline stage reads/writes its data.
# (A macOS path for the ish_hDGR samples was used previously.)
CONFIG_DIR = Path('/mnt/WorkingDos/cellpose_sam/spinal_cord_segmentation/data')

TIF_IMAGES_DIR = CONFIG_DIR / '1_tif_images'

PNG_IMAGES_DIR = CONFIG_DIR / '2_png_images'
SPLIT_IMAGES_DIR = CONFIG_DIR / '3_split_images'
CELLPOSE_MASKS_DIR = CONFIG_DIR / '4_cellpose_masks'
STITCHED_MASKS_DIR = CONFIG_DIR / '5_stitched_masks'
OUTPUT_DIR = CONFIG_DIR / '6_output_masks'
TRAIN_MASKS_DIR = CONFIG_DIR / '7_train_masks'
TRAIN_SPLIT_IMG_MASKS_DIR = CONFIG_DIR / '8_train_split_img_masks'
GEOJSON_OUTS_DIR = CONFIG_DIR / '9_geojson_outs'

GEOJSON_DIR = Path('/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/geojsons_dir')  # training param

# Sub-folder names created inside the Cellpose output root.
MASKS_DIR = 'masks'
PREVIEW_DIR = 'preview'
SEGMENTATION_DIR = 'segmentation'
|
utils/generate_combine_masks.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Developed by Nikhil Nageshwar Inturi
|
| 4 |
+
|
| 5 |
+
This module provides MaskStitcher for stitching tiled .npy masks
|
| 6 |
+
back into full-size masks, one per original image stem.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import re
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
import numpy as np
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
class NPYMaskStitcher:
    """
    Scans an input directory for files matching <stem>_<row>_<col>.npy,
    groups them by stem, and stitches each group into a single full-size
    label mask saved as <stem>.npy in the output directory.
    """

    TILE_PATTERN = re.compile(r'^(?P<stem>.+)_(?P<row>\d+)_(?P<col>\d+)\.npy$')

    def __init__(self, input_dir: Path, output_dir: Path) -> None:
        self.input_dir = Path(input_dir)
        self.output_dir = Path(output_dir)
        self.logger = logging.getLogger(self.__class__.__name__)
        self._setup_output_directory()

    def _setup_output_directory(self) -> None:
        """Create the output directory, logging and re-raising on failure."""
        try:
            self.output_dir.mkdir(parents=True, exist_ok=True)
            self.logger.debug(f"Output directory ready: {self.output_dir}")
        except Exception as e:
            self.logger.error(f"Could not create output directory {self.output_dir}: {e}")
            raise

    def stitch_all(self) -> None:
        """
        Find all .npy tiles, group by stem, and stitch each group.
        Failures are logged per-stem and do not abort the remaining stems.
        """
        all_files = list(self.input_dir.glob("*.npy"))
        if not all_files:
            self.logger.warning(f"No .npy files found in {self.input_dir}")
            return

        # group files by stem
        stems = {}
        for p in all_files:
            m = self.TILE_PATTERN.match(p.name)
            if not m:
                self.logger.warning(f"Skipping unrecognized file name: {p.name}")
                continue
            stems.setdefault(m.group("stem"), []).append(p)

        for stem, paths in stems.items():
            try:
                self._stitch_stem(stem, paths)
                self.logger.info(f"Stitched mask for '{stem}' → {stem}.npy")
            except Exception:
                self.logger.exception(f"Failed to stitch tiles for '{stem}'")

    def _stitch_stem(self, stem: str, paths: list[Path]) -> None:
        """
        Given all tile paths for a single stem, reconstruct the full mask.

        Row heights / column widths are the per-row / per-column maxima of
        the tile shapes. BUGFIX: tile labels are now offset so that instance
        IDs remain unique across the stitched mask — previously two cells in
        different tiles could share the same label and would be merged by
        downstream consumers (e.g. the GeoJSON converter). This matches the
        renumbering behavior of MaskStitcher in utils/generate_masks.py.
        """
        # load each tile into a dict keyed by (row, col)
        mask_map = {}
        rows = set()
        cols = set()

        for p in paths:
            m = self.TILE_PATTERN.match(p.name)
            row, col = int(m.group("row")), int(m.group("col"))
            mask_map[(row, col)] = np.load(p)
            rows.add(row)
            cols.add(col)

        all_rows = sorted(rows)
        all_cols = sorted(cols)

        # determine max height per row, max width per col
        row_heights = {r: max(mask_map[(r, c)].shape[0]
                              for c in all_cols if (r, c) in mask_map)
                       for r in all_rows}
        col_widths = {c: max(mask_map[(r, c)].shape[1]
                             for r in all_rows if (r, c) in mask_map)
                      for c in all_cols}

        # cumulative pixel offsets of each row / column on the canvas
        row_offsets = {r: sum(row_heights[rr] for rr in all_rows if rr < r)
                       for r in all_rows}
        col_offsets = {c: sum(col_widths[cc] for cc in all_cols if cc < c)
                       for c in all_cols}

        # int32 canvas (matches MaskStitcher) so label offsetting cannot
        # overflow the uint16 range the tiles are stored in
        total_h = sum(row_heights.values())
        total_w = sum(col_widths.values())
        full_mask = np.zeros((total_h, total_w), dtype=np.int32)

        # place tiles in deterministic (row, col) order, offsetting labels
        label_offset = 0
        for key in sorted(mask_map):
            tile = mask_map[key].astype(np.int32, copy=False)
            y0, x0 = row_offsets[key[0]], col_offsets[key[1]]
            h, w = tile.shape
            region = full_mask[y0:y0 + h, x0:x0 + w]
            nonzero = tile > 0
            region[nonzero] = tile[nonzero] + label_offset
            if nonzero.any():
                label_offset = int(full_mask.max())

        # save combined mask
        np.save(self.output_dir / f"{stem}.npy", full_mask)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
# # Path to mask files
|
| 118 |
+
# mask_folder = image_dir # update this
|
| 119 |
+
# mask_files = [f for f in os.listdir(mask_folder) if f.endswith('.npy')]
|
| 120 |
+
|
| 121 |
+
# # Pattern to extract row and column
|
| 122 |
+
# pattern = re.compile(r'_(\d+)_(\d+)\.npy')
|
| 123 |
+
|
| 124 |
+
# # Map to hold each mask and its (row, col)
|
| 125 |
+
# mask_map = {}
|
| 126 |
+
# row_col_set = set()
|
| 127 |
+
|
| 128 |
+
# # Organize masks by (row, col)
|
| 129 |
+
# for f in mask_files:
|
| 130 |
+
# match = pattern.search(f)
|
| 131 |
+
# if match:
|
| 132 |
+
# row = int(match.group(1)) # y
|
| 133 |
+
# col = int(match.group(2)) # x
|
| 134 |
+
# mask = np.load(os.path.join(mask_folder, f))
|
| 135 |
+
# mask_map[(row, col)] = mask
|
| 136 |
+
# row_col_set.add((row, col))
|
| 137 |
+
|
| 138 |
+
# # Determine row and column counts
|
| 139 |
+
# all_rows = sorted({r for r, _ in row_col_set})
|
| 140 |
+
# all_cols = sorted({c for _, c in row_col_set})
|
| 141 |
+
|
| 142 |
+
# # Build a lookup for tile dimensions per row/col
|
| 143 |
+
# row_heights = {}
|
| 144 |
+
# col_widths = {}
|
| 145 |
+
|
| 146 |
+
# for row in all_rows:
|
| 147 |
+
# for col in all_cols:
|
| 148 |
+
# if (row, col) in mask_map:
|
| 149 |
+
# h, w = mask_map[(row, col)].shape
|
| 150 |
+
# row_heights[row] = max(row_heights.get(row, 0), h)
|
| 151 |
+
# col_widths[col] = max(col_widths.get(col, 0), w)
|
| 152 |
+
|
| 153 |
+
# # Compute cumulative row/column positions
|
| 154 |
+
# row_offsets = {r: sum(row_heights[rr] for rr in all_rows if rr < r) for r in all_rows}
|
| 155 |
+
# col_offsets = {c: sum(col_widths[cc] for cc in all_cols if cc < c) for c in all_cols}
|
| 156 |
+
|
| 157 |
+
# # Total dimensions
|
| 158 |
+
# total_height = sum(row_heights[r] for r in all_rows)
|
| 159 |
+
# total_width = sum(col_widths[c] for c in all_cols)
|
| 160 |
+
|
| 161 |
+
# # Create blank canvas
|
| 162 |
+
# combined_mask = np.zeros((total_height, total_width), dtype=np.uint16)
|
| 163 |
+
|
| 164 |
+
# # Stitch masks into the full canvas
|
| 165 |
+
# for (row, col), mask in mask_map.items():
|
| 166 |
+
# y = row_offsets[row]
|
| 167 |
+
# x = col_offsets[col]
|
| 168 |
+
# h, w = mask.shape
|
| 169 |
+
# combined_mask[y:y+h, x:x+w] = mask
|
| 170 |
+
|
| 171 |
+
# # Save result
|
| 172 |
+
# np.save('combined_full_mask_testing_model.npy', combined_mask)
|
utils/generate_geojson_qp_mask.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# utils/mask_to_geojson.py
|
| 2 |
+
#!/usr/bin/env python3
|
| 3 |
+
"""
|
| 4 |
+
Developed by Nikhil Nageshwar Inturi
|
| 5 |
+
|
| 6 |
+
This module converts full-size .npy mask files into GeoJSON polygon files,
|
| 7 |
+
scaling coordinates back to the original image resolution using a scale factor.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import json
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
import numpy as np
|
| 13 |
+
import cv2
|
| 14 |
+
import logging
|
| 15 |
+
|
| 16 |
+
class MaskToGeoJSONConverter:
    """
    Scans a directory of .npy label masks, traces the external contour of
    every labeled region with OpenCV, rescales the coordinates back to the
    original image resolution, and writes one GeoJSON FeatureCollection per
    mask file.
    """

    def __init__(self, mask_dir: Path, output_dir: Path, upscale_factor: float = 1.0):
        self.mask_dir = Path(mask_dir)
        self.output_dir = Path(output_dir)
        # masks were produced on a downscaled image; invert the factor to
        # map contour coordinates back to full resolution
        self.upscale = 1 / (upscale_factor)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def convert_all(self) -> None:
        """Convert every .npy mask in mask_dir; log and continue on failure."""
        mask_files = list(self.mask_dir.glob("*.npy"))
        if not mask_files:
            self.logger.warning(f"No .npy mask files found in {self.mask_dir}")
            return

        for mask_fp in mask_files:
            try:
                self._convert_file(mask_fp)
            except Exception:
                self.logger.exception(f"Failed to convert {mask_fp.name}")
            else:
                self.logger.info(f"Converted {mask_fp.name} to GeoJSON")

    def _label_features(self, mask, label):
        """Yield one GeoJSON feature per external contour of a single label."""
        binary = (mask == label).astype(np.uint8)
        contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            pts = cnt.squeeze().tolist()
            # degenerate contours (fewer than 3 vertices) cannot form a polygon
            if len(pts) < 3:
                continue
            # scale coordinates back to original resolution
            ring = [[int(x * self.upscale), int(y * self.upscale)] for x, y in pts]
            if ring[0] != ring[-1]:
                ring.append(ring[0])  # GeoJSON polygon rings must be closed
            yield {
                "type": "Feature",
                "properties": {"label": int(label)},
                "geometry": {
                    "type": "Polygon",
                    "coordinates": [ring]
                }
            }

    def _convert_file(self, mask_fp: Path) -> None:
        """Write <stem>.geojson for one mask file."""
        mask = np.load(mask_fp)
        features = [feat
                    for lbl in np.unique(mask) if lbl != 0
                    for feat in self._label_features(mask, lbl)]

        geojson = {"type": "FeatureCollection", "features": features}
        out_fp = self.output_dir / f"{mask_fp.stem}.geojson"
        with open(out_fp, "w") as f:
            json.dump(geojson, f)
|
utils/generate_image_overlays.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Developed by Nikhil Nageshwar Inturi
|
| 4 |
+
|
| 5 |
+
Overlay original PNGs with their corresponding stitched masks,
|
| 6 |
+
then generate side-by-side comparison mosaics.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
# imports
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Union
|
| 12 |
+
from PIL import Image, ImageOps, ImageEnhance
|
| 13 |
+
import os
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class OverlayGenerator:
    """
    For each PNG in original_dir, look up the matching
    "<stem>_mask_stitched.png" in mask_dir and write two artifacts to
    output_dir: a colored transparent overlay and a side-by-side
    original-vs-mask composite.
    """
    def __init__(self, original_dir: Union[str, Path], mask_dir: Union[str, Path], output_dir: Union[str, Path],
                 mask_color: tuple = (255, 0, 0), alpha: float = 0.8) -> None:
        self.original_dir = Path(original_dir)
        self.mask_dir = Path(mask_dir)
        self.output_dir = Path(output_dir)
        self.mask_color = mask_color
        self.alpha = alpha
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def run(self) -> None:
        """Process every PNG; warn (and continue) when its mask is missing."""
        for orig_path in self.original_dir.glob("*.png"):
            stem = orig_path.stem
            mask_path = self.mask_dir / f"{stem}_mask_stitched.png"
            if not mask_path.exists():
                print(f"Warning: mask not found for {stem}")
                continue
            self._make_overlay(orig_path, mask_path)
            self._make_comparison(orig_path, mask_path)

    def _make_overlay(self, orig_path: Path, mask_path: Path) -> None:
        """Alpha-composite the colored mask on top of the original image."""
        base = Image.open(orig_path).convert("RGBA")
        mask_gray = Image.open(mask_path).convert("L")
        # NOTE(review): no resize here — assumes mask and original already
        # share the same dimensions (a resize was deliberately disabled).
        tint = Image.new("RGBA", base.size, self.mask_color + (0,))
        tint.putalpha(ImageEnhance.Brightness(mask_gray).enhance(self.alpha))
        composited = Image.alpha_composite(base, tint)
        composited.save(self.output_dir / f"{orig_path.stem}_overlay.png")

    def _make_comparison(self, orig_path: Path, mask_path: Path) -> None:
        """Paste original and mask side by side into one comparison PNG."""
        left = Image.open(orig_path).convert("RGB")
        right = Image.open(mask_path).convert("RGB")
        if left.size != right.size:
            right = right.resize(left.size, resample=Image.NEAREST)
        panel = Image.new("RGB", (left.width * 2, left.height))
        panel.paste(left, (0, 0))
        panel.paste(right, (left.width, 0))
        panel.save(self.output_dir / f"{orig_path.stem}_compare.png")
|
utils/generate_masks.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Developed by Nikhil Nageshwar Inturi
|
| 4 |
+
|
| 5 |
+
Stitch tiled mask .npy files and mask PNGs into mosaics,
|
| 6 |
+
based on a Cellpose output root (4_cellpose_masks) containing
|
| 7 |
+
'segmentation' and 'masks' subfolders, ignoring 'preview'.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
# imports
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
import logging, numpy as np, tifffile
|
| 14 |
+
# local imports
|
| 15 |
+
from utils.constants import *
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class MaskStitcher:
    """
    Stitch both .npy masks and mask PNGs from a Cellpose output root:
    - expects a root with subfolders SEGMENTATION_DIR (.npy) and MASKS_DIR (.png)
    - writes mosaics into output_dir (defaults to STITCHED_MASKS_DIR)
    """

    def __init__(self, input_dir: Path, output_dir: Path = None) -> None:
        self.input_dir = Path(input_dir)
        self.seg_dir = self.input_dir / SEGMENTATION_DIR
        self.png_dir = self.input_dir / MASKS_DIR
        self.output_dir = Path(output_dir) if output_dir is not None else Path(STITCHED_MASKS_DIR)
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.logger = logging.getLogger(self.__class__.__name__)

    @staticmethod
    def _parse(fname: str):
        """Split '<base>_<row>_<col>.<ext>' into (base, int(row), int(col))."""
        pieces = Path(fname).stem.rsplit("_", 2)
        return pieces[0], int(pieces[1]), int(pieces[2])

    def _read_npy(self, fp: Path) -> np.ndarray:
        """Load a tile; unwrap Cellpose dict-style .npy ('masks' key) if present."""
        arr = np.load(fp, allow_pickle=True)
        if arr.dtype == object:
            arr = arr.item().get('masks')
        return arr.astype(np.int32, copy=False)

    def _read_png(self, fp: Path) -> np.ndarray:
        """Load a mask PNG as an int32 label array."""
        return np.array(Image.open(fp)).astype(np.int32, copy=False)

    def _groups(self, directory: Path, pattern: str):
        """Group tile files in `directory` matching `pattern` by base stem."""
        grouped = {}
        for fp in directory.glob(pattern):
            base = self._parse(fp.name)[0]
            grouped.setdefault(base, []).append(fp)
        return grouped

    def _layout(self, files, read_func):
        """Compute per-row/per-column pixel offsets and the total mosaic size."""
        row_h, col_w = {}, {}
        for fp in files:
            _, r, c = self._parse(fp.name)
            h, w = read_func(fp).shape
            row_h[r] = max(row_h.get(r, 0), h)
            col_w[c] = max(col_w.get(c, 0), w)
        y_off, x_off = {}, {}
        y = 0
        for r in sorted(row_h):
            y_off[r] = y
            y += row_h[r]
        x = 0
        for c in sorted(col_w):
            x_off[c] = x
            x += col_w[c]
        return y_off, x_off, y, x

    def _stitch(self, files, read_func):
        """Place tiles onto the mosaic, renumbering every instance label."""
        y_off, x_off, total_h, total_w = self._layout(files, read_func)
        mosaic = np.zeros((total_h, total_w), dtype=np.int32)
        next_lbl = 1
        for fp in files:
            _, r, c = self._parse(fp.name)
            tile = read_func(fp)
            yy, xx = y_off[r], x_off[c]
            window = mosaic[yy:yy + tile.shape[0], xx:xx + tile.shape[1]]
            # give each instance a fresh label so IDs are unique mosaic-wide
            for lbl in np.unique(tile)[1:]:
                window[tile == lbl] = next_lbl
                next_lbl += 1
        return mosaic

    def stitch_all(self) -> None:
        """Stitch all segmentation .npy groups and mask PNG groups, saving outputs."""
        for base, files in self._groups(self.seg_dir, "*.npy").items():
            self.logger.info(f"Stitching segmentation for '{base}' ")
            mosaic = self._stitch(files, self._read_npy)
            out_npy = self.output_dir / f"{base}_stitched.npy"
            np.save(out_npy, mosaic)
            self.logger.info(f"Saved stitched .npy: {out_npy}")
            out_tif = self.output_dir / f"{base}_stitched.tif"
            tifffile.imwrite(out_tif, (mosaic > 0).astype(np.uint8) * 255, photometric="minisblack")
            self.logger.info(f"Saved stitched TIFF: {out_tif}")

        for base, files in self._groups(self.png_dir, "*.png").items():
            self.logger.info(f"Stitching mask PNGs for '{base}' ")
            mosaic = self._stitch(files, self._read_png)
            out_png = self.output_dir / f"{base}_mask_stitched.png"
            Image.fromarray(mosaic.astype(np.uint16)).save(out_png)
            self.logger.info(f"Saved stitched mask PNG: {out_png}")
|
utils/generate_metrics.py
ADDED
|
File without changes
|
utils/generate_plots.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Developed by Nikhil Nageshwar Inturi
|
| 4 |
+
|
| 5 |
+
This module provides PlotGenerator to process all masks in a directory:
|
| 6 |
+
- For each mask, find its image by matching name stem, then output:
|
| 7 |
+
1) a binary mask PNG,
|
| 8 |
+
2) an overlay PNG with colored mask + boundaries.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import matplotlib.pyplot as plt
|
| 12 |
+
import numpy as np
|
| 13 |
+
from PIL import Image
|
| 14 |
+
from skimage.segmentation import find_boundaries
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
import logging
|
| 17 |
+
|
| 18 |
+
class PlotGenerator:
    """
    Process every .npy mask in ``mask_dir`` and generate two plots per mask
    using the matching image (by filename stem) from ``image_dir``:

    1) a binary (black/white) mask PNG,
    2) an overlay PNG with the mask colour-blended onto the image and its
       boundaries painted on top.
    """

    def __init__(
        self,
        image_dir: Path,
        mask_dir: Path,
        output_dir: Path,
        overlay_color: tuple[int, int, int] = (238, 144, 144),
        boundary_color: tuple[int, int, int] = (100, 100, 255),
        alpha: float = 0.5
    ) -> None:
        """
        Args:
            image_dir: Directory containing the source PNG images.
            mask_dir: Directory containing .npy label masks.
            output_dir: Directory for output plots (created if missing).
            overlay_color: RGB fill colour for masked pixels.
            boundary_color: RGB colour for mask boundary pixels.
            alpha: Blend weight of overlay_color over the image (0..1).
        """
        self.image_dir = Path(image_dir)
        self.mask_dir = Path(mask_dir)
        self.output_dir = Path(output_dir)
        self.overlay_color = np.array(overlay_color, dtype=np.uint8)
        self.boundary_color = np.array(boundary_color, dtype=np.uint8)
        self.alpha = alpha
        self.logger = logging.getLogger(self.__class__.__name__)
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def _save_plot(self, array, title, out_path, cmap=None) -> None:
        """Render *array* with matplotlib and save the figure to *out_path*."""
        plt.figure(figsize=(10, 10))
        plt.imshow(array, cmap=cmap)
        plt.axis('off')
        plt.title(title)
        plt.savefig(out_path, bbox_inches='tight', dpi=300)
        plt.close()

    def run(self) -> None:
        """Generate binary-mask and overlay plots for every mask in mask_dir."""
        mask_paths = list(self.mask_dir.glob("*.npy"))
        if not mask_paths:
            self.logger.warning(f"No .npy masks found in {self.mask_dir}")
            return

        for mask_path in mask_paths:
            stem = mask_path.stem
            # sorted() makes the pick deterministic when several images share
            # the stem prefix — bare glob order is filesystem-dependent.
            img_candidates = sorted(self.image_dir.glob(f"{stem}*.png"))
            if not img_candidates:
                self.logger.warning(f"No image found for mask '{stem}'")
                continue
            image_path = img_candidates[0]

            img = np.array(Image.open(image_path).convert("RGB"))
            mask = np.load(mask_path)

            # binary mask plot
            binary = (mask > 0).astype(np.uint8)
            out_gray = self.output_dir / f"{stem}_binary.png"
            self._save_plot(binary, f"{stem} - Binary Mask", out_gray, cmap='gray')
            self.logger.info(f"Saved binary mask plot: {out_gray.name}")

            # overlay: alpha-blend the fill colour, then paint boundaries on top
            overlay = img.copy()
            mask_bool = mask > 0
            overlay[mask_bool] = (
                (1 - self.alpha) * img[mask_bool] + self.alpha * self.overlay_color
            ).astype(np.uint8)
            boundaries = find_boundaries(mask_bool, mode='outer')
            overlay[boundaries] = self.boundary_color

            out_overlay = self.output_dir / f"{stem}_overlay.png"
            self._save_plot(overlay, f"{stem} - Mask Overlay", out_overlay)
            self.logger.info(f"Saved overlay plot: {out_overlay.name}")
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# # main.py snippet (to run plots for all masks)
|
| 90 |
+
# from utils.generate_plots import PlotGenerator
|
| 91 |
+
|
| 92 |
+
# plotter = PlotGenerator(
|
| 93 |
+
# image_dir=PNG_IMAGES_DIR,
|
| 94 |
+
# mask_dir=STITCHED_MASKS_DIR,
|
| 95 |
+
# output_dir=OUTPUT_DIR,
|
| 96 |
+
# overlay_color=(238,144,144),
|
| 97 |
+
# boundary_color=(100,100,255),
|
| 98 |
+
# alpha=0.5
|
| 99 |
+
# )
|
| 100 |
+
# plotter.run()
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# import matplotlib.pyplot as plt
|
| 106 |
+
# import numpy as np
|
| 107 |
+
|
| 108 |
+
# # Create a binary mask (optional: if combined_mask contains labels like 1,2,3...)
|
| 109 |
+
# binary_mask = (combined_mask > 0).astype(np.uint8)
|
| 110 |
+
|
| 111 |
+
# plt.figure(figsize=(10, 10))
|
| 112 |
+
# plt.imshow(binary_mask, cmap='gray') # all non-zero values will be gray
|
| 113 |
+
# plt.title('Combined Mask - Single Color')
|
| 114 |
+
# plt.axis('off')
|
| 115 |
+
|
| 116 |
+
# # Save the figure as PNG
|
| 117 |
+
# plt.savefig('combined_mask_grey_testing_model.png', bbox_inches='tight', dpi=300)
|
| 118 |
+
|
| 119 |
+
# # Show the plot
|
| 120 |
+
# plt.show()
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
# from PIL import Image
|
| 126 |
+
# import numpy as np
|
| 127 |
+
|
| 128 |
+
# image = Image.open("jayden_img.ome.png").convert("RGB") # PNG_IMAGES_DIR = CONFIG_DIR / '2_png_images'
|
| 129 |
+
# image_np = np.array(image)
|
| 130 |
+
# mask = np.load("combined_full_mask_testing_model.npy") # STITCHED_MASKS_DIR = CONFIG_DIR / '5_stitched_masks'
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
# import numpy as np
|
| 137 |
+
# import matplotlib.pyplot as plt
|
| 138 |
+
# from PIL import Image
|
| 139 |
+
# from skimage.segmentation import find_boundaries
|
| 140 |
+
|
| 141 |
+
# # Define colors
|
| 142 |
+
# overlay_color = np.array([238, 144, 144]) # Light green
|
| 143 |
+
# boundary_color = np.array([100, 100, 255]) # Navy blue
|
| 144 |
+
# alpha = 0.5 # Transparency for overlay
|
| 145 |
+
|
| 146 |
+
# # Ensure mask is binary
|
| 147 |
+
# mask = (mask > 0).astype(np.uint8)
|
| 148 |
+
|
| 149 |
+
# # Create a copy for overlay
|
| 150 |
+
# overlay = image_np.copy()
|
| 151 |
+
|
| 152 |
+
# # Apply overlay color where mask is 1
|
| 153 |
+
# overlay[mask == 1] = ((1 - alpha) * image_np[mask == 1] + alpha * overlay_color).astype(np.uint8)
|
| 154 |
+
|
| 155 |
+
# # --- Add navy blue boundaries ---
|
| 156 |
+
# from skimage.segmentation import find_boundaries
|
| 157 |
+
|
| 158 |
+
# # Find boundaries in the mask
|
| 159 |
+
# boundaries = find_boundaries(mask, mode='outer')
|
| 160 |
+
|
| 161 |
+
# # Draw boundary color
|
| 162 |
+
# overlay[boundaries] = boundary_color
|
| 163 |
+
|
| 164 |
+
# # Show plot
|
| 165 |
+
# plt.figure(figsize=(10, 10))
|
| 166 |
+
# plt.imshow(overlay)
|
| 167 |
+
# plt.axis("off")
|
| 168 |
+
# plt.title("Image with Mask Overlay and Navy Blue Boundary")
|
| 169 |
+
# plt.show()
|
| 170 |
+
|
| 171 |
+
# # Save the image
|
| 172 |
+
# output = Image.fromarray(overlay)
|
| 173 |
+
# output.save("0_image_with_mask_overlay_with_white_boundary_model.png")
|
| 174 |
+
|
| 175 |
+
# # output dir where pltos needs to be saved: OUTPUT_DIR = CONFIG_DIR / '6_output_masks'
|
utils/generate_pngs.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Developed by Nikhil Nageshwar Inturi
|
| 4 |
+
|
| 5 |
+
This module provides TiffToPngConverter for converting TIFF images to PNG format,
|
| 6 |
+
applying a scaling factor to resize the images.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
# imports
|
| 10 |
+
import logging, tifffile
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Union
|
| 13 |
+
from PIL import Image
|
| 14 |
+
# local imports
|
| 15 |
+
from utils.constants import *
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class TiffToPngConverter:
    """
    Convert every TIFF image in a source directory to PNG format, resizing
    each image by a fixed scaling factor.
    """

    def __init__(self, scaling_factor: float, tif_dir: Union[str, Path], output_dir: Union[str, Path]) -> None:
        """
        Args:
            scaling_factor: Multiplier applied to both image dimensions.
            tif_dir: Directory containing the input .tif files.
            output_dir: Directory for the resulting PNGs (created if missing).
        """
        self.scaling_factor = scaling_factor
        self.tif_dir = Path(tif_dir)
        self.output_dir = Path(output_dir)
        self.logger = logging.getLogger(self.__class__.__name__)
        self._setup_output_directory()

    def _setup_output_directory(self) -> None:
        """Create the output directory (and parents) if it does not exist."""
        try:
            self.output_dir.mkdir(parents=True, exist_ok=True)
            self.logger.debug(f"Output directory ready: {self.output_dir}")
        except Exception as e:
            self.logger.error(f"Failed to create output directory {self.output_dir}: {e}")
            raise

    def convert_all(self) -> None:
        """Convert every .tif file found in the source directory."""
        tif_files = list(self.tif_dir.glob("*.tif"))
        if not tif_files:
            self.logger.warning(f"No .tif files found in {self.tif_dir}")
            return

        # per-file error isolation: one bad TIFF must not abort the batch
        for tif_path in tif_files:
            try:
                self.convert_file(tif_path)
            except Exception:
                self.logger.exception(f"Error converting file: {tif_path}")

    def convert_file(self, tif_path: Path) -> None:
        """
        Convert a single TIFF file to PNG, resizing by the scaling factor.

        Args:
            tif_path: Path to the input .tif file.
        """
        # level=0 selects the full-resolution page of a pyramidal TIFF
        pixels = tifffile.imread(str(tif_path), level=0)
        self.logger.debug(f"Read {tif_path.name} with shape {pixels.shape}")
        target = (int(pixels.shape[1] * self.scaling_factor), int(pixels.shape[0] * self.scaling_factor))
        resized = Image.fromarray(pixels).resize(target, resample=Image.LANCZOS)
        output_path = self.output_dir / tif_path.with_suffix(".png").name
        resized.save(output_path, format="PNG")
        self.logger.info(f"Converted {tif_path.name} to {output_path}")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# testing
|
| 72 |
+
# setup_logging(logging.INFO)
|
| 73 |
+
# converter = TiffToPngConverter(0.2125, 'path/to/tifs', 'path/to/output')
|
| 74 |
+
# converter.convert_all()
|
utils/generate_split_images.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Developed by Nikhil Nageshwar Inturi
|
| 4 |
+
|
| 5 |
+
This module provides ImageSplitter for splitting PNG images into
|
| 6 |
+
equal-sized sub-images and saving them to an output directory.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
# imports
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
import numpy as np, cv2, logging
|
| 12 |
+
from PIL import Image
|
| 13 |
+
# local imports
|
| 14 |
+
from utils.constants import setup_logging
|
| 15 |
+
Image.MAX_IMAGE_PIXELS = None
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class ImageSplitter:
    """
    Split every PNG image in a source directory into sub-images of a fixed
    width and height and save the tiles to an output directory.
    """

    def __init__(self, source_dir: Path, output_dir: Path, sub_image_width: int, sub_image_height: int) -> None:
        """
        Args:
            source_dir: Directory containing the input .png files.
            output_dir: Directory for the tiles (created if missing).
            sub_image_width: Tile width in pixels.
            sub_image_height: Tile height in pixels.
        """
        self.source_dir = Path(source_dir)
        self.output_dir = Path(output_dir)
        self.sub_w = sub_image_width
        self.sub_h = sub_image_height
        self.logger = logging.getLogger(self.__class__.__name__)
        self._setup_output_directory()

    def _setup_output_directory(self) -> None:
        """Create the output directory (and parents) if it does not exist."""
        try:
            self.output_dir.mkdir(parents=True, exist_ok=True)
            self.logger.debug(f"Output directory ready: {self.output_dir}")
        except Exception as e:
            self.logger.error(f"Failed to create output directory {self.output_dir}: {e}")
            raise

    def split_all(self) -> None:
        """
        Iterate over all PNG files in source_dir and split them.
        """
        png_files = list(self.source_dir.glob("*.png"))
        if not png_files:
            self.logger.warning(f"No .png files found in {self.source_dir}")
            return

        for png_file in png_files:
            try:
                self.split_file(png_file)
            except Exception:
                self.logger.exception(f"Error splitting file: {png_file}")

    def split_file(self, png_path: Path) -> None:
        """
        Split a single PNG image into tiles named ``<stem>_<row>_<col>.png``.

        Edge tiles are allowed to be smaller than (sub_h, sub_w).

        Args:
            png_path: Path to the input .png file.
        """
        with Image.open(png_path) as pil_img:
            img = np.array(pil_img)
        self.logger.debug(f"Loaded {png_path.name} with shape {img.shape}")

        height, width = img.shape[:2]
        # ceiling division so partial edge tiles are still emitted
        cols = (width + self.sub_w - 1) // self.sub_w
        rows = (height + self.sub_h - 1) // self.sub_h

        for row in range(rows):
            for col in range(cols):
                x0 = col * self.sub_w
                y0 = row * self.sub_h
                x1 = min(x0 + self.sub_w, width)
                y1 = min(y0 + self.sub_h, height)
                sub_img = img[y0:y1, x0:x1]

                output_name = f"{png_path.stem}_{row}_{col}.png"
                output_path = self.output_dir / output_name
                # PIL loads RGB but cv2.imwrite expects BGR. Fix: only
                # convert colour images — the original unconditionally called
                # cvtColor, which raises on 2-D grayscale arrays.
                if sub_img.ndim == 3:
                    sub_img = cv2.cvtColor(sub_img, cv2.COLOR_RGB2BGR)
                success = cv2.imwrite(str(output_path), sub_img)
                if success:
                    self.logger.info(f"Saved sub-image: {output_name}")
                else:
                    self.logger.error(f"Failed to save sub-image: {output_name}")
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# testing
|
| 89 |
+
# from bin.constants import setup_logging
|
| 90 |
+
# from bin.generate_split_images import ImageSplitter
|
| 91 |
+
# setup_logging(logging.INFO)
|
| 92 |
+
# splitter = ImageSplitter(source_dir=Path("path/to/pngs"), output_dir=Path("path/to/splits"), sub_image_width=640, sub_image_height=640)
|
| 93 |
+
# splitter.split_all()
|
utils/generate_training_dataset.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Create a uint16 mask PNG from a GeoJSON file and write two down‑sampled
|
| 3 |
+
previews (black‑and‑white and random‑colour) for easy visual inspection.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
import warnings, numpy as np, geopandas as gpd, rasterio, cv2
|
| 8 |
+
from shapely.geometry import Polygon, MultiPolygon
|
| 9 |
+
from PIL import Image
|
| 10 |
+
from tifffile import imread
|
| 11 |
+
from utils.constants import *
|
| 12 |
+
Image.MAX_IMAGE_PIXELS = None
|
| 13 |
+
warnings.filterwarnings("ignore", category=Image.DecompressionBombWarning)
|
| 14 |
+
|
| 15 |
+
def geojson_to_mask_png(image_path: Path, geojson_path: Path, mask_out: Path) -> None:
    """
    Rasterize GeoJSON polygons into a uint16 label mask PNG.

    Each feature in the GeoJSON gets a distinct label (1, 2, 3, ...) in the
    order it appears in the file; background is 0. The mask's pixel grid is
    taken from *image_path* (its height, width and affine transform).

    Args:
        image_path: Georeferenced image that defines the output grid.
        geojson_path: Vector file with polygon / multipolygon features.
        mask_out: Destination path for the label mask (written via cv2).

    Notes:
        - Only ``poly.exterior`` is filled: interior rings (holes) are NOT
          carved out of the mask.
        - Labels are stored as uint16, so more than 65535 features would
          overflow the label range.
    """
    # Read only the metadata needed to size/position the mask; the raster
    # pixel data itself is never loaded.
    with rasterio.open(image_path) as src:
        transform = src.transform
        height, width = src.height, src.width
        crs = src.crs

    mask = np.zeros((height, width), dtype=np.uint16)
    gdf = gpd.read_file(geojson_path)

    # Reproject the vectors onto the image CRS when both are known and differ.
    if crs is not None and gdf.crs is not None and gdf.crs != crs:
        gdf = gdf.to_crs(crs)

    def world_to_px(tx, x, y):
        # Inverse affine transform maps world (x, y) -> fractional
        # (col, row); round to the nearest integer pixel.
        c, r = ~tx * (x, y)
        return int(round(c)), int(round(r))

    # start=1 reserves 0 for background.
    for idx, geom in enumerate(gdf.geometry, start=1):
        # A MultiPolygon's parts all share the same feature label.
        polys = geom.geoms if isinstance(geom, MultiPolygon) else [geom]
        for poly in polys:
            pts = np.array(
                [world_to_px(transform, x, y) for x, y in poly.exterior.coords],
                dtype=np.int32
            )
            cv2.fillPoly(mask, [pts], color=idx)

    cv2.imwrite(str(mask_out), mask)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def make_bw_preview(label_mask: np.ndarray, out_png: Path, downsample: int = 8):
    """
    Save a black-and-white preview of *label_mask*: any foreground label
    becomes 255, background stays 0, optionally subsampled by *downsample*.
    """
    bw = np.where(label_mask > 0, 255, 0).astype(np.uint8)
    if downsample > 1:
        bw = bw[::downsample, ::downsample]
    Image.fromarray(bw, mode="L").save(out_png)
    print(f"saved B/W preview → {out_png}")
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def make_colored_preview(label_mask: np.ndarray, out_png: Path, downsample: int = 8):
    """
    Save an RGB preview of *label_mask* where each label maps to a random
    (but reproducible, seed 42) colour and the background stays black.
    """
    rng = np.random.default_rng(42)
    # Lookup table indexed by label value: row 0 (background) is black,
    # rows 1..max are random colours.
    colours = rng.integers(0, 256, size=(label_mask.max(), 3), dtype=np.uint8)
    lut = np.concatenate([np.zeros((1, 3), np.uint8), colours], axis=0)
    rgb = lut[label_mask]
    if downsample > 1:
        rgb = rgb[::downsample, ::downsample]
    Image.fromarray(rgb, mode="RGB").save(out_png)
    print(f"saved colour preview → {out_png}")
|
utils/generate_training_split_img_masks.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Split every TIFF in IMG_DIR and its matching mask in MASK_DIR into 640×640 or 1024x1024 tiles.
|
| 4 |
+
Image tiles are saved as 8‑bit TIFFs: mask tiles are saved
|
| 5 |
+
as 16‑bit TIFFs with “_mask” suffix so Cellpose can pair them automatically.
|
| 6 |
+
|
| 7 |
+
Result:
|
| 8 |
+
OUT_DIR/
|
| 9 |
+
<stem>_0_0.tif
|
| 10 |
+
<stem>_0_0_mask.tif
|
| 11 |
+
<stem>_0_1.tif
|
| 12 |
+
<stem>_0_1_mask.tif
|
| 13 |
+
...
|
| 14 |
+
|
| 15 |
+
Author : Nikhil Nageshwar Inturi (modified to handle separate img/mask dirs)
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
# imports
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
import logging, numpy as np, tifffile, cv2
|
| 21 |
+
from utils.constants import setup_logging
|
| 22 |
+
|
| 23 |
+
setup_logging(logging.INFO)
|
| 24 |
+
|
| 25 |
+
# IMG_DIR = Path("/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/1_tif_images")
|
| 26 |
+
# MASK_DIR = Path("/Users/discovery/Downloads/xenium_testing_jit/spinal_cord_samples_fr/7_mask_images")
|
| 27 |
+
# OUT_DIR = IMG_DIR.parent / "8_split_masks"
|
| 28 |
+
|
| 29 |
+
# TILE_H = TILE_W = 1024
|
| 30 |
+
|
| 31 |
+
def read_tif(path: Path) -> np.ndarray:
    """
    Load a TIFF into a numpy array, moving a leading channel axis to the
    end ((C, H, W) -> (H, W, C)) when the array is 3-D with at most four
    channels.
    """
    data = tifffile.imread(path)
    if data.ndim == 3 and data.shape[0] <= 4:
        data = np.moveaxis(data, 0, -1)
    return data
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def pad(tile: np.ndarray, th: int, tw: int) -> np.ndarray:
    """
    Zero-pad *tile* on its bottom and right edges up to shape (th, tw).

    Trailing axes (e.g. a channel axis) are left untouched. When the tile
    already has the target height and width, it is returned unchanged.
    """
    extra_h = th - tile.shape[0]
    extra_w = tw - tile.shape[1]
    if not extra_h and not extra_w:
        return tile
    widths = [(0, extra_h), (0, extra_w)]
    widths.extend([(0, 0)] * (tile.ndim - 2))
    return np.pad(tile, widths, mode="constant", constant_values=0)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def tile_pair(img_path: Path, mask_path: Path, out_dir: Path, TILE_H: int, TILE_W: int):
    """
    Split one image/mask pair into fixed-size tiles for Cellpose training.

    Tiles are written to *out_dir* as ``<stem>_<row>_<col>.tif`` (image,
    8-bit) and ``<stem>_<row>_<col>_masks.tif`` (mask, 16-bit), so Cellpose
    can pair them by the ``_masks`` suffix. Edge tiles are zero-padded up
    to (TILE_H, TILE_W).

    Raises:
        ValueError: If the image and mask spatial dimensions differ.
    """
    stem = img_path.stem
    img = read_tif(img_path)
    # Mask labels are kept as uint16 so label IDs above 255 survive.
    mask = read_tif(mask_path).astype(np.uint16)
    if img.shape[:2] != mask.shape[:2]:
        raise ValueError(f"Dimension mismatch {img_path.name} vs {mask_path.name}")

    H, W = img.shape[:2]
    # ceiling division: partial edge tiles are kept (and padded below)
    nrows = (H + TILE_H - 1) // TILE_H
    ncols = (W + TILE_W - 1) // TILE_W

    for r in range(nrows):
        for c in range(ncols):
            y0, x0 = r * TILE_H, c * TILE_W
            y1, x1 = min(y0 + TILE_H, H), min(x0 + TILE_W, W)
            img_tile = pad(img[y0:y1, x0:x1], TILE_H, TILE_W)
            msk_tile = pad(mask[y0:y1, x0:x1], TILE_H, TILE_W)
            img_name = f"{stem}_{r}_{c}.tif"
            msk_name = f"{stem}_{r}_{c}_masks.tif"

            # Rescale non-uint8 images to 0..255.
            # NOTE(review): min-max normalization is per-tile, so tiles of
            # the same image can end up with different intensity scaling —
            # confirm this is acceptable for training.
            if img_tile.dtype != np.uint8:
                img_write = cv2.normalize(
                    img_tile, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
            else:
                img_write = img_tile
            tifffile.imwrite(out_dir / img_name, img_write)
            tifffile.imwrite(out_dir / msk_name, msk_tile)
            logging.info("saved %s / %s", img_name, msk_name)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def split_folder(img_dir: Path, mask_dir: Path, out_dir: Path, TILE_H: int, TILE_W: int):
    """
    Tile every image/mask pair found under *img_dir* / *mask_dir*.

    Files ending in ``_masks.tif`` inside img_dir are skipped (they are
    masks, not images); images lacking a matching ``<stem>_masks.tif`` in
    mask_dir are logged and skipped.
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    for img_path in img_dir.glob("*.tif"):
        if img_path.name.endswith("_masks.tif"):
            # a mask that happens to live in the image directory
            continue
        mask_path = mask_dir / f"{img_path.stem}_masks.tif"
        if mask_path.exists():
            tile_pair(img_path, mask_path, out_dir, TILE_H, TILE_W)
        else:
            logging.warning("no mask found for %s, skipping", img_path.name)
|