Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- .DS_Store +0 -0
- .gitattributes +53 -0
- .gitignore +6 -0
- README.md +26 -0
- __pycache__/pipeline.cpython-313.pyc +0 -0
- __pycache__/pipeline.cpython-39.pyc +0 -0
- alopecia/.DS_Store +0 -0
- alopecia/__init__.py +0 -0
- alopecia/calculate_hair_count.py +317 -0
- alopecia/calculate_hair_thickness.py +174 -0
- alopecia/count_result/.DS_Store +0 -0
- alopecia/count_result/density.json +47 -0
- alopecia/count_result/hair_count.csv +0 -0
- alopecia/count_result/visualizations/vis_230219_A191_1.jpg +3 -0
- alopecia/count_result/visualizations/vis_230219_A191_1_test.jpg +3 -0
- alopecia/count_result/visualizations/vis_230219_A200_4.jpg +3 -0
- alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-3.jpg +3 -0
- alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-4.jpg +3 -0
- alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-5.jpg +3 -0
- alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-6.jpg +3 -0
- alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-7.jpg +3 -0
- alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-9.jpg +3 -0
- alopecia/thickness_result/230219_A191_1.npy +3 -0
- alopecia/thickness_result/230219_A191_1_test.npy +3 -0
- alopecia/thickness_result/230219_A191_1_test_vis.png +0 -0
- alopecia/thickness_result/230219_A191_1_vis.png +0 -0
- alopecia/thickness_result/230219_A200_4.npy +3 -0
- alopecia/thickness_result/230219_A200_4_vis.png +0 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-3.npy +3 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-3_vis.png +0 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-4.npy +3 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-4_vis.png +0 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-5.npy +3 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-5_vis.png +0 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-6.npy +3 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-6_vis.png +0 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-7.npy +3 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-7_vis.png +0 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-9.npy +3 -0
- alopecia/thickness_result/IMG_02C458E4D7CE-9_vis.png +0 -0
- auto_run.py +26 -0
- complete_pipeline_out/binary_230219_A191_1.jpg +0 -0
- complete_pipeline_out/binary_230219_A191_1_test.jpg +0 -0
- complete_pipeline_out/binary_230219_A200_4.jpg +0 -0
- complete_pipeline_out/binary_IMG_02C458E4D7CE-3.jpeg +0 -0
- complete_pipeline_out/binary_IMG_02C458E4D7CE-4.jpeg +0 -0
- complete_pipeline_out/binary_IMG_02C458E4D7CE-5.jpeg +0 -0
- complete_pipeline_out/binary_IMG_02C458E4D7CE-6.jpeg +0 -0
- complete_pipeline_out/complete_pipeline_230219_A191_1.jpg +3 -0
- complete_pipeline_out/complete_pipeline_230219_A191_1_test.jpg +3 -0
.DS_Store
ADDED
Binary file (6.15 kB).
.gitattributes
CHANGED
@@ -33,3 +33,56 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+alopecia/count_result/visualizations/vis_230219_A191_1.jpg filter=lfs diff=lfs merge=lfs -text
+alopecia/count_result/visualizations/vis_230219_A191_1_test.jpg filter=lfs diff=lfs merge=lfs -text
+alopecia/count_result/visualizations/vis_230219_A200_4.jpg filter=lfs diff=lfs merge=lfs -text
+alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-3.jpg filter=lfs diff=lfs merge=lfs -text
+alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-4.jpg filter=lfs diff=lfs merge=lfs -text
+alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-5.jpg filter=lfs diff=lfs merge=lfs -text
+alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-6.jpg filter=lfs diff=lfs merge=lfs -text
+alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-7.jpg filter=lfs diff=lfs merge=lfs -text
+alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-9.jpg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/complete_pipeline_230219_A191_1.jpg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/complete_pipeline_230219_A191_1_test.jpg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/complete_pipeline_230219_A200_4.jpg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/complete_pipeline_IMG_02C458E4D7CE-3.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/complete_pipeline_IMG_02C458E4D7CE-4.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/complete_pipeline_IMG_02C458E4D7CE-5.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/complete_pipeline_IMG_02C458E4D7CE-6.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/labeled_230219_A191_1.jpg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/labeled_230219_A191_1_test.jpg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/labeled_230219_A200_4.jpg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/labeled_IMG_02C458E4D7CE-3.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/labeled_IMG_02C458E4D7CE-4.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/labeled_IMG_02C458E4D7CE-5.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/labeled_IMG_02C458E4D7CE-6.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/lines_mask_IMG_02C458E4D7CE-3.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/lines_mask_IMG_02C458E4D7CE-5.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/lines_mask_IMG_02C458E4D7CE-6.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/merged_foreground_IMG_02C458E4D7CE-3.jpeg filter=lfs diff=lfs merge=lfs -text
+complete_pipeline_out/merged_foreground_IMG_02C458E4D7CE-6.jpeg filter=lfs diff=lfs merge=lfs -text
+datasets/data/230219_A191_1.jpg filter=lfs diff=lfs merge=lfs -text
+datasets/data/230219_A191_1_test.jpg filter=lfs diff=lfs merge=lfs -text
+datasets/data/IMG_02C458E4D7CE-3.jpeg filter=lfs diff=lfs merge=lfs -text
+datasets/data/IMG_02C458E4D7CE-4.jpeg filter=lfs diff=lfs merge=lfs -text
+datasets/data/IMG_02C458E4D7CE-5.jpeg filter=lfs diff=lfs merge=lfs -text
+datasets/data/IMG_02C458E4D7CE-6.jpeg filter=lfs diff=lfs merge=lfs -text
+datasets/data/IMG_02C458E4D7CE-7.jpeg filter=lfs diff=lfs merge=lfs -text
+datasets/data/IMG_02C458E4D7CE-9.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/ensemble_result/ensemble_val/IMG_02C458E4D7CE-5.jpg filter=lfs diff=lfs merge=lfs -text
+prediction/ensemble_result/ensemble_val/IMG_02C458E4D7CE-7.jpg filter=lfs diff=lfs merge=lfs -text
+prediction/ensemble_result/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-3.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/ensemble_result/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-5.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/ensemble_result/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-6.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/ensemble_result/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-7.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/ensemble_result/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-9.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/sam_result/sam_val/230219_A191_1.jpg filter=lfs diff=lfs merge=lfs -text
+prediction/sam_result/sam_val/230219_A191_1_test.jpg filter=lfs diff=lfs merge=lfs -text
+prediction/sam_result/sam_val/230219_A200_4.jpg filter=lfs diff=lfs merge=lfs -text
+prediction/sam_result/sam_val/IMG_02C458E4D7CE-5.jpg filter=lfs diff=lfs merge=lfs -text
+prediction/sam_result/sam_val/IMG_02C458E4D7CE-7.jpg filter=lfs diff=lfs merge=lfs -text
+prediction/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-3.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-5.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-6.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-7.jpeg filter=lfs diff=lfs merge=lfs -text
+prediction/test_val/test_step7_merged_foreground_IMG_02C458E4D7CE-9.jpeg filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,6 @@
/Users/Admin/ScalpVision/sam_vit_h_4b8939.pth
/Users/Admin/ScalpVision/alopecia/alopecia_prediction.py
/Users/Admin/ScalpVision/datasets/old_data
DiffuseIT-M
etc
_pycache_/
README.md
ADDED
@@ -0,0 +1,26 @@
## ScalpPipeline (`pipeline.py`)

Refactored pipeline combining all steps into a single class.

### Usage
```bash
python pipeline.py
```

### Dependencies (Files)
The pipeline requires the following files and directories to exist:

1. **Input Images**:
   - `datasets/data/`: directory containing the input images (`.jpg`, `.jpeg`, `.png`).

2. **Model Weights**:
   - `segmentation/model/U2NET.pth`: pre-trained U2NET model.
   - `sam_vit_h_4b8939.pth`: SAM (Segment Anything Model) checkpoint (ViT-H).

### Output
Results are saved in:
- `datasets/seg_train/` (U2NET masks)
- `prediction/sam_result/sam_val/` (SAM masks)
- `prediction/ensemble_result/ensemble_val/` (ensemble masks)
- `alopecia/thickness_result/` (thickness data & visualizations)
- `alopecia/count_result/` (hair count CSV & visualizations)
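A minimal sketch of driving the pipeline from Python rather than the CLI. `ScalpPipeline` is the name given in the README heading, but the zero-argument constructor and `run()` entry point below are assumptions about the API, not documented behavior:

```python
# Hypothetical usage sketch. ScalpPipeline is named in the README, but the
# constructor and run() method shown here are assumed, not documented.
from pipeline import ScalpPipeline

pipe = ScalpPipeline()  # assumed: picks up datasets/data/ and the model weights listed above
pipe.run()              # assumed: equivalent to running `python pipeline.py`
```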
__pycache__/pipeline.cpython-313.pyc
ADDED
Binary file (49.1 kB).
__pycache__/pipeline.cpython-39.pyc
ADDED
Binary file (23 kB).
alopecia/.DS_Store
ADDED
Binary file (6.15 kB).
alopecia/__init__.py
ADDED
File without changes
alopecia/calculate_hair_count.py
ADDED
@@ -0,0 +1,317 @@
import cv2
import os
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
import json
import csv

# --- PATH CONFIGURATION ---
DEFAULT_SEGMENT_FOLDER = "../prediction/ensemble_result/ensemble_val"
DEFAULT_ORIGINAL_FOLDER = "../datasets/data"
DEFAULT_SAVE_PATH = "alopecia/count_result"

def load_segment_mask(img_path):
    if not os.path.exists(img_path):
        return None
    img_gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    if img_gray is None:
        return None
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    binary_filtered = cv2.morphologyEx(img_gray, cv2.MORPH_OPEN, kernel)
    # ensure binary (0/255)
    _, binary_filtered = cv2.threshold(binary_filtered, 127, 255, cv2.THRESH_BINARY)
    return binary_filtered

def run_watershed_for_sep(binary_img, original_img, sep_factor):
    """
    Run watershed for a single separation_factor and return the markers (int32 array).
    markers: -1 = boundary, 1 = background, >=2 = object labels (kept similar to the old structure).
    """
    # Distance transform
    dist_transform = cv2.distanceTransform(binary_img, cv2.DIST_L2, 5)
    # sure foreground (seeds)
    _, sure_fg = cv2.threshold(dist_transform, sep_factor * dist_transform.max(), 255, 0)
    sure_fg = np.uint8(sure_fg)
    # sure background
    kernel = np.ones((3, 3), np.uint8)
    sure_bg = cv2.dilate(binary_img, kernel, iterations=3)
    # unknown
    unknown = cv2.subtract(sure_bg, sure_fg)
    # markers
    ret, markers = cv2.connectedComponents(sure_fg)
    markers = markers + 1  # background = 1
    markers[unknown == 255] = 0
    # prepare original color
    if len(original_img.shape) == 2:
        original_color = cv2.cvtColor(original_img, cv2.COLOR_GRAY2BGR)
    else:
        original_color = original_img.copy()
    # watershed
    markers_w = markers.copy().astype(np.int32)
    cv2.watershed(original_color, markers_w)
    # After watershed: boundary pixels are labeled -1; keep it
    return markers_w

def apply_watershed_hierarchical(binary_img, original_img, min_area, min_aspect_ratio, min_length,
                                 separation_factor=0.2, hierarchy_levels=3):
    """
    Multi-level hierarchical watershed refinement.
    - Run watershed at several levels (with increasing separation_factor)
    - Start from the coarsest level and refine by replacing a parent region with children from later levels
    - Only accept a split if the child components satisfy the morphological criteria, to avoid over-segmentation
    """
    # 0. preparation (the distance transform is computed inside run_watershed_for_sep)
    # build the list of separation levels (mild -> strong)
    # scale factors: from sep*0.7 to sep*1.6 (depending on hierarchy_levels)
    low = max(0.01, separation_factor * 0.7)
    high = separation_factor * 1.6
    if hierarchy_levels <= 1:
        sep_levels = [separation_factor]
    else:
        sep_levels = list(np.linspace(low, high, hierarchy_levels))

    # 1. run watershed for every level
    markers_levels = []
    for s in sep_levels:
        markers_levels.append(run_watershed_for_sep(binary_img, original_img, s))

    # 2. Start from the coarse result (level 0) and refine with levels 1, 2, ...
    current = markers_levels[0].copy().astype(np.int32)
    # normalize: background is 1, boundaries are -1; labels >= 2 are objects
    # start the label id counter above the current maximum
    next_label = int(current.max()) + 1

    # helper: compute morphological properties for a mask
    def region_props_from_mask(mask_uint8):
        # returns a list of dicts with area, major axis (length), minor axis, aspect ratio, centroid
        cnts, _ = cv2.findContours(mask_uint8, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        props = []
        for cnt in cnts:
            area = cv2.contourArea(cnt)
            if area <= 0:
                continue
            if len(cnt) >= 5:
                try:
                    (x, y), (MA, ma), angle = cv2.fitEllipse(cnt)
                except Exception:
                    MA = ma = 0
                    x = y = 0
            else:
                # approximate with the bounding rect
                x, y, w, h = cv2.boundingRect(cnt)
                MA = max(w, h)
                ma = min(w, h)
                angle = 0
            minor = ma if ma > 0 else 1e-6
            aspect = float(max(MA, ma)) / (minor + 1e-6)
            props.append({
                'area': area,
                'major': max(MA, ma),
                'minor': minor,
                'aspect': aspect,
                'centroid': (float(x), float(y)),
                'contour': cnt
            })
        return props

    # iterate over the refinement levels
    for lvl in range(1, len(markers_levels)):
        finer = markers_levels[lvl]
        new_current = current.copy()
        unique_parents = np.unique(current)
        # ignore background and boundary labels
        for parent_label in unique_parents:
            if parent_label <= 1:  # 1 = background, 0 / -1 = unknown / boundary
                continue
            parent_mask = (current == parent_label)
            if parent_mask.sum() == 0:
                continue
            # find overlapping child labels in the finer segmentation (exclude background/boundary)
            overlapped = finer[parent_mask]
            child_labels = np.unique(overlapped[overlapped > 1])  # only valid object labels
            if len(child_labels) <= 1:
                # no split present in the finer level -> keep the parent
                continue

            # Evaluate each child component restricted to the parent mask
            accepted_children = []
            for cl in child_labels:
                child_mask = np.logical_and(finer == cl, parent_mask)
                child_mask_uint8 = child_mask.astype(np.uint8) * 255
                props = region_props_from_mask(child_mask_uint8)
                if len(props) == 0:
                    continue
                # take the largest prop if there are multiple contours
                p = max(props, key=lambda x: x['area'])
                # morphological filters
                if p['area'] >= min_area and p['major'] >= min_length and p['aspect'] >= min_aspect_ratio:
                    accepted_children.append((child_mask_uint8, p))
            # Decide whether to replace the parent by its children
            if len(accepted_children) >= 2:
                # remove the parent label and assign a new label to each accepted child
                new_current[parent_mask] = 0  # clear the parent region
                for (cmask_uint8, p) in accepted_children:
                    new_label = next_label
                    next_label += 1
                    new_current[cmask_uint8 == 255] = new_label
            # else keep the parent as-is (not split)
        # after processing all parents, adopt new_current;
        # watershed boundaries (-1) from the finer level are not carried forward
        current = new_current
        # optional small postprocess: fill small holes in current with a morphological close
        # (skipped for now to keep the result deterministic)

    # final labels are in 'current' (labels >= 2 are hair candidates)
    final_labels = current

    # Finally, extract contours for each label and apply ellipse fitting + the final filters
    valid_hairs = []
    unique_labels = np.unique(final_labels)
    for label in unique_labels:
        if label <= 1:
            continue
        mask = (final_labels == label).astype(np.uint8) * 255
        cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in cnts:
            area = cv2.contourArea(cnt)
            if area < min_area:
                continue
            if len(cnt) < 5:
                continue
            try:
                (x, y), (MA, ma), angle = cv2.fitEllipse(cnt)
                major_axis = max(MA, ma)
                minor_axis = min(MA, ma)
                aspect_ratio = major_axis / (minor_axis + 1e-6)
                if major_axis >= min_length and aspect_ratio >= min_aspect_ratio:
                    valid_hairs.append({
                        'centroid': (x, y),
                        'ellipse': ((x, y), (MA, ma), angle),
                        'length': major_axis,
                        'thickness': minor_axis,
                        'area': area,
                        'label': int(label)
                    })
            except Exception:
                continue

    return len(valid_hairs), valid_hairs

def create_visualization(true_original, sam_background, hair_info, filename, save_dir):
    h, w = true_original.shape[:2]
    overlay = sam_background.copy()
    if overlay.shape[:2] != (h, w):
        overlay = cv2.resize(overlay, (w, h), interpolation=cv2.INTER_LINEAR)
    for i, info in enumerate(hair_info):
        cv2.ellipse(overlay, info['ellipse'], (0, 255, 0), 2)
        cx, cy = map(int, info['centroid'])
        if w > 300:
            cv2.putText(overlay, str(i), (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
    border = np.zeros((h, 5, 3), dtype=np.uint8)
    combined = np.hstack([true_original, border, overlay])
    header_height = 50
    header = np.zeros((header_height, combined.shape[1], 3), dtype=np.uint8)
    info_text = f"{filename} | Count: {len(hair_info)}"
    cv2.putText(header, info_text, (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
    final_vis = np.vstack([header, combined])
    cv2.imwrite(os.path.join(save_dir, f'vis_{filename}'), final_vis)

def main(args, im):
    segment_path = os.path.join(args.img_folder, im)
    original_path = os.path.join(args.original_folder, im)
    sam_path = os.path.join(args.sam_folder, im)
    if not os.path.exists(segment_path):
        return 0, {}
    binary = load_segment_mask(segment_path)
    if binary is None:
        return 0, {}
    true_original = cv2.imread(original_path)
    if true_original is None:
        true_original = np.zeros((binary.shape[0], binary.shape[1], 3), dtype=np.uint8)
    sam_background = cv2.imread(sam_path)
    if sam_background is None:
        sam_background = cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)

    hair_count, hair_info = apply_watershed_hierarchical(
        binary,
        true_original,
        min_area=args.min_area,
        min_aspect_ratio=args.min_ratio,
        min_length=args.min_length,
        separation_factor=args.separation_factor,
        hierarchy_levels=args.hierarchy_levels
    )

    density_data = {
        'count': hair_count,
        'avg_thickness': float(np.mean([h['thickness'] for h in hair_info]) if hair_info else 0),
        'avg_length': float(np.mean([h['length'] for h in hair_info]) if hair_info else 0)
    }

    if args.draw_lines == 1:
        vis_dir = os.path.join(args.save_path, 'visualizations')
        os.makedirs(vis_dir, exist_ok=True)
        create_visualization(true_original, sam_background, hair_info, im, vis_dir)

    return hair_count, density_data

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--img_folder', type=str, default=DEFAULT_SEGMENT_FOLDER)
    parser.add_argument('--original_folder', type=str, default=DEFAULT_ORIGINAL_FOLDER)
    parser.add_argument('--sam_folder', type=str, default=DEFAULT_SEGMENT_FOLDER,
                        help="Folder containing the SAM-segmented images (right-hand visual).")
    parser.add_argument('--save_path', type=str, default=DEFAULT_SAVE_PATH)
    parser.add_argument('--label_csv', type=str, default="alopecia.csv")
    parser.add_argument('--min_area', type=int, default=1500, help="Minimum area (pixels).")
    parser.add_argument('--min_length', type=int, default=20, help="Minimum length (pixels).")
    parser.add_argument('--min_ratio', type=float, default=1.0, help="Minimum length/width ratio.")
    parser.add_argument('--separation_factor', type=float, default=0.3,
                        help="Splitting strength for touching hairs (0.1-0.9). Higher = more splits.")
    parser.add_argument('--hierarchy_levels', type=int, default=2,
                        help="Number of hierarchical (multi-level) refinement levels. 1 = no hierarchy.")
    parser.add_argument('--start', type=int, default=0)
    parser.add_argument('--end', type=int, default=20000)
    parser.add_argument('--draw_lines', type=int, default=1)
    args = parser.parse_args()

    print("🚀 Starting hair counting (Hierarchical Watershed).")
    print(f"   separation_factor: {args.separation_factor}, hierarchy_levels: {args.hierarchy_levels}")
    os.makedirs(args.save_path, exist_ok=True)

    img_names = []
    if os.path.exists(args.label_csv):
        try:
            df = pd.read_csv(args.label_csv)
            img_names = df["img_name"].values[args.start:args.end]
        except Exception:
            pass
    if len(img_names) == 0:
        import glob
        img_names = []  # reset to a plain list (the CSV branch may leave a NumPy array behind)
        for ext in ['*.jpg', '*.png', '*.jpeg']:
            full_paths = glob.glob(os.path.join(args.img_folder, ext))
            img_names.extend([os.path.basename(p) for p in full_paths])
        if len(img_names) == 0:
            img_names = ['230219_A191_1.jpg']

    print(f"🔢 Number of images to process: {len(img_names)}")
    results = {}
    density_results = {}

    for im in tqdm(img_names, desc="Processing"):
        count, density = main(args, im)
        if count > 0 or density:
            results[im] = count
            density_results[im] = density

    csv_path = os.path.join(args.save_path, 'hair_count.csv')
    with open(csv_path, 'w', newline='') as f:
        w = csv.writer(f)
        w.writerow(['image_name', 'hair_count'])
        for k, v in results.items():
            w.writerow([k, v])

    json_path = os.path.join(args.save_path, 'density.json')
    with open(json_path, 'w') as f:
        json.dump(density_results, f, indent=2)

    print(f"✅ Done! Visualizations at: {os.path.join(args.save_path, 'visualizations')}")
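In practice, the knobs that control over- and under-segmentation are `--separation_factor` and `--hierarchy_levels`, both defined in the argparse block above. A minimal sketch of a tuned invocation, reusing the same `subprocess` pattern that `auto_run.py` below uses; the parameter values are illustrative, not tuned defaults:

```python
import subprocess

# Re-run the counter with more aggressive splitting of touching hairs.
# Every flag below exists in the script's argparse; the values are illustrative.
subprocess.run([
    "python", "alopecia/calculate_hair_count.py",
    "--separation_factor", "0.5",   # 0.1-0.9; higher splits clumps more aggressively
    "--hierarchy_levels", "3",      # multi-level refinement; 1 disables the hierarchy
    "--draw_lines", "1",            # also write the side-by-side visualizations
], check=True)
```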
alopecia/calculate_hair_thickness.py
ADDED
@@ -0,0 +1,174 @@
import cv2
import numpy as np
import glob
import argparse
from tqdm import tqdm
import os
import warnings
warnings.filterwarnings('ignore')

def nms(boxes, thresh):
    # standard non-maximum suppression over the skeleton-centered boxes
    if len(boxes) == 0:
        return []
    pick = []
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)
    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[idxs[:last]]
        idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > thresh)[0])))
    return boxes[pick]

def find_pts_on_line(og, slope, d):
    cx, cy = og
    x1 = cx - d / ((1 + slope ** 2) ** 0.5)
    y1 = cy - slope * cx + x1 * slope
    if np.isnan(x1) or np.isnan(y1):
        x1 = y1 = -1
    return x1, y1

def find_intersection_points2(center, slope, img, threshold):
    # march outward from the center in both directions along the given slope
    # until the line leaves the bright (hair) region; the distance between the
    # two exit points is the local hair thickness
    p2 = p1 = (-1, -1)
    w, h = img.shape  # note: w = rows, h = cols; the bounds checks below match this naming
    step, searching_len = 100, 50
    for d in range(1, step * searching_len):
        px, py = find_pts_on_line(center, slope, d / step)
        if (0 < int(px) < h) and (0 < int(py) < w) and img[int(py)][int(px)] > threshold:
            p1 = (px, py)
        else:
            break
    for d in range(1, step * searching_len):
        px, py = find_pts_on_line(center, slope, -d / step)
        if (0 < int(px) < h) and (0 < int(py) < w) and img[int(py)][int(px)] > threshold:
            p2 = (px, py)
        else:
            break
    dst = 0 if p1 == (-1, -1) or p2 == (-1, -1) else np.linalg.norm(np.asarray(p1) - np.asarray(p2))
    return [p1, p2], dst

def get_direction2(bbox_pixels):
    # PCA over the nonzero skeleton pixels; the first eigenvector is the local hair direction
    nonzero_indices = np.column_stack(np.nonzero(bbox_pixels))
    nonzero_indices = np.float32(nonzero_indices)
    if len(nonzero_indices) >= 2:
        mean, eigenvectors = cv2.PCACompute(nonzero_indices, mean=None)
        cntr = (mean[0, 1], mean[0, 0])
        return eigenvectors[0], cntr
    else:
        return (0, 0), (0, 0)

def main(img_path, save_path, save_img=True):
    img = cv2.imread(img_path)
    imgray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    img_name = os.path.splitext(os.path.basename(img_path))[0]

    if np.all(imgray == 255) or np.all(imgray == 0):
        np.save(os.path.join(save_path, img_name), np.array([]))
        return

    ret, binary_map = cv2.threshold(imgray, 127, 255, 0)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binary_map, None, None, None, 8, cv2.CV_32S)
    areas = stats[1:, cv2.CC_STAT_AREA]
    result = np.zeros(labels.shape, np.uint8)
    for i in range(nlabels - 1):
        if areas[i] >= 250:
            result[labels == i + 1] = 255
    re_copy = result.copy()

    # skeletonization (morphological thinning)
    skel = np.zeros(result.shape, np.uint8)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    while True:
        opened = cv2.morphologyEx(result, cv2.MORPH_OPEN, element)  # renamed from `open` to avoid shadowing the builtin
        temp = cv2.subtract(result, opened)
        eroded = cv2.erode(result, element)
        skel = cv2.bitwise_or(skel, temp)
        result = eroded.copy()
        if cv2.countNonZero(result) == 0:
            break

    # remove small skeleton noise
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(skel, None, None, None, 8, cv2.CV_32S)
    areas = stats[1:, cv2.CC_STAT_AREA]
    skel = np.zeros(labels.shape, np.uint8)
    for i in range(nlabels - 1):
        if areas[i] >= 5:
            skel[labels == i + 1] = 255

    filtered_image = cv2.cvtColor(re_copy, cv2.COLOR_GRAY2BGR)

    # visualize the skeleton
    filtered_image[skel == 255] = [0, 255, 0]

    # bounding box extraction
    white_pixels = np.where(skel == 255)
    x_coords, y_coords = white_pixels[1], white_pixels[0]
    filter_size = (20, 20)
    x1, y1 = x_coords - filter_size[0] // 2, y_coords - filter_size[1] // 2
    x2, y2 = x_coords + filter_size[0] // 2, y_coords + filter_size[1] // 2
    white_regions = np.column_stack((x1, y1, x2, y2))
    white_regions = nms(white_regions, thresh=0.1)

    directions, center_points, thicknesses = [], [], []

    for coor in white_regions:
        x1, y1, x2, y2 = coor
        bbox_pixels = skel[y1:y2, x1:x2]
        direction, mean = get_direction2(bbox_pixels)
        directions.append(direction)
        center_points.append((mean[0] + x1, mean[1] + y1))

    # thickness is measured perpendicular to the local hair direction
    perpendicular_slope = []
    for direction in directions:
        if direction[1] != 0:
            perpendicular_slope.append(-1 / (direction[0] / direction[1]))
        else:
            perpendicular_slope.append(0)

    for center_point, perp_slope in zip(center_points, perpendicular_slope):
        intersection, dst = find_intersection_points2(center_point, perp_slope, re_copy, 200)
        if dst != 0:
            thicknesses.append(dst)
            if intersection[0] != (-1, -1) and intersection[1] != (-1, -1):
                cv2.line(filtered_image,
                         (int(intersection[0][0]), int(intersection[0][1])),
                         (int(intersection[1][0]), int(intersection[1][1])),
                         (0, 255, 255), 1)
                for pt in intersection:
                    cv2.circle(filtered_image, (int(pt[0]), int(pt[1])), 3, (0, 0, 255), -1)

    # display text
    if len(thicknesses) > 0:
        avg_thickness = np.mean(thicknesses)
        cv2.putText(filtered_image, f"Avg thickness: {avg_thickness:.2f}px",
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)

    if save_img:
        # save_img_path = os.path.join(save_path, f"{img_name}_vis.jpg")
        save_img_path = os.path.join(save_path, f"{img_name}_vis.png")
        cv2.imwrite(save_img_path, filtered_image)

    np.save(os.path.join(save_path, img_name), np.sort(thicknesses))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--img_folder', type=str, default='prediction/ensemble_result/ensemble_val')
    # parser.add_argument('--img_folder', type=str, default='datasets/seg_train/')
    parser.add_argument('--save_path', type=str, default='alopecia/thickness_result')
    args = parser.parse_args()

    os.makedirs(args.save_path, exist_ok=True)
    for im in tqdm(sorted(glob.glob(os.path.join(args.img_folder, '*.jpg')))):
        # for im in tqdm(sorted(glob.glob(os.path.join(args.img_folder, '*.png')))):
        main(im, args.save_path, save_img=True)
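The `.npy` files added later in this commit are the sorted per-measurement thickness arrays that `main()` writes via `np.save` (an empty array when the mask is blank). A minimal sketch for inspecting one, assuming the default output path above:

```python
import numpy as np

# Load one sorted thickness array written by calculate_hair_thickness.py.
thicknesses = np.load("alopecia/thickness_result/230219_A191_1.npy")
if thicknesses.size:
    print(f"{thicknesses.size} measurements, "
          f"median {np.median(thicknesses):.2f}px, mean {thicknesses.mean():.2f}px")
else:
    print("blank mask: no thickness measurements recorded")
```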
alopecia/count_result/.DS_Store
ADDED
Binary file (6.15 kB).
alopecia/count_result/density.json
ADDED
@@ -0,0 +1,47 @@
{
  "test_step7_merged_foreground_230219_A200_4.jpg": {
    "count": 8,
    "avg_thickness": 46.929224491119385,
    "avg_length": 450.0047779083252
  },
  "test_step7_merged_foreground_230219_A191_1_test.jpg": {
    "count": 10,
    "avg_thickness": 54.611060333251956,
    "avg_length": 241.15121002197264
  },
  "test_step7_merged_foreground_230219_A191_1.jpg": {
    "count": 10,
    "avg_thickness": 54.45850448608398,
    "avg_length": 238.0105888366699
  },
  "test_step7_merged_foreground_IMG_02C458E4D7CE-6.jpeg": {
    "count": 14,
    "avg_thickness": 51.27924510410854,
    "avg_length": 175.85291181291853
  },
  "test_step7_merged_foreground_IMG_02C458E4D7CE-7.jpeg": {
    "count": 8,
    "avg_thickness": 54.27548885345459,
    "avg_length": 192.99601650238037
  },
  "test_step7_merged_foreground_IMG_02C458E4D7CE-3.jpeg": {
    "count": 8,
    "avg_thickness": 61.52065181732178,
    "avg_length": 262.84897232055664
  },
  "test_step7_merged_foreground_IMG_02C458E4D7CE-4.jpeg": {
    "count": 10,
    "avg_thickness": 56.64297752380371,
    "avg_length": 114.51317901611328
  },
  "test_step7_merged_foreground_IMG_02C458E4D7CE-9.jpeg": {
    "count": 11,
    "avg_thickness": 57.8118123141202,
    "avg_length": 165.69457591663706
  },
  "test_step7_merged_foreground_IMG_02C458E4D7CE-5.jpeg": {
    "count": 11,
    "avg_thickness": 47.383614626797765,
    "avg_length": 199.2529823996804
  }
}
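A minimal sketch for consuming this summary downstream; it assumes only the key layout shown above (`count`, `avg_thickness`, `avg_length` per image):

```python
import json

# Load the per-image density summary written by calculate_hair_count.py.
with open("alopecia/count_result/density.json") as f:
    density = json.load(f)

for name, stats in density.items():
    print(f"{name}: count={stats['count']}, "
          f"avg_thickness={stats['avg_thickness']:.1f}px, "
          f"avg_length={stats['avg_length']:.1f}px")
```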
alopecia/count_result/hair_count.csv
ADDED
File without changes
alopecia/count_result/visualizations/vis_230219_A191_1.jpg
ADDED
Git LFS Details
alopecia/count_result/visualizations/vis_230219_A191_1_test.jpg
ADDED
Git LFS Details
alopecia/count_result/visualizations/vis_230219_A200_4.jpg
ADDED
Git LFS Details
alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-3.jpg
ADDED
Git LFS Details
alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-4.jpg
ADDED
Git LFS Details
alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-5.jpg
ADDED
Git LFS Details
alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-6.jpg
ADDED
Git LFS Details
alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-7.jpg
ADDED
Git LFS Details
alopecia/count_result/visualizations/vis_IMG_02C458E4D7CE-9.jpg
ADDED
Git LFS Details
alopecia/thickness_result/230219_A191_1.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6f01b1d91d76255879fd6e524486b74cef38076ccf9996864338fec28e6f6ec
size 1488
alopecia/thickness_result/230219_A191_1_test.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:607cc28b956e255a4e54b6685e8124f85a76eaa14f2c19edb2d449f4bc9f0b5b
size 1512
alopecia/thickness_result/230219_A191_1_test_vis.png
ADDED
alopecia/thickness_result/230219_A191_1_vis.png
ADDED
alopecia/thickness_result/230219_A200_4.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2351d330483fec1d79006cb84670aa1ebe97f707e7a8b2b9034eccc0b951b256
size 1744
alopecia/thickness_result/230219_A200_4_vis.png
ADDED
alopecia/thickness_result/IMG_02C458E4D7CE-3.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:199859cfc87e9aa66a5285c5adcd910ec64be9382b24ee942fef10461417d816
size 2368
alopecia/thickness_result/IMG_02C458E4D7CE-3_vis.png
ADDED
alopecia/thickness_result/IMG_02C458E4D7CE-4.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f88234e63aac39ba4c7eb6f1c63ad10608af32531d34b3e5fdac8a0af7952d5f
size 1696
alopecia/thickness_result/IMG_02C458E4D7CE-4_vis.png
ADDED
alopecia/thickness_result/IMG_02C458E4D7CE-5.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:56bd5bdebca503087087898b2bc2143f935c90ced8bd95b2c137251eb7b00a87
size 2600
alopecia/thickness_result/IMG_02C458E4D7CE-5_vis.png
ADDED
alopecia/thickness_result/IMG_02C458E4D7CE-6.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6380a99045f6ff977c1fa03b5364ae84d66e1429b510353272813161573397ea
size 2608
alopecia/thickness_result/IMG_02C458E4D7CE-6_vis.png
ADDED
alopecia/thickness_result/IMG_02C458E4D7CE-7.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4900a99a4b11a1041c48c204e2e9eb4aaa0bdb7bd2f14be84b8f6373003473b5
size 3360
alopecia/thickness_result/IMG_02C458E4D7CE-7_vis.png
ADDED
alopecia/thickness_result/IMG_02C458E4D7CE-9.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c1250d1867a193a28c301bba6f74cdd97bf549ebeb438bdc0e75ac560a2ac1a0
size 1952
alopecia/thickness_result/IMG_02C458E4D7CE-9_vis.png
ADDED
auto_run.py
ADDED
@@ -0,0 +1,26 @@
import subprocess

def run_script(script_path):
    print(f"\n🔹 Running: {script_path}")
    result = subprocess.run(["python", script_path], capture_output=True, text=True)

    if result.returncode == 0:
        print(f"✅ Finished: {script_path}\n")
    else:
        print(f"❌ Error while running {script_path}:\n{result.stderr}\n")

if __name__ == "__main__":
    # Scripts to run sequentially
    scripts = [
        "segmentation/u2net_test.py",
        "segmentation/sam_guide.py",
        "segmentation/sam_predict.py",
        "segmentation/make_final_mask.py",
        "alopecia/calculate_hair_thickness.py",
        "alopecia/calculate_hair_count.py"
    ]

    for script in scripts:
        run_script(script)

    print("🎉 Entire pipeline finished!")
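Note that the paths in `scripts` are relative (e.g. `segmentation/u2net_test.py`), so the runner presumably has to be launched from the repository root; started anywhere else, each `subprocess.run(["python", script_path], ...)` call would fail to find its script.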
complete_pipeline_out/binary_230219_A191_1.jpg
ADDED
complete_pipeline_out/binary_230219_A191_1_test.jpg
ADDED
complete_pipeline_out/binary_230219_A200_4.jpg
ADDED
complete_pipeline_out/binary_IMG_02C458E4D7CE-3.jpeg
ADDED
complete_pipeline_out/binary_IMG_02C458E4D7CE-4.jpeg
ADDED
complete_pipeline_out/binary_IMG_02C458E4D7CE-5.jpeg
ADDED
complete_pipeline_out/binary_IMG_02C458E4D7CE-6.jpeg
ADDED
complete_pipeline_out/complete_pipeline_230219_A191_1.jpg
ADDED
Git LFS Details
complete_pipeline_out/complete_pipeline_230219_A191_1_test.jpg
ADDED
Git LFS Details