Hussein El-Hadidy commited on
Commit ·
d28d04c
1
Parent(s): 1a5dbca
SkinBurns Segmentation
Browse files- .gitignore +4 -1
- SkinBurns_Segmentation.py +147 -43
- app.py +48 -0
.gitignore
CHANGED
|
@@ -3,4 +3,7 @@ venv/*
|
|
| 3 |
uploads/*
|
| 4 |
runs/*
|
| 5 |
screenshots/*
|
| 6 |
-
*.pyc
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
uploads/*
|
| 4 |
runs/*
|
| 5 |
screenshots/*
|
| 6 |
+
*.pyc
|
| 7 |
+
*.mp4
|
| 8 |
+
*.png
|
| 9 |
+
*.jpg
|
SkinBurns_Segmentation.py
CHANGED
|
@@ -1,68 +1,172 @@
|
|
|
|
|
|
|
|
| 1 |
import numpy as np
|
| 2 |
-
|
| 3 |
-
from
|
|
|
|
|
|
|
| 4 |
import tensorly as tl
|
| 5 |
from tensorly.decomposition import tucker
|
| 6 |
-
from sklearn.cluster import KMeans
|
| 7 |
import cv2
|
| 8 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
|
| 10 |
-
def
|
| 11 |
-
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
image = img_as_float(image)
|
| 14 |
-
|
| 15 |
|
| 16 |
-
#
|
|
|
|
|
|
|
| 17 |
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
|
| 18 |
image_lab[:, :, 0] = clahe.apply((image_lab[:, :, 0] * 255).astype(np.uint8)) / 255.0
|
| 19 |
|
| 20 |
-
# Gaussian
|
| 21 |
-
sigma_value = 1.5
|
| 22 |
image_lab_filtered = np.zeros_like(image_lab)
|
| 23 |
for i in range(3):
|
| 24 |
-
image_lab_filtered[:, :, i] = gaussian_filter(image_lab[:, :, i], sigma=
|
| 25 |
|
| 26 |
-
#
|
| 27 |
tensor_image = tl.tensor(image_lab_filtered)
|
|
|
|
|
|
|
| 28 |
|
| 29 |
-
#
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
| 32 |
|
| 33 |
-
#
|
| 34 |
-
|
|
|
|
|
|
|
| 35 |
|
| 36 |
-
#
|
| 37 |
-
|
|
|
|
|
|
|
| 38 |
|
| 39 |
-
#
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
|
|
|
|
|
|
| 45 |
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 51 |
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
|
| 56 |
-
#
|
| 57 |
segmentation_map = np.zeros((height, width, 3))
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
(burn_cluster + 2) % 3: [0, 0, 1]} # Blue for background
|
| 61 |
|
| 62 |
-
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
|
| 65 |
-
#
|
| 66 |
-
|
|
|
|
|
|
|
| 67 |
|
| 68 |
-
return
|
|
|
|
| 1 |
+
# ── Imports & Helper Functions ──
|
| 2 |
+
import os
|
| 3 |
import numpy as np
|
| 4 |
+
import matplotlib.pyplot as plt
|
| 5 |
+
from skimage import io, color, img_as_float
|
| 6 |
+
from scipy.ndimage import gaussian_filter, uniform_filter, binary_closing, label
|
| 7 |
+
from sklearn.cluster import KMeans
|
| 8 |
import tensorly as tl
|
| 9 |
from tensorly.decomposition import tucker
|
|
|
|
| 10 |
import cv2
|
| 11 |
+
from skimage.color import rgb2gray
|
| 12 |
+
|
| 13 |
+
def compute_local_variance(img, window_size=9):
    """Estimate the per-pixel local variance of a grayscale image.

    Uses the identity Var[x] = E[x^2] - E[x]^2, with both expectations
    computed by a square uniform (box) filter of side ``window_size``.

    Parameters
    ----------
    img : ndarray
        2-D grayscale image (float values expected).
    window_size : int, optional
        Side length of the square averaging window (default 9).

    Returns
    -------
    ndarray
        Non-negative local-variance map with the same shape as ``img``.
    """
    local_mean = uniform_filter(img, size=window_size)
    local_mean_sq = uniform_filter(img ** 2, size=window_size)
    # BUGFIX: E[x^2] - E[x]^2 can dip slightly below zero on near-constant
    # regions due to floating-point cancellation; clamp so callers can take
    # percentiles / sqrt safely.
    return np.maximum(local_mean_sq - local_mean ** 2, 0.0)
|
| 18 |
|
| 19 |
+
def largest_rectangle_in_mask(mask):
    """Find the largest axis-aligned rectangle of 1s inside a binary mask.

    Classic row-by-row "histogram of heights" sweep with a monotonic
    stack: O(h * w) overall.

    Parameters
    ----------
    mask : ndarray
        2-D array of 0/1 values.

    Returns
    -------
    tuple or None
        ``(y_start, y_end, x_start, x_end)`` with all bounds INCLUSIVE,
        or ``None`` when the mask contains no 1s.
    """
    h, w = mask.shape
    heights = [0] * w
    max_area = 0
    best_coords = None  # (y_start, y_end, x_start, x_end)

    for y in range(h):
        # Extend each column's run of consecutive 1s ending at this row.
        for x in range(w):
            heights[x] = heights[x] + 1 if mask[y, x] == 1 else 0

        # Largest rectangle in the current histogram (monotonic stack).
        stack = []
        x = 0
        while x <= w:
            curr_h = heights[x] if x < w else 0  # zero-height sentinel at x == w
            if not stack or curr_h >= heights[stack[-1]]:
                stack.append(x)
                x += 1
            else:
                top = stack.pop()
                width_rect = x if not stack else x - stack[-1] - 1
                area = heights[top] * width_rect
                if area > max_area:
                    max_area = area
                    y_end = y
                    y_start = y - heights[top] + 1
                    # BUGFIX: the rectangle's rightmost column is x - 1 —
                    # column x is the first bar SHORTER than heights[top].
                    # The original returned x, over-reporting the width by
                    # one column, which made the downstream inclusive crop
                    # image[y1:y2+1, x1:x2+1] include a column outside the
                    # mask.
                    x_end = x - 1
                    x_start = x - width_rect
                    best_coords = (y_start, y_end, x_start, x_end)
    return best_coords
|
| 48 |
+
# ── Main Segmentation Function ──
|
| 49 |
+
def segment_burn(patient_image_path: str,
                 reference_image_path: str,
                 threshold_texture: float = 0.35,
                 threshold_color: float = 10):
    """Segment the burned region of a patient photo and crop it out.

    Pipeline: Lab conversion + CLAHE -> Gaussian smoothing -> Tucker
    low-rank reconstruction -> local-variance texture map -> KMeans over
    (L, a, b, texture) features -> burn-likelihood scoring of clusters ->
    morphological cleanup -> optional refinement against a healthy-skin
    reference image -> largest solid rectangle crop.

    Parameters
    ----------
    patient_image_path : str
        Path to the patient's RGB image.
    reference_image_path : str
        Path to the healthy-reference RGB image; if the file does not
        exist, the unrefined mask is used as-is.
    threshold_texture, threshold_color : float
        Tuning parameters for the reference-based refinement: pixels
        both color-close to healthy skin AND low-texture are dropped
        from the burn mask.

    Returns
    -------
    tuple
        (burn_crop_clean, burn_crop_debug):
        - burn_crop_clean: cropped burn region (float RGB ndarray)
        - burn_crop_debug: uint8 copy of the full image with the crop
          rectangle drawn on it

    Raises
    ------
    ValueError
        If no solid rectangular burn area is found.
    """
    # --- Load patient image ---
    image = io.imread(patient_image_path)
    if image.shape[-1] == 4:
        # Drop the alpha channel of RGBA inputs.
        image = image[..., :3]
    image = img_as_float(image)
    height, width, _ = image.shape

    # --- Preprocessing & feature extraction ---
    # 1. Convert to Lab + CLAHE on L
    image_lab = color.rgb2lab(image)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    # NOTE(review): skimage's rgb2lab yields L in [0, 100], so `* 255`
    # wraps around in uint8 before CLAHE — confirm the intended scaling
    # (probably `/ 100 * 255` was meant).
    image_lab[:, :, 0] = clahe.apply((image_lab[:, :, 0] * 255).astype(np.uint8)) / 255.0

    # 2. Gaussian smoothing (per channel, sigma = 1.5)
    image_lab_filtered = np.zeros_like(image_lab)
    for i in range(3):
        image_lab_filtered[:, :, i] = gaussian_filter(image_lab[:, :, i], sigma=1.5)

    # 3. Tucker decomposition + reconstruction (low-rank denoising of
    #    spatial structure; channel rank kept at 3)
    tensor_image = tl.tensor(image_lab_filtered)
    core, factors = tucker(tensor_image, rank=[30, 30, 3])
    reconstructed = tl.tucker_to_tensor((core, factors))

    # 4. Texture map (local variance), robust-normalized to [0, 1]
    #    via the 5th/95th percentiles
    gray = rgb2gray(image)
    local_var = compute_local_variance(gray, window_size=9)
    vmin, vmax = np.percentile(local_var, [5, 95])
    local_var = np.clip((local_var - vmin) / (vmax - vmin), 0, 1)

    # 5. Build feature vectors: one row per pixel = (L, a, b, texture)
    lab_feat = reconstructed.reshape(-1, 3)
    texture_feat = local_var.flatten()[:, None]
    features = np.hstack((lab_feat, texture_feat))

    # --- KMeans clustering & burn scoring ---
    n_clusters = 5
    kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42)
    labels = kmeans.fit_predict(features).reshape(height, width)

    # Score clusters for burn likelihood: redness (a-channel), darkness /
    # desaturation, and texture each contribute to the score.
    cluster_means = [features[labels.flatten()==i].mean(axis=0) for i in range(n_clusters)]
    burn_scores = []
    for L, A, B, T in cluster_means:
        sat = np.sqrt(A**2 + B**2)
        red = A
        dark = (1 - L) + (1 - sat)
        burn_scores.append(0.4*red + 0.2*dark + 0.4*T)

    # Keep the two highest-scoring clusters as burn candidates.
    burn_clusters = np.argsort(burn_scores)[-2:]
    # skin cluster = cluster closest to healthy skin color
    # NOTE(review): the target [0.7, 0, 0] looks like a 0-1-scaled L
    # value while Lab L here is on a 0-100 scale — verify this constant.
    skin_cluster = np.argmin([np.linalg.norm(np.array([L,A,B]) - np.array([0.7,0,0]))
                              for L,A,B,T in cluster_means])

    # Initial burn mask + morphology: close small holes, then keep only
    # the largest connected component.
    burn_mask = np.isin(labels, burn_clusters).astype(np.uint8)
    closed = binary_closing(burn_mask, structure=np.ones((10,10)))
    labeled, _ = label(closed)
    sizes = np.bincount(labeled.ravel()); sizes[0]=0
    largest = (labeled == sizes.argmax()).astype(np.uint8)

    # --- Reference-based refinement ---
    # If a healthy-skin reference exists, run it through the same
    # preprocessing and drop mask pixels that look like healthy skin
    # (color-close AND low-texture).
    if os.path.exists(reference_image_path):
        ref = io.imread(reference_image_path)
        if ref.shape[-1]==4:
            ref = ref[..., :3]
        ref = img_as_float(ref)
        ref_lab = color.rgb2lab(ref)
        # NOTE(review): same L-channel scaling concern as above.
        ref_lab[:, :, 0] = clahe.apply((ref_lab[:, :, 0]*255).astype(np.uint8))/255.0

        ref_lab_f = np.zeros_like(ref_lab)
        for i in range(3):
            ref_lab_f[:, :, i] = gaussian_filter(ref_lab[:, :, i], sigma=1.5)

        ref_gray = rgb2gray(ref)
        ref_var = compute_local_variance(ref_gray, window_size=9)
        rvmin, rvmax = np.percentile(ref_var, [5,95])
        ref_var = np.clip((ref_var - rvmin)/(rvmax - rvmin),0,1)

        # Single mean feature vector representing "healthy skin".
        ref_feat = np.hstack((ref_lab_f.reshape(-1,3), ref_var.flatten()[:,None]))
        healthy_vec = ref_feat.mean(axis=0)

        final_burn = np.zeros_like(largest)
        for y in range(height):
            for x in range(width):
                if largest[y,x]:
                    pix_vec = np.append(reconstructed[y,x,:], local_var[y,x])
                    dist = np.linalg.norm(pix_vec - healthy_vec)
                    # Keep the pixel unless it is BOTH color-close to
                    # healthy skin and low-texture.
                    if not (dist < threshold_color and local_var[y,x] < threshold_texture):
                        final_burn[y,x] = 1
    else:
        final_burn = largest

    # (Optional) segmentation map if you need it later
    # NOTE(review): computed but never returned or used below.
    segmentation_map = np.zeros((height, width, 3))
    segmentation_map[labels==skin_cluster] = [0,1,0]
    segmentation_map[final_burn==1] = [1,0,0]

    # --- Crop the largest solid rectangle within the burn mask ---
    coords = largest_rectangle_in_mask(final_burn)
    if coords is None:
        raise ValueError("No solid rectangular burn area found.")
    y1,y2,x1,x2 = coords
    burn_crop_clean = image[y1:y2+1, x1:x2+1]

    # --- Build debug overlay image ---
    debug_img = (image * 255).astype(np.uint8).copy()
    cv2.rectangle(debug_img, (x1,y1), (x2,y2), (0,255,255), thickness=3)
    burn_crop_debug = debug_img

    return burn_crop_clean, burn_crop_debug
|
app.py
CHANGED
|
@@ -24,6 +24,8 @@ import cv2
|
|
| 24 |
import time
|
| 25 |
from CPR.CPRAnalyzer import CPRAnalyzer
|
| 26 |
import tempfile
|
|
|
|
|
|
|
| 27 |
|
| 28 |
|
| 29 |
|
|
@@ -145,6 +147,52 @@ async def predict_burn(file: UploadFile = File(...)):
|
|
| 145 |
return JSONResponse(content={"error": str(e)}, status_code=500)
|
| 146 |
|
| 147 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 148 |
|
| 149 |
# ✅ Optimize and transform image URL
|
| 150 |
@app.get("/cloudinary/transform")
|
|
|
|
| 24 |
import time
|
| 25 |
from CPR.CPRAnalyzer import CPRAnalyzer
|
| 26 |
import tempfile
|
| 27 |
+
import matplotlib.pyplot as plt
|
| 28 |
+
|
| 29 |
|
| 30 |
|
| 31 |
|
|
|
|
| 147 |
return JSONResponse(content={"error": str(e)}, status_code=500)
|
| 148 |
|
| 149 |
|
| 150 |
+
@app.post("/segment_burn")
async def segment_burn_endpoint(reference: UploadFile = File(...), patient: UploadFile = File(...)):
    """Segment the burn region of an uploaded patient image.

    Saves the two uploads to temp files, runs ``segment_burn``, uploads the
    cropped outputs to Cloudinary, and returns their secure URLs.

    Returns
    -------
    dict with ``crop_clean_url`` and ``crop_debug_url`` on success, or a
    JSONResponse with status 500 and an ``error`` message on failure.
    """
    # Track every temp file we create so cleanup happens on ALL paths.
    temp_paths = []
    try:
        # Save the reference image temporarily.
        # NOTE(review): embedding client-supplied filenames in paths is a
        # path-traversal risk — consider tempfile / sanitized names.
        reference_path = f"temp_ref_{reference.filename}"
        temp_paths.append(reference_path)
        with open(reference_path, "wb") as ref_file:
            ref_file.write(await reference.read())

        # Save the patient image temporarily
        patient_path = f"temp_patient_{patient.filename}"
        temp_paths.append(patient_path)
        with open(patient_path, "wb") as pat_file:
            pat_file.write(await patient.read())

        # Call the segmentation logic
        burn_crop_clean, burn_crop_debug = segment_burn(patient_path, reference_path)

        # Save the cropped outputs under collision-proof names
        burn_crop_clean_path = f"temp_burn_crop_clean_{uuid.uuid4()}.png"
        temp_paths.append(burn_crop_clean_path)
        burn_crop_debug_path = f"temp_burn_crop_debug_{uuid.uuid4()}.png"
        temp_paths.append(burn_crop_debug_path)

        plt.imsave(burn_crop_clean_path, burn_crop_clean)
        plt.imsave(burn_crop_debug_path, burn_crop_debug)

        # Upload to Cloudinary
        crop_clean_upload = cloudinary.uploader.upload(burn_crop_clean_path, public_id=f"ref_{reference.filename}")
        crop_debug_upload = cloudinary.uploader.upload(burn_crop_debug_path, public_id=f"pat_{patient.filename}")

        return {
            "crop_clean_url": crop_clean_upload["secure_url"],
            "crop_debug_url": crop_debug_upload["secure_url"]
        }

    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)
    finally:
        # BUGFIX: the original only removed the two PNG outputs on the
        # success path — the uploaded input files were never deleted, and
        # all four temp files leaked whenever any step raised.
        for path in temp_paths:
            if os.path.exists(path):
                os.remove(path)
|
| 195 |
+
|
| 196 |
|
| 197 |
# ✅ Optimize and transform image URL
|
| 198 |
@app.get("/cloudinary/transform")
|