Hussein El-Hadidy committed on
Commit ·
8ed1da1
1
Parent(s): d460d97
Updated Burns
Browse files- SkinBurns_Classification.py +146 -59
- SkinBurns_Preprocessing.py +0 -0
- SkinBurns_Segmentation.py +45 -54
- app.py +2 -2
SkinBurns_Classification.py
CHANGED
|
@@ -1,65 +1,152 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import cv2
|
| 3 |
-
import numpy as
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
import
|
| 9 |
-
|
| 10 |
-
from sklearn.
|
| 11 |
-
import
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
return
|
| 51 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
|
|
|
|
|
|
|
|
|
|
| 53 |
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
return None
|
| 60 |
-
color_hist = extract_color_histogram(image)
|
| 61 |
-
lbp_features = extract_lbp_features(image)
|
| 62 |
-
glcm_features = extract_glcm_features(image)
|
| 63 |
-
invariant_moments = extract_invariant_moments(image)
|
| 64 |
-
return np.hstack([color_hist, lbp_features, glcm_features, invariant_moments])
|
| 65 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os as _os
|
| 2 |
+
import cv2 as _cv2
|
| 3 |
+
import numpy as _np
|
| 4 |
+
import glob as _glob
|
| 5 |
+
import joblib as _jb
|
| 6 |
+
import matplotlib.pyplot as _plt
|
| 7 |
+
|
| 8 |
+
from skimage.feature import local_binary_pattern as _lbp, graycomatrix as _gcm, graycoprops as _gcp
|
| 9 |
+
from sklearn.model_selection import train_test_split as _tts
|
| 10 |
+
from sklearn.metrics import classification_report as _cr, accuracy_score as _acc
|
| 11 |
+
from sklearn.svm import SVC as _S
|
| 12 |
+
from sklearn.ensemble import RandomForestClassifier as _RF
|
| 13 |
+
from sklearn.linear_model import LogisticRegression as _LR
|
| 14 |
+
|
| 15 |
+
# distances and angles for GLCM
def generateAngles():
    """Return the (angles, distances) pair used to build GLCM matrices.

    Angles are five evenly spaced directions 0, π/5, …, 4π/5; distances are
    the fixed pixel-pair offsets 1.2, 2.2 and 3.1.
    """
    angles = [i * _np.pi / 5 for i in range(5)]  # 0, π/5, 2π/5, 3π/5, 4π/5
    distances = [1.2, 2.2, 3.1]  # same values the old string-parse produced
    return angles, distances
|
| 21 |
+
|
| 22 |
+
# GLCM sweep parameters, computed once at import time.
(_bestangles, _bestglcmdist) = generateAngles()

# best parameters for LBP
_bestpoints = 9  # number of circularly symmetric neighbour points
_bestradius = 1  # radius of the LBP sampling circle
|
| 27 |
+
|
| 28 |
+
# SVM hyperparameters
def generateSVM_Args():
    """Return the keyword arguments used to construct the SVM classifier."""
    return dict(
        kernel='linear',
        C=2.1,
        gamma='scale',
        random_state=42,
    )
|
| 35 |
+
|
| 36 |
+
# Random Forest hyperparameters
def generateRF_Args():
    """Return the keyword arguments for the RandomForest classifier."""
    # n_jobs=-1 uses all available cores for fitting.
    return dict(n_estimators=110, random_state=40, n_jobs=-1)
|
| 42 |
+
|
| 43 |
+
# Logistic Regression hyperparameters
def generateLR_Args():
    """Return the keyword arguments for the LogisticRegression classifier.

    NOTE(review): `multi_class` is deprecated in recent scikit-learn
    releases; confirm the pinned sklearn version before upgrading.
    """
    return dict(
        multi_class='multinomial',
        solver='lbfgs',
        max_iter=900,
        random_state=37,
    )
|
| 50 |
+
|
| 51 |
+
# Color feature extraction: compute 3D histogram component A
def fun_colorA(img):
    """Raw 3-D BGR histogram of ``img`` with 5 bins per channel (5x5x5)."""
    channels = [0, 1, 2]
    bins = (5, 5, 5)
    ranges = [0, 256, 0, 256, 0, 256]
    return _cv2.calcHist([img], channels, None, bins, ranges)
|
| 54 |
+
|
| 55 |
+
# Color feature extraction: normalize and flatten histogram
def fun_colorB(hist_raw):
    """Normalize ``hist_raw`` in place (cv2 default norm) and return it flattened to 1-D."""
    _cv2.normalize(hist_raw, hist_raw)  # in-place; mutates the caller's array
    return hist_raw.flatten()
|
| 59 |
+
|
| 60 |
+
# Full color histogram feature
def fun_color(img):
    """Full color-histogram feature: raw 3-D histogram, then normalize + flatten."""
    raw = fun_colorA(img)
    return fun_colorB(raw)
|
| 63 |
+
|
| 64 |
+
# Convert image to grayscale for LBP
def lbpGray(img):
    """Grayscale conversion (BGR input assumed, per cv2.imread) for the LBP stage."""
    gray = _cv2.cvtColor(img, _cv2.COLOR_BGR2GRAY)
    return gray
|
| 67 |
+
|
| 68 |
+
# Compute LBP map with specified points and radius
def lbpMap(gray_img):
    """Uniform LBP map using the module-level best (points, radius) settings."""
    return _lbp(gray_img, _bestpoints, _bestradius, method='uniform')
|
| 71 |
|
| 72 |
+
# Compute normalized histogram of LBP map
def lbpHist(lbp_map):
    """Normalized histogram of an LBP map.

    Uniform LBP with P points yields codes 0..P+1, hence P+2 bins
    (edges 0..P+2).  The histogram sums to ~1; the 1e-6 term guards
    against division by zero on an empty map.
    """
    edges = _np.arange(0, _bestpoints + 3)
    counts, _ = _np.histogram(lbp_map.ravel(), bins=edges, range=(0, _bestpoints + 2))
    counts = counts.astype('float')
    return counts / (counts.sum() + 1e-6)
|
| 78 |
|
| 79 |
+
# Full LBP feature pipeline: grayscale -> map -> histogram
def lbpFeature(img):
    """LBP feature for one image: grayscale, LBP map, normalized histogram."""
    return lbpHist(lbpMap(lbpGray(img)))
|
| 84 |
|
| 85 |
+
# Convert image to grayscale for GLCM
def glcmGray(img):
    """Grayscale conversion (BGR input assumed, per cv2.imread) for the GLCM stage."""
    gray = _cv2.cvtColor(img, _cv2.COLOR_BGR2GRAY)
    return gray
|
| 88 |
|
| 89 |
+
# Compute GLCM matrix given distances and angles
def glcmMatrix(gray_img):
    """Symmetric, normalized GLCM over the module-level best distances/angles."""
    return _gcm(gray_img, distances=_bestglcmdist, angles=_bestangles,
                symmetric=True, normed=True)
|
| 92 |
+
|
| 93 |
+
# Extract a single GLCM property vector
def glcmProperties(glcm_mat, prop_name):
    """Flattened property values for one GLCM property.

    graycoprops returns a (n_distances, n_angles) array, so the result
    has length n_distances * n_angles.
    """
    return _gcp(glcm_mat, prop_name).flatten()
|
| 96 |
+
|
| 97 |
+
# Full GLCM feature pipeline: grayscale -> GLCM matrix -> properties
def glcmFeature(img):
    """GLCM feature vector: four properties stacked over all distance/angle pairs."""
    glcm = glcmMatrix(glcmGray(img))
    prop_names = ("contrast", "dissimilarity", "homogeneity", "energy")
    # One flattened (n_distances * n_angles) vector per property, concatenated.
    return _np.hstack([glcmProperties(glcm, name) for name in prop_names])
|
| 106 |
+
|
| 107 |
+
# Convert image to grayscale for Hu moments
def huGray(img):
    """Grayscale conversion (BGR input assumed, per cv2.imread) for the Hu-moment stage."""
    gray = _cv2.cvtColor(img, _cv2.COLOR_BGR2GRAY)
    return gray
|
| 110 |
+
|
| 111 |
+
# Compute Hu Moments from grayscale image
def huMoments(img):
    """The seven Hu invariant moments of ``img``, flattened to a length-7 vector."""
    spatial = _cv2.moments(huGray(img))
    return _cv2.HuMoments(spatial).flatten()
|
| 116 |
+
|
| 117 |
+
# Combine all feature types for a given image path
def FullFeautures(pathF):
    """Build the full feature vector for the image stored at ``pathF``.

    (The name keeps its historical typo "Feautures" because callers --
    e.g. app.py -- import it under exactly this name.)

    Returns a 1-D float array concatenating, in order:
      - color histogram   (5*5*5 = 125 values),
      - uniform LBP hist  (_bestpoints + 2 = 11 bins),
      - GLCM properties   (4 props x 3 distances x 5 angles = 60 values),
      - Hu moments        (7 values),
    or None when the image cannot be read.
    """
    img = _cv2.imread(pathF)
    if img is None:
        print(f"[Warning] cannot read {pathF}")
        return None

    # The previous version split each block into two halves and extended a
    # list with both halves -- a no-op; direct concatenation is equivalent
    # and produces the identical vector.
    return _np.hstack([
        fun_color(img),    # color histogram, length 125
        lbpFeature(img),   # LBP histogram, length 11
        glcmFeature(img),  # GLCM features, length 60
        huMoments(img),    # Hu moments, length 7
    ])
|
SkinBurns_Preprocessing.py
DELETED
|
File without changes
|
SkinBurns_Segmentation.py
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
-
# ── Imports & Helper Functions ──
|
| 2 |
import os
|
| 3 |
import numpy as np
|
| 4 |
import matplotlib.pyplot as plt
|
|
@@ -10,96 +9,88 @@ from tensorly.decomposition import tucker
|
|
| 10 |
import cv2
|
| 11 |
from skimage.color import rgb2gray
|
| 12 |
|
| 13 |
-
def
|
| 14 |
img_sq = img ** 2
|
| 15 |
mean = uniform_filter(img, size=window_size)
|
| 16 |
mean_sq = uniform_filter(img_sq, size=window_size)
|
| 17 |
return mean_sq - mean ** 2
|
| 18 |
|
| 19 |
-
def
|
| 20 |
h, w = mask.shape
|
| 21 |
heights = [0] * w
|
| 22 |
-
|
| 23 |
-
|
| 24 |
|
| 25 |
-
for
|
| 26 |
-
|
| 27 |
-
heights[
|
|
|
|
|
|
|
| 28 |
|
| 29 |
stack = []
|
| 30 |
-
|
| 31 |
-
while
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
|
|
|
| 36 |
else:
|
| 37 |
top = stack.pop()
|
| 38 |
-
|
| 39 |
-
area = heights[top] *
|
| 40 |
-
if area >
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
return
|
| 48 |
-
|
| 49 |
def segment_burn(patient_image_path,
|
| 50 |
reference_image_path,
|
| 51 |
threshold_texture=0.35,
|
| 52 |
threshold_color=10):
|
| 53 |
-
|
| 54 |
-
Takes:
|
| 55 |
-
- patient_image_path: path to the RGB image
|
| 56 |
-
- reference_image_path: path to the healthy-reference RGB image
|
| 57 |
-
- threshold_texture, threshold_color: tuning parameters
|
| 58 |
-
|
| 59 |
-
Returns:
|
| 60 |
-
- burn_crop_clean: the cropped burn region (as an ndarray)
|
| 61 |
-
- burn_crop_debug: original image with the crop rectangle overlaid
|
| 62 |
-
"""
|
| 63 |
-
# --- Load patient image ---
|
| 64 |
image = io.imread(patient_image_path)
|
| 65 |
if image.shape[-1] == 4:
|
| 66 |
image = image[..., :3]
|
| 67 |
image = img_as_float(image)
|
| 68 |
height, width, _ = image.shape
|
| 69 |
|
| 70 |
-
|
| 71 |
-
# 1. Convert to Lab + CLAHE on L
|
| 72 |
image_lab = color.rgb2lab(image)
|
| 73 |
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
|
| 74 |
image_lab[:, :, 0] = clahe.apply((image_lab[:, :, 0] * 255).astype(np.uint8)) / 255.0
|
| 75 |
|
| 76 |
-
|
| 77 |
image_lab_filtered = np.zeros_like(image_lab)
|
| 78 |
for i in range(3):
|
| 79 |
image_lab_filtered[:, :, i] = gaussian_filter(image_lab[:, :, i], sigma=1.5)
|
| 80 |
|
| 81 |
-
|
| 82 |
tensor_image = tl.tensor(image_lab_filtered)
|
| 83 |
core, factors = tucker(tensor_image, rank=[30, 30, 3])
|
| 84 |
reconstructed = tl.tucker_to_tensor((core, factors))
|
|
|
|
| 85 |
|
| 86 |
-
|
| 87 |
gray = rgb2gray(image)
|
| 88 |
-
local_var =
|
| 89 |
vmin, vmax = np.percentile(local_var, [5, 95])
|
| 90 |
local_var = np.clip((local_var - vmin) / (vmax - vmin), 0, 1)
|
| 91 |
|
| 92 |
-
|
| 93 |
lab_feat = reconstructed.reshape(-1, 3)
|
| 94 |
texture_feat = local_var.flatten()[:, None]
|
| 95 |
features = np.hstack((lab_feat, texture_feat))
|
| 96 |
|
| 97 |
-
|
| 98 |
n_clusters = 5
|
| 99 |
kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42)
|
| 100 |
labels = kmeans.fit_predict(features).reshape(height, width)
|
| 101 |
|
| 102 |
-
# Score clusters for burn likelihood
|
| 103 |
cluster_means = [features[labels.flatten()==i].mean(axis=0) for i in range(n_clusters)]
|
| 104 |
burn_scores = []
|
| 105 |
for L, A, B, T in cluster_means:
|
|
@@ -109,18 +100,18 @@ def segment_burn(patient_image_path,
|
|
| 109 |
burn_scores.append(0.4*red + 0.2*dark + 0.4*T)
|
| 110 |
|
| 111 |
burn_clusters = np.argsort(burn_scores)[-2:]
|
| 112 |
-
|
| 113 |
skin_cluster = np.argmin([np.linalg.norm(np.array([L,A,B]) - np.array([0.7,0,0]))
|
| 114 |
for L,A,B,T in cluster_means])
|
| 115 |
|
| 116 |
-
|
| 117 |
burn_mask = np.isin(labels, burn_clusters).astype(np.uint8)
|
| 118 |
closed = binary_closing(burn_mask, structure=np.ones((10,10)))
|
| 119 |
labeled, _ = label(closed)
|
| 120 |
sizes = np.bincount(labeled.ravel()); sizes[0]=0
|
| 121 |
largest = (labeled == sizes.argmax()).astype(np.uint8)
|
| 122 |
|
| 123 |
-
|
| 124 |
if os.path.exists(reference_image_path):
|
| 125 |
ref = io.imread(reference_image_path)
|
| 126 |
if ref.shape[-1]==4:
|
|
@@ -134,7 +125,7 @@ def segment_burn(patient_image_path,
|
|
| 134 |
ref_lab_f[:, :, i] = gaussian_filter(ref_lab[:, :, i], sigma=1.5)
|
| 135 |
|
| 136 |
ref_gray = rgb2gray(ref)
|
| 137 |
-
ref_var =
|
| 138 |
rvmin, rvmax = np.percentile(ref_var, [5,95])
|
| 139 |
ref_var = np.clip((ref_var - rvmin)/(rvmax - rvmin),0,1)
|
| 140 |
|
|
@@ -152,21 +143,21 @@ def segment_burn(patient_image_path,
|
|
| 152 |
else:
|
| 153 |
final_burn = largest
|
| 154 |
|
| 155 |
-
|
| 156 |
segmentation_map = np.zeros((height, width, 3))
|
| 157 |
segmentation_map[labels==skin_cluster] = [0,1,0]
|
| 158 |
segmentation_map[final_burn==1] = [1,0,0]
|
| 159 |
|
| 160 |
-
|
| 161 |
-
coords =
|
| 162 |
if coords is None:
|
| 163 |
raise ValueError("No solid rectangular burn area found.")
|
| 164 |
y1,y2,x1,x2 = coords
|
| 165 |
burn_crop_clean = image[y1:y2+1, x1:x2+1]
|
| 166 |
|
| 167 |
-
|
| 168 |
debug_img = (image * 255).astype(np.uint8).copy()
|
| 169 |
cv2.rectangle(debug_img, (x1,y1), (x2,y2), (0,255,255), thickness=3)
|
| 170 |
burn_crop_debug = debug_img
|
| 171 |
|
| 172 |
-
return burn_crop_clean, burn_crop_debug
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
import numpy as np
|
| 3 |
import matplotlib.pyplot as plt
|
|
|
|
| 9 |
import cv2
|
| 10 |
from skimage.color import rgb2gray
|
| 11 |
|
| 12 |
+
def LocalVar(img, window_size=9):
    """Local variance of ``img`` over a square sliding window.

    Uses the identity Var[X] = E[X^2] - E[X]^2, estimating both
    expectations with a uniform (box) filter of side ``window_size``.
    NOTE: the result can dip marginally below zero from floating-point
    cancellation.
    """
    local_mean = uniform_filter(img, size=window_size)
    local_mean_of_sq = uniform_filter(img ** 2, size=window_size)
    return local_mean_of_sq - local_mean ** 2
|
| 17 |
|
| 18 |
+
def largestRect(mask):
    """Find the largest axis-aligned rectangle of 1s in a 2-D binary mask.

    Classic 'largest rectangle in a histogram' sweep: for each row, track
    the height of the run of consecutive 1s ending at that row in every
    column, then resolve the best rectangle with a monotonic stack.

    Returns (top_y, bottom_y, left_x, right_x) with inclusive bounds, or
    None when the mask contains no 1s.
    """
    n_rows, n_cols = mask.shape
    col_heights = [0] * n_cols
    best_area = 0
    best_coords = None

    for y, row in enumerate(mask):
        # Heights of vertical runs of 1s ending at row y.
        col_heights = [
            col_heights[c] + 1 if row[c] == 1 else 0
            for c in range(n_cols)
        ]

        pending = []  # stack of column indices with non-decreasing heights
        c = 0
        while c <= n_cols:
            # Sentinel height 0 past the last column flushes the stack.
            h = col_heights[c] if c < n_cols else 0
            if not pending or h >= col_heights[pending[-1]]:
                pending.append(c)
                c += 1
            else:
                top = pending.pop()
                width = c if not pending else c - pending[-1] - 1
                area = col_heights[top] * width
                if area > best_area:
                    best_area = area
                    best_coords = (
                        y - col_heights[top] + 1,  # top_y
                        y,                         # bottom_y
                        c - width,                 # left_x
                        c - 1,                     # right_x
                    )
    return best_coords
|
| 50 |
+
|
| 51 |
def segment_burn(patient_image_path,
|
| 52 |
reference_image_path,
|
| 53 |
threshold_texture=0.35,
|
| 54 |
threshold_color=10):
|
| 55 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
image = io.imread(patient_image_path)
|
| 57 |
if image.shape[-1] == 4:
|
| 58 |
image = image[..., :3]
|
| 59 |
image = img_as_float(image)
|
| 60 |
height, width, _ = image.shape
|
| 61 |
|
| 62 |
+
|
|
|
|
| 63 |
image_lab = color.rgb2lab(image)
|
| 64 |
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
|
| 65 |
image_lab[:, :, 0] = clahe.apply((image_lab[:, :, 0] * 255).astype(np.uint8)) / 255.0
|
| 66 |
|
| 67 |
+
|
| 68 |
image_lab_filtered = np.zeros_like(image_lab)
|
| 69 |
for i in range(3):
|
| 70 |
image_lab_filtered[:, :, i] = gaussian_filter(image_lab[:, :, i], sigma=1.5)
|
| 71 |
|
| 72 |
+
|
| 73 |
tensor_image = tl.tensor(image_lab_filtered)
|
| 74 |
core, factors = tucker(tensor_image, rank=[30, 30, 3])
|
| 75 |
reconstructed = tl.tucker_to_tensor((core, factors))
|
| 76 |
+
reconstructed_rgb = np.clip(color.lab2rgb(reconstructed), 0, 1)
|
| 77 |
|
| 78 |
+
|
| 79 |
gray = rgb2gray(image)
|
| 80 |
+
local_var = LocalVar(gray, window_size=9)
|
| 81 |
vmin, vmax = np.percentile(local_var, [5, 95])
|
| 82 |
local_var = np.clip((local_var - vmin) / (vmax - vmin), 0, 1)
|
| 83 |
|
| 84 |
+
|
| 85 |
lab_feat = reconstructed.reshape(-1, 3)
|
| 86 |
texture_feat = local_var.flatten()[:, None]
|
| 87 |
features = np.hstack((lab_feat, texture_feat))
|
| 88 |
|
| 89 |
+
|
| 90 |
n_clusters = 5
|
| 91 |
kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42)
|
| 92 |
labels = kmeans.fit_predict(features).reshape(height, width)
|
| 93 |
|
|
|
|
| 94 |
cluster_means = [features[labels.flatten()==i].mean(axis=0) for i in range(n_clusters)]
|
| 95 |
burn_scores = []
|
| 96 |
for L, A, B, T in cluster_means:
|
|
|
|
| 100 |
burn_scores.append(0.4*red + 0.2*dark + 0.4*T)
|
| 101 |
|
| 102 |
burn_clusters = np.argsort(burn_scores)[-2:]
|
| 103 |
+
|
| 104 |
skin_cluster = np.argmin([np.linalg.norm(np.array([L,A,B]) - np.array([0.7,0,0]))
|
| 105 |
for L,A,B,T in cluster_means])
|
| 106 |
|
| 107 |
+
|
| 108 |
burn_mask = np.isin(labels, burn_clusters).astype(np.uint8)
|
| 109 |
closed = binary_closing(burn_mask, structure=np.ones((10,10)))
|
| 110 |
labeled, _ = label(closed)
|
| 111 |
sizes = np.bincount(labeled.ravel()); sizes[0]=0
|
| 112 |
largest = (labeled == sizes.argmax()).astype(np.uint8)
|
| 113 |
|
| 114 |
+
|
| 115 |
if os.path.exists(reference_image_path):
|
| 116 |
ref = io.imread(reference_image_path)
|
| 117 |
if ref.shape[-1]==4:
|
|
|
|
| 125 |
ref_lab_f[:, :, i] = gaussian_filter(ref_lab[:, :, i], sigma=1.5)
|
| 126 |
|
| 127 |
ref_gray = rgb2gray(ref)
|
| 128 |
+
ref_var = LocalVar(ref_gray, window_size=9)
|
| 129 |
rvmin, rvmax = np.percentile(ref_var, [5,95])
|
| 130 |
ref_var = np.clip((ref_var - rvmin)/(rvmax - rvmin),0,1)
|
| 131 |
|
|
|
|
| 143 |
else:
|
| 144 |
final_burn = largest
|
| 145 |
|
| 146 |
+
|
| 147 |
segmentation_map = np.zeros((height, width, 3))
|
| 148 |
segmentation_map[labels==skin_cluster] = [0,1,0]
|
| 149 |
segmentation_map[final_burn==1] = [1,0,0]
|
| 150 |
|
| 151 |
+
|
| 152 |
+
coords = largestRect(final_burn)
|
| 153 |
if coords is None:
|
| 154 |
raise ValueError("No solid rectangular burn area found.")
|
| 155 |
y1,y2,x1,x2 = coords
|
| 156 |
burn_crop_clean = image[y1:y2+1, x1:x2+1]
|
| 157 |
|
| 158 |
+
|
| 159 |
debug_img = (image * 255).astype(np.uint8).copy()
|
| 160 |
cv2.rectangle(debug_img, (x1,y1), (x2,y2), (0,255,255), thickness=3)
|
| 161 |
burn_crop_debug = debug_img
|
| 162 |
|
| 163 |
+
return burn_crop_clean, burn_crop_debug
|
app.py
CHANGED
|
@@ -9,7 +9,7 @@ from pymongo.server_api import ServerApi
|
|
| 9 |
import cloudinary
|
| 10 |
import cloudinary.uploader
|
| 11 |
from cloudinary.utils import cloudinary_url
|
| 12 |
-
from SkinBurns_Classification import
|
| 13 |
from SkinBurns_Segmentation import segment_burn
|
| 14 |
import requests
|
| 15 |
import joblib
|
|
@@ -86,7 +86,7 @@ async def predict_burn(file: UploadFile = File(...)):
|
|
| 86 |
loaded_svm = pickle.load(model_file)
|
| 87 |
|
| 88 |
# Extract features from the uploaded image
|
| 89 |
-
features =
|
| 90 |
|
| 91 |
# Remove the temporary file
|
| 92 |
os.remove(temp_file_path)
|
|
|
|
| 9 |
import cloudinary
|
| 10 |
import cloudinary.uploader
|
| 11 |
from cloudinary.utils import cloudinary_url
|
| 12 |
+
from SkinBurns_Classification import FullFeautures
|
| 13 |
from SkinBurns_Segmentation import segment_burn
|
| 14 |
import requests
|
| 15 |
import joblib
|
|
|
|
| 86 |
loaded_svm = pickle.load(model_file)
|
| 87 |
|
| 88 |
# Extract features from the uploaded image
|
| 89 |
+
features = FullFeautures(temp_file_path)
|
| 90 |
|
| 91 |
# Remove the temporary file
|
| 92 |
os.remove(temp_file_path)
|