marrtinagg committed on
Commit
4b80d99
·
1 Parent(s): 05e999d

add gradio interface with preprocessing and model prediction

Browse files
app.py CHANGED
@@ -1,27 +1,65 @@
import gradio as gr
import tensorflow as tf

from huggingface_hub import hf_hub_download


def check_model():
    """Verify that the simpleNet model can be downloaded and loaded.

    Returns a human-readable status string (Spanish, user-facing) instead of
    raising, so the Gradio UI always gets text to display.
    """
    try:
        # 📥 Download the simpleNet.h5 file from the model repository
        model_path = hf_hub_download(repo_id="Martinagg/simpleNet", filename="simpleNet.h5")

        # 🔄 Load the model
        model = tf.keras.models.load_model(model_path)

        return "✅ Modelo cargado correctamente desde Hugging Face"
    except Exception as e:
        # Broad catch is deliberate: any failure (network, HDF5, Keras) is
        # reported to the UI rather than crashing the Space.
        return f"❌ Error cargando modelo: {str(e)}"

# 🎛️ Simple Gradio interface (just one button)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Minimal interface: no inputs; clicking "Submit" runs check_model and shows
# the returned status text.
demo = gr.Interface(
    fn=check_model,
    inputs=None,
    outputs="text",
    title="Prueba de carga - simpleNet",
    description="Este Space solo verifica que el modelo simpleNet se carga desde Hugging Face."
)

if __name__ == "__main__":
    demo.launch()
 
 
import gradio as gr
import tensorflow as tf
import numpy as np
import cv2
from huggingface_hub import hf_hub_download
from tensorflow.keras.preprocessing import image

# === Import pipeline functions ===
from src.preprocessing.zoom import apply_zoom
from src.preprocessing.hair_removal import quitar_pelos
from src.preprocessing.segmentation import segmentar_lesion

# Model input size (height, width) expected by simpleNet
ROWS, COLS = 224, 224

# === Load the model from the Hugging Face Hub ===
# NOTE(review): this runs at import time — if the download or load fails,
# the whole Space fails to start instead of showing an error in the UI.
model_path = hf_hub_download(repo_id="Martinagg/simpleNet", filename="simpleNet.h5")
model = tf.keras.models.load_model(model_path)
20
# === Prediction function ===
def preprocess_and_predict(img_input):
    """Run the full pipeline on an uploaded PIL image.

    Steps: centered zoom -> hair removal -> lesion segmentation -> CNN
    prediction.

    Parameters
    ----------
    img_input : PIL.Image.Image
        Image uploaded through the Gradio interface.

    Returns
    -------
    tuple
        (binary mask, segmented RGB lesion, result text).
    """
    # Force 3-channel RGB so the channel swap below also works for
    # grayscale or RGBA uploads (the original code assumed 3 channels
    # and crashed otherwise).
    img_input = img_input.convert("RGB")

    # Convert PIL (RGB) to OpenCV layout (BGR)
    img = np.array(img_input)[:, :, ::-1]

    # 1. Centered zoom (crops border artifacts such as vignetting)
    zoomed = apply_zoom(img, zoom_factor=0.9)

    # 2. Hair removal (helper expects RGB)
    rgb_clean = cv2.cvtColor(zoomed, cv2.COLOR_BGR2RGB)
    clean = quitar_pelos(rgb_clean)

    # 3. Segmentation (binary mask + masked lesion image)
    mask, lesion_rgb = segmentar_lesion(clean, size=(ROWS, COLS))

    # 4. Prepare the segmented image for the model
    lesion_resized = cv2.resize(lesion_rgb, (ROWS, COLS))
    img_array = image.img_to_array(lesion_resized) / 255.0
    img_array = np.expand_dims(img_array, axis=0)  # add batch dimension

    # 5. Prediction
    # NOTE(review): class order assumes index 0 = Benign, 1 = Malignant in
    # the trained model — confirm against the training label encoding.
    probs = model.predict(img_array)[0]
    classes = ["Benign", "Malignant"]
    pred_idx = np.argmax(probs)
    pred_label = classes[pred_idx]

    result_text = f"Predicción: {pred_label} ({probs[pred_idx]*100:.2f}%)"

    return mask, lesion_rgb, result_text
49
+
50
# === Gradio interface ===
# One image input; three outputs: binary mask, segmented lesion, and the
# model's textual prediction.
demo = gr.Interface(
    fn=preprocess_and_predict,
    inputs=gr.Image(type="pil", label="Sube una imagen de lesión"),
    outputs=[
        gr.Image(type="numpy", label="Máscara Binaria"),
        gr.Image(type="numpy", label="Lesión Segmentada"),
        gr.Textbox(label="Resultado del Modelo")
    ],
    title="DermaScan - Clasificación de Lesiones",
    description="Sube una imagen de piel. El sistema segmenta la lesión, muestra la máscara y la predicción (benigno vs maligno)."
)

if __name__ == "__main__":
    demo.launch()
preprocessing/hair_removal.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+
4
def quitar_pelos(rgb, kernel_size=9, threshold=10, inpaint_radius=3):
    """Remove hairs from an RGB image using black-hat filtering + inpainting.

    Parameters
    ----------
    rgb : np.ndarray
        Input image in RGB channel order, shape (H, W, 3), dtype uint8.
    kernel_size : int
        Side length of the rectangular structuring element for the black-hat
        transform (default 9, matching the original hard-coded value).
    threshold : int
        Gray-level cutoff applied to the black-hat response to build the
        hair mask (default 10, as before).
    inpaint_radius : int
        Neighborhood radius passed to ``cv2.inpaint`` (default 3, as before).

    Returns
    -------
    np.ndarray
        The input image with hair pixels reconstructed from their
        surroundings.
    """
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)

    # Black-hat highlights thin dark structures (hairs) over brighter skin.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))
    blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, kernel)

    # Binarize the response into a hair mask, then paint over those pixels.
    _, hair_mask = cv2.threshold(blackhat, threshold, 255, cv2.THRESH_BINARY)
    rgb_clean = cv2.inpaint(rgb, hair_mask, inpaint_radius, cv2.INPAINT_TELEA)

    return rgb_clean
preprocessing/metrics.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import math
4
+
5
def _leer_mask(mask):
    """Read a mask given either as a file path or as a numpy array.

    Parameters
    ----------
    mask : str | np.ndarray | None
        Path to an image file, or an already-loaded array (2-D grayscale or
        3-channel color).

    Returns
    -------
    np.ndarray
        Single-channel (grayscale) mask array.

    Raises
    ------
    ValueError
        If the mask is None or the path cannot be read.
    """
    if isinstance(mask, str):  # given as a file path
        mask = cv2.imread(mask, cv2.IMREAD_GRAYSCALE)

    # Check for None BEFORE touching .ndim: the original checked None last,
    # so a literal None argument crashed with AttributeError instead of the
    # intended ValueError.
    if mask is None:
        raise ValueError("No se pudo leer la máscara.")

    if mask.ndim == 3:  # RGB/BGR input -> grayscale
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    return mask
15
+
16
+
17
def calcular_area(mask):
    """Return the pixel area of the largest contour in the binarized mask.

    Returns 0.0 when the mask has no contours or the area is not finite;
    otherwise the area rounded to 2 decimals.
    """
    gris = _leer_mask(mask)
    _, binaria = cv2.threshold(gris, 127, 255, cv2.THRESH_BINARY)
    contornos, _ = cv2.findContours(binaria, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    if not contornos:
        return 0.0

    mayor = max(contornos, key=cv2.contourArea)
    area = float(cv2.contourArea(mayor))
    if not np.isfinite(area):
        return 0.0
    return round(area, 2)
28
+
29
+
30
def calcular_perimetro(mask):
    """Return the perimeter of the largest contour in the binarized mask.

    Returns 0.0 when the mask has no contours or the perimeter is not
    finite; otherwise the closed-contour arc length rounded to 2 decimals.
    """
    gris = _leer_mask(mask)
    _, binaria = cv2.threshold(gris, 127, 255, cv2.THRESH_BINARY)
    contornos, _ = cv2.findContours(binaria, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    if not contornos:
        return 0.0

    mayor = max(contornos, key=cv2.contourArea)
    longitud = float(cv2.arcLength(mayor, True))
    if not np.isfinite(longitud):
        return 0.0
    return round(longitud, 2)
41
+
42
+
43
def calcular_circularidad(mask):
    """Return the circularity 4*pi*A / P^2 of the largest contour.

    A perfect circle scores 1; elongated or ragged shapes score lower. The
    value is clipped to [0, 1] and rounded to 4 decimals; degenerate masks
    (no contours, zero area or perimeter, non-finite result) return 0.0.
    """
    gris = _leer_mask(mask)
    _, binaria = cv2.threshold(gris, 127, 255, cv2.THRESH_BINARY)
    contornos, _ = cv2.findContours(binaria, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    if not contornos:
        return 0.0

    mayor = max(contornos, key=cv2.contourArea)
    area = cv2.contourArea(mayor)
    perimetro = cv2.arcLength(mayor, True)

    # Guard against division by zero for degenerate contours.
    if area == 0 or perimetro == 0:
        return 0.0

    circ = (4 * math.pi * area) / (perimetro ** 2)
    if not np.isfinite(circ):
        circ = 0.0

    return round(float(np.clip(circ, 0, 1)), 4)
65
+
66
+
67
def calcular_simetria(mask):
    """Return (vertical, horizontal) symmetry scores of the mask, each in [0, 1].

    The lesion is cropped to its bounding box and centered on a square
    canvas; each score is 1 minus the XOR mismatch between one half and the
    mirrored opposite half, normalized by the total lesion area. 1 means
    perfectly symmetric.
    """
    mask = _leer_mask(mask)
    # Binarize to {0, 1} so sums count pixels directly.
    _, mask_bin = cv2.threshold(mask, 127, 1, cv2.THRESH_BINARY)

    if np.sum(mask_bin) == 0:
        return 0.0, 0.0

    # Crop to the lesion's bounding box.
    y, x = np.where(mask_bin > 0)
    y_min, y_max = y.min(), y.max()
    x_min, x_max = x.min(), x.max()
    roi = mask_bin[y_min:y_max+1, x_min:x_max+1]

    # Center the ROI on a square canvas so the flipped halves align.
    h, w = roi.shape
    size = max(h, w)
    canvas = np.zeros((size, size), dtype=np.uint8)
    y_off, x_off = (size - h)//2, (size - w)//2
    canvas[y_off:y_off+h, x_off:x_off+w] = roi
    mask_centered = canvas

    # Centroid of the centered lesion: the split line for each comparison.
    cy, cx = np.mean(np.column_stack(np.where(mask_centered > 0)), axis=0).astype(int)
    area_total = np.sum(mask_centered)

    if area_total == 0:
        return 0.0, 0.0

    # --- vertical symmetry: left half vs mirrored right half ---
    left = mask_centered[:, :cx]
    right = mask_centered[:, cx:]
    right_flipped = np.fliplr(right)
    # Halves can differ by one column when cx is off-center; compare overlap.
    min_width = min(left.shape[1], right_flipped.shape[1])
    xor_v = np.logical_xor(left[:, :min_width], right_flipped[:, :min_width])
    sim_v = 1 - (np.sum(xor_v) / area_total)

    # --- horizontal symmetry: top half vs mirrored bottom half ---
    top = mask_centered[:cy, :]
    bottom = mask_centered[cy:, :]
    bottom_flipped = np.flipud(bottom)
    min_height = min(top.shape[0], bottom_flipped.shape[0])
    xor_h = np.logical_xor(top[:min_height, :], bottom_flipped[:min_height, :])
    sim_h = 1 - (np.sum(xor_h) / area_total)

    # Clamp numeric noise into [0, 1].
    sim_v = max(0.0, min(1.0, sim_v))
    sim_h = max(0.0, min(1.0, sim_h))

    return round(sim_v, 3), round(sim_h, 3)
preprocessing/segmentation.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+
4
def segmentar_lesion(rgb, size=(224, 224)):
    """Segment the lesion in an RGB skin image.

    Pipeline: resize -> Otsu threshold on the HSV saturation channel ->
    keep the largest centered connected component -> fill internal holes ->
    mask the RGB image.

    Parameters
    ----------
    rgb : np.ndarray
        Input image in RGB order, dtype uint8.
    size : tuple[int, int]
        Output (width, height) passed to cv2.resize.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        (binary mask with values 0/255, masked RGB image), both at `size`.
    """
    # Resize and threshold the saturation channel: lesions are typically
    # more saturated than surrounding skin.
    rgb = cv2.resize(rgb, size)
    hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
    S = hsv[:, :, 1]
    S_blur = cv2.GaussianBlur(S, (5, 5), 0)

    # Otsu picks the threshold automatically.
    _, mask = cv2.threshold(S_blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Select the largest, most centered region.
    num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask)
    h, w = mask.shape

    # FIX: when thresholding produces no foreground component at all
    # (num_labels == 1, background only), the original code crashed with
    # ValueError on argmax over an empty slice. Return empty results instead.
    if num_labels <= 1:
        empty = np.zeros_like(mask)
        return empty, cv2.bitwise_and(rgb, rgb, mask=empty)

    center = np.array([w / 2, h / 2])

    valid_regions = []
    for i in range(1, num_labels):
        area = stats[i, cv2.CC_STAT_AREA]
        cX, cY = centroids[i]
        dist = np.linalg.norm(np.array([cX, cY]) - center)
        # Only keep components whose centroid lies near the image center.
        if dist < min(w, h) / 3:
            valid_regions.append((i, area, dist))

    # Prefer the largest centered region; otherwise fall back to the
    # largest component overall.
    if len(valid_regions) == 0:
        largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
    else:
        largest_label = max(valid_regions, key=lambda x: x[1])[0]

    largest_mask = np.zeros_like(mask)
    largest_mask[labels == largest_label] = 255

    # Fill internal holes: any component of the inverted mask that does not
    # touch the image border is a hole inside the lesion.
    inv = cv2.bitwise_not(largest_mask)
    num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(inv)
    for i in range(1, num_labels):
        x, y, w_box, h_box, area = stats[i]
        if x > 0 and y > 0 and (x + w_box) < w and (y + h_box) < h:
            inv[labels == i] = 0
    filled_mask = cv2.bitwise_not(inv)

    # Apply the mask to the (cleaned) RGB image.
    lesion_rgb = cv2.bitwise_and(rgb, rgb, mask=filled_mask)

    return filled_mask, lesion_rgb
preprocessing/zoom.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+
3
def apply_zoom(image, zoom_factor=0.9):
    """Apply a centered crop-zoom to the image.

    Crops the central ``zoom_factor`` fraction of the image and resizes it
    back to the original dimensions, effectively zooming in.

    Parameters
    ----------
    image : np.ndarray
        Input image, shape (H, W[, C]).
    zoom_factor : float
        Fraction of each dimension to keep, in (0, 1]. 1.0 keeps the whole
        image.

    Raises
    ------
    ValueError
        If zoom_factor is outside (0, 1] — larger values would produce
        negative crop offsets and silently wrong output.
    """
    if not 0 < zoom_factor <= 1:
        raise ValueError(f"zoom_factor must be in (0, 1], got {zoom_factor}")

    h, w = image.shape[:2]
    new_h, new_w = int(h * zoom_factor), int(w * zoom_factor)

    # Centered crop window.
    top = (h - new_h) // 2
    left = (w - new_w) // 2
    cropped = image[top:top+new_h, left:left+new_w]

    # Scale back up to the original size.
    zoomed = cv2.resize(cropped, (w, h))
    return zoomed
requirements.txt CHANGED
@@ -2,3 +2,6 @@ gradio
2
  tensorflow
3
  huggingface_hub
4
  numpy
 
 
 
 
2
  tensorflow
3
  huggingface_hub
4
  numpy
5
+ opencv-python-headless
6
+ pandas
7
+ tqdm