Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
|
@@ -10,22 +10,20 @@ import torch.nn.functional as F
|
|
| 10 |
import warnings
|
| 11 |
import os
|
| 12 |
|
| 13 |
-
#
|
| 14 |
warnings.filterwarnings("ignore")
|
| 15 |
|
| 16 |
-
print("🔍
|
| 17 |
|
| 18 |
-
# ---
|
| 19 |
-
# Separamos los modelos en dos categorías para mejor explicación al usuario.
|
| 20 |
-
# Los modelos especializados en piel son generalmente más fiables para esta tarea.
|
| 21 |
MODEL_CONFIGS = {
|
| 22 |
-
"
|
| 23 |
{
|
| 24 |
'name': 'Syaha Skin Cancer',
|
| 25 |
'id': 'syaha/skin_cancer_detection_model',
|
| 26 |
'type': 'custom',
|
| 27 |
'accuracy': 0.82,
|
| 28 |
-
'description': 'CNN
|
| 29 |
'emoji': '🩺'
|
| 30 |
},
|
| 31 |
{
|
|
@@ -33,7 +31,7 @@ MODEL_CONFIGS = {
|
|
| 33 |
'id': 'VRJBro/skin-cancer-detection',
|
| 34 |
'type': 'custom',
|
| 35 |
'accuracy': 0.85,
|
| 36 |
-
'description': '
|
| 37 |
'emoji': '🎯'
|
| 38 |
},
|
| 39 |
{
|
|
@@ -41,7 +39,7 @@ MODEL_CONFIGS = {
|
|
| 41 |
'id': 'Anwarkh1/Skin_Cancer-Image_Classification',
|
| 42 |
'type': 'vit',
|
| 43 |
'accuracy': 0.89,
|
| 44 |
-
'description': '
|
| 45 |
'emoji': '🧠'
|
| 46 |
},
|
| 47 |
{
|
|
@@ -49,34 +47,33 @@ MODEL_CONFIGS = {
|
|
| 49 |
'id': 'jhoppanne/SkinCancerClassifier_smote-V0',
|
| 50 |
'type': 'custom',
|
| 51 |
'accuracy': 0.86,
|
| 52 |
-
'description': '
|
| 53 |
'emoji': '⚖️'
|
| 54 |
},
|
| 55 |
-
# --- NUEVOS MODELOS ESPECIALIZADOS AÑADIDOS ---
|
| 56 |
{
|
| 57 |
'name': 'ViT ISIC Binary',
|
| 58 |
'id': 'ahishamm/vit-base-binary-isic-sharpened-patch-32',
|
| 59 |
'type': 'vit',
|
| 60 |
-
'accuracy': 0.89,
|
| 61 |
-
'description': 'ViT
|
| 62 |
'emoji': '🔬'
|
| 63 |
},
|
| 64 |
{
|
| 65 |
'name': 'ViT ISIC Multi-class',
|
| 66 |
'id': 'ahishamm/vit-base-isic-patch-16',
|
| 67 |
'type': 'vit',
|
| 68 |
-
'accuracy': 0.79,
|
| 69 |
-
'description': 'ViT
|
| 70 |
'emoji': '🔍'
|
| 71 |
}
|
| 72 |
],
|
| 73 |
-
"
|
| 74 |
{
|
| 75 |
'name': 'ViT Base General',
|
| 76 |
'id': 'google/vit-base-patch16-224',
|
| 77 |
'type': 'vit',
|
| 78 |
'accuracy': 0.78,
|
| 79 |
-
'description': 'ViT base pre-
|
| 80 |
'emoji': '📈'
|
| 81 |
},
|
| 82 |
{
|
|
@@ -84,7 +81,7 @@ MODEL_CONFIGS = {
|
|
| 84 |
'id': 'microsoft/resnet-50',
|
| 85 |
'type': 'custom',
|
| 86 |
'accuracy': 0.77,
|
| 87 |
-
'description': '
|
| 88 |
'emoji': '⚙️'
|
| 89 |
},
|
| 90 |
{
|
|
@@ -92,7 +89,7 @@ MODEL_CONFIGS = {
|
|
| 92 |
'id': 'facebook/deit-base-patch16-224',
|
| 93 |
'type': 'vit',
|
| 94 |
'accuracy': 0.79,
|
| 95 |
-
'description': 'Data-efficient Image Transformer,
|
| 96 |
'emoji': '💡'
|
| 97 |
},
|
| 98 |
{
|
|
@@ -100,7 +97,7 @@ MODEL_CONFIGS = {
|
|
| 100 |
'id': 'google/mobilenet_v2_1.0_224',
|
| 101 |
'type': 'custom',
|
| 102 |
'accuracy': 0.72,
|
| 103 |
-
'description': '
|
| 104 |
'emoji': '📱'
|
| 105 |
},
|
| 106 |
{
|
|
@@ -108,37 +105,32 @@ MODEL_CONFIGS = {
|
|
| 108 |
'id': 'microsoft/swin-tiny-patch4-window7-224',
|
| 109 |
'type': 'custom',
|
| 110 |
'accuracy': 0.81,
|
| 111 |
-
'description': 'Swin Transformer (Tiny),
|
| 112 |
'emoji': '🌀'
|
| 113 |
},
|
| 114 |
-
# Modelo de respaldo genérico final (si nada más funciona)
|
| 115 |
{
|
| 116 |
'name': 'ViT Base General (Fallback)',
|
| 117 |
'id': 'google/vit-base-patch16-224-in21k',
|
| 118 |
'type': 'vit',
|
| 119 |
'accuracy': 0.75,
|
| 120 |
-
'description': 'ViT
|
| 121 |
'emoji': '🔄'
|
| 122 |
}
|
| 123 |
]
|
| 124 |
}
|
| 125 |
|
| 126 |
-
# ---
|
| 127 |
loaded_models = {}
|
| 128 |
model_performance = {}
|
| 129 |
|
| 130 |
def load_model_safe(config):
|
| 131 |
-
"""
|
| 132 |
try:
|
| 133 |
model_id = config['id']
|
| 134 |
model_type = config['type']
|
| 135 |
-
print(f"🔄
|
| 136 |
-
|
| 137 |
-
# Intentar cargar con revisiones específicas para evitar problemas de safetensors/float16
|
| 138 |
-
# Si PyTorch es 2.6.0, es posible que 'safetensors' aún no sea 100% estable en todos los modelos/configuraciones
|
| 139 |
-
# y que el soporte de float16 requiera revisión específica.
|
| 140 |
-
revisions_to_try = ["main", "no_float16_weights", None] # None intentará el valor por defecto
|
| 141 |
|
|
|
|
| 142 |
processor = None
|
| 143 |
model = None
|
| 144 |
load_successful = False
|
|
@@ -146,57 +138,52 @@ def load_model_safe(config):
|
|
| 146 |
for revision in revisions_to_try:
|
| 147 |
try:
|
| 148 |
if revision:
|
| 149 |
-
print(f"
|
| 150 |
processor = AutoImageProcessor.from_pretrained(model_id, revision=revision)
|
| 151 |
model = AutoModelForImageClassification.from_pretrained(model_id, revision=revision)
|
| 152 |
else:
|
| 153 |
processor = AutoImageProcessor.from_pretrained(model_id)
|
| 154 |
model = AutoModelForImageClassification.from_pretrained(model_id)
|
| 155 |
load_successful = True
|
| 156 |
-
break
|
| 157 |
except Exception as e_rev:
|
| 158 |
-
print(f"
|
| 159 |
-
if model_type == 'vit' and revision is None:
|
| 160 |
try:
|
| 161 |
processor = ViTImageProcessor.from_pretrained(model_id)
|
| 162 |
model = ViTForImageClassification.from_pretrained(model_id)
|
| 163 |
load_successful = True
|
| 164 |
break
|
| 165 |
except Exception as e_vit:
|
| 166 |
-
print(f"
|
| 167 |
-
continue
|
| 168 |
|
| 169 |
if not load_successful:
|
| 170 |
-
raise Exception("
|
| 171 |
|
| 172 |
model.eval()
|
| 173 |
|
| 174 |
-
# Verificar que el modelo funciona con una entrada dummy
|
| 175 |
test_input = processor(Image.new('RGB', (224, 224), color='white'), return_tensors="pt")
|
| 176 |
with torch.no_grad():
|
| 177 |
-
|
| 178 |
|
| 179 |
-
print(f"✅ {config['emoji']} {config['name']}
|
| 180 |
|
| 181 |
return {
|
| 182 |
'processor': processor,
|
| 183 |
'model': model,
|
| 184 |
'config': config,
|
| 185 |
-
'
|
| 186 |
-
'category': config.get('category', 'general') # Añadimos la categoría aquí
|
| 187 |
}
|
| 188 |
|
| 189 |
except Exception as e:
|
| 190 |
-
print(f"❌ {config['emoji']} {config['name']}
|
| 191 |
-
print(f" Error detallado: {type(e).__name__}")
|
| 192 |
return None
|
| 193 |
|
| 194 |
-
|
| 195 |
-
print("\n📦
|
| 196 |
-
# Recorrer ambas categorías de modelos
|
| 197 |
for category, configs in MODEL_CONFIGS.items():
|
| 198 |
for config in configs:
|
| 199 |
-
# Añadir la categoría al diccionario de configuración antes de pasar a load_model_safe
|
| 200 |
config['category'] = category
|
| 201 |
model_data = load_model_safe(config)
|
| 202 |
if model_data:
|
|
@@ -204,187 +191,87 @@ for category, configs in MODEL_CONFIGS.items():
|
|
| 204 |
model_performance[config['name']] = config.get('accuracy', 0.8)
|
| 205 |
|
| 206 |
if not loaded_models:
|
| 207 |
-
print("❌ No
|
| 208 |
-
# Modelos de respaldo - más amplios
|
| 209 |
fallback_models = [
|
| 210 |
'google/vit-base-patch16-224-in21k',
|
| 211 |
-
'microsoft/resnet-50'
|
| 212 |
-
'google/vit-large-patch16-224'
|
| 213 |
]
|
| 214 |
|
| 215 |
for fallback_id in fallback_models:
|
| 216 |
try:
|
| 217 |
-
print(f"🔄
|
| 218 |
processor = AutoImageProcessor.from_pretrained(fallback_id)
|
| 219 |
model = AutoModelForImageClassification.from_pretrained(fallback_id)
|
| 220 |
model.eval()
|
| 221 |
|
| 222 |
-
loaded_models[f'
|
| 223 |
'processor': processor,
|
| 224 |
'model': model,
|
| 225 |
-
'config': {
|
| 226 |
-
|
| 227 |
-
'emoji': '🏥',
|
| 228 |
-
'accuracy': 0.75,
|
| 229 |
-
'type': 'fallback',
|
| 230 |
-
'category': 'general' # El de respaldo es general
|
| 231 |
-
},
|
| 232 |
-
'category': 'general', # El de respaldo es general
|
| 233 |
-
'type': 'standard'
|
| 234 |
}
|
| 235 |
-
print(f"✅
|
| 236 |
break
|
| 237 |
except Exception as e:
|
| 238 |
-
print(f"❌
|
| 239 |
continue
|
| 240 |
|
| 241 |
-
|
| 242 |
-
print(f"❌ ERROR CRÍTICO: No se pudo cargar ningún modelo")
|
| 243 |
-
print("💡 Verifica tu conexión a internet y que tengas transformers instalado")
|
| 244 |
-
loaded_models['Modelo Dummy'] = {
|
| 245 |
-
'type': 'dummy',
|
| 246 |
-
'config': {'name': 'Modelo No Disponible', 'emoji': '❌', 'accuracy': 0.0},
|
| 247 |
-
'category': 'dummy'
|
| 248 |
-
}
|
| 249 |
-
|
| 250 |
-
# Clases de lesiones de piel (HAM10000 dataset)
|
| 251 |
CLASSES = [
|
| 252 |
-
"
|
| 253 |
-
"
|
| 254 |
-
"
|
| 255 |
"Dermatofibroma (DF)",
|
| 256 |
-
"Melanoma
|
| 257 |
-
"Nevus
|
| 258 |
-
"
|
| 259 |
]
|
| 260 |
|
| 261 |
-
# Sistema de riesgo
|
| 262 |
RISK_LEVELS = {
|
| 263 |
-
0: {'level': '
|
| 264 |
-
1: {'level': '
|
| 265 |
-
2: {'level': '
|
| 266 |
-
3: {'level': '
|
| 267 |
-
4: {'level': '
|
| 268 |
-
5: {'level': '
|
| 269 |
-
6: {'level': '
|
| 270 |
}
|
| 271 |
|
| 272 |
-
MALIGNANT_INDICES = [0, 1, 4]
|
| 273 |
|
|
|
|
| 274 |
def predict_with_model(image, model_data):
|
| 275 |
-
"""Predicción con un modelo específico - versión mejorada"""
|
| 276 |
try:
|
| 277 |
config = model_data['config']
|
| 278 |
-
|
| 279 |
-
# Redimensionar imagen
|
| 280 |
image_resized = image.resize((224, 224), Image.LANCZOS)
|
| 281 |
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
if isinstance(results, list) and len(results) > 0:
|
| 287 |
-
mapped_probs = np.ones(7) / 7
|
| 288 |
-
confidence = results[0]['score'] if 'score' in results[0] else 0.5
|
| 289 |
-
|
| 290 |
-
label = results[0].get('label', '').lower()
|
| 291 |
-
if any(word in label for word in ['melanoma', 'mel', 'malignant', 'cancer']):
|
| 292 |
-
predicted_idx = 4
|
| 293 |
-
elif any(word in label for word in ['carcinoma', 'bcc', 'basal']):
|
| 294 |
-
predicted_idx = 1
|
| 295 |
-
elif any(word in label for word in ['keratosis', 'akiec']):
|
| 296 |
-
predicted_idx = 0
|
| 297 |
-
elif any(word in label for word in ['nevus', 'nv', 'benign']):
|
| 298 |
-
predicted_idx = 5
|
| 299 |
-
else:
|
| 300 |
-
predicted_idx = 2
|
| 301 |
-
|
| 302 |
-
mapped_probs[predicted_idx] = confidence
|
| 303 |
-
remaining_sum = (1.0 - confidence)
|
| 304 |
-
if remaining_sum < 0: remaining_sum = 0
|
| 305 |
-
|
| 306 |
-
num_other_classes = 6
|
| 307 |
-
if num_other_classes > 0:
|
| 308 |
-
remaining_per_class = remaining_sum / num_other_classes
|
| 309 |
-
for i in range(7):
|
| 310 |
-
if i != predicted_idx:
|
| 311 |
-
mapped_probs[i] = remaining_per_class
|
| 312 |
-
|
| 313 |
-
else:
|
| 314 |
-
mapped_probs = np.ones(7) / 7
|
| 315 |
-
predicted_idx = 5
|
| 316 |
-
confidence = 0.3
|
| 317 |
-
|
| 318 |
-
else: # Usar modelo estándar (AutoModel/ViT)
|
| 319 |
-
processor = model_data['processor']
|
| 320 |
-
model = model_data['model']
|
| 321 |
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
else: # Predicción de benigno
|
| 346 |
-
# Distribuimos la probabilidad benigna entre los tipos benignos conocidos, dando más peso al nevus
|
| 347 |
-
mapped_probs[5] = probabilities[0] * 0.6 # Nevus (más común)
|
| 348 |
-
mapped_probs[2] = probabilities[0] * 0.2 # BKL
|
| 349 |
-
mapped_probs[3] = probabilities[0] * 0.1 # DF
|
| 350 |
-
mapped_probs[6] = probabilities[0] * 0.1 # VASC
|
| 351 |
-
mapped_probs = mapped_probs / np.sum(mapped_probs) # Normalizar para que sumen 1
|
| 352 |
-
elif len(probabilities) in [1000, 900]: # Modelos generales como los de ImageNet (1000 clases) o modelos preentrenados en ImageNet-21k (900 clases)
|
| 353 |
-
mapped_probs = np.zeros(7)
|
| 354 |
-
# Intentar mapear las clases del modelo a las clases de piel si hay un id2label
|
| 355 |
-
if hasattr(model, 'config') and hasattr(model.config, 'id2label'):
|
| 356 |
-
model_labels = {v.lower(): k for k, v in model.config.id2label.items()}
|
| 357 |
-
# Asignar probabilidades a las clases de piel si coinciden
|
| 358 |
-
for i, skin_class in enumerate(CLASSES):
|
| 359 |
-
# Intentar buscar la etiqueta completa o una parte clave
|
| 360 |
-
key_words = skin_class.split('(')[1].rstrip(')').lower().split()
|
| 361 |
-
found = False
|
| 362 |
-
for key_word in key_words:
|
| 363 |
-
for model_label, model_idx in model_labels.items():
|
| 364 |
-
if key_word in model_label:
|
| 365 |
-
# Sumar la probabilidad de la clase del modelo a la clase de piel
|
| 366 |
-
mapped_probs[i] += probabilities[model_idx]
|
| 367 |
-
found = True
|
| 368 |
-
break
|
| 369 |
-
if found: break # Ya encontramos una coincidencia para esta clase de piel
|
| 370 |
-
|
| 371 |
-
# Si después del intento de mapeo, las probabilidades son cero o muy bajas,
|
| 372 |
-
# o si no hay id2label, usar la distribución uniforme (o heurística)
|
| 373 |
-
if np.sum(mapped_probs) == 0:
|
| 374 |
-
print(f"Advertencia: No se pudo mapear clases específicas para {config['name']} ({len(probabilities)} clases). Usando distribución heurística.")
|
| 375 |
-
mapped_probs = np.ones(7) / 7 # Empezamos con distribución uniforme
|
| 376 |
-
# Ajuste heurístico: Asignamos un poco más de peso a clases benignas por defecto
|
| 377 |
-
mapped_probs[5] += 0.1 # Aumentar Nevus (NV) ligeramente
|
| 378 |
-
mapped_probs[2] += 0.05 # Aumentar Lesión queratósica benigna (BKL) ligeramente
|
| 379 |
-
mapped_probs = mapped_probs / np.sum(mapped_probs) # Re-normalizar
|
| 380 |
-
else:
|
| 381 |
-
mapped_probs = mapped_probs / np.sum(mapped_probs) # Normalizar las probabilidades mapeadas
|
| 382 |
-
else: # Otros casos de dimensiones de salida no esperadas: distribución uniforme
|
| 383 |
-
print(f"Advertencia: Dimensión de salida inesperada para {config['name']} ({len(probabilities)} clases). Usando distribución uniforme.")
|
| 384 |
-
mapped_probs = np.ones(7) / 7
|
| 385 |
-
|
| 386 |
-
predicted_idx = int(np.argmax(mapped_probs))
|
| 387 |
-
confidence = float(mapped_probs[predicted_idx])
|
| 388 |
|
| 389 |
return {
|
| 390 |
'model': f"{config['emoji']} {config['name']}",
|
|
@@ -394,419 +281,63 @@ def predict_with_model(image, model_data):
|
|
| 394 |
'is_malignant': predicted_idx in MALIGNANT_INDICES,
|
| 395 |
'predicted_idx': predicted_idx,
|
| 396 |
'success': True,
|
| 397 |
-
'category': model_data['category']
|
| 398 |
}
|
| 399 |
|
| 400 |
except Exception as e:
|
| 401 |
-
print(f"❌ Error
|
| 402 |
-
return {
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
| 408 |
-
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
|
| 417 |
-
|
| 418 |
-
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
| 424 |
-
|
| 425 |
-
|
| 426 |
-
|
| 427 |
-
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
| 432 |
-
|
| 433 |
-
|
| 434 |
-
|
| 435 |
-
|
| 436 |
-
|
| 437 |
-
|
| 438 |
-
|
| 439 |
-
|
| 440 |
-
|
| 441 |
-
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
|
| 445 |
-
for i, bar in enumerate(bars):
|
| 446 |
-
height = bar.get_height()
|
| 447 |
-
ax1.text(bar.get_x() + bar.get_width()/2., height + 0.01,
|
| 448 |
-
f'{height:.2%}', ha='center', va='bottom', fontsize=9)
|
| 449 |
-
|
| 450 |
-
# Gráfico 2: Confianza por modelo
|
| 451 |
-
valid_predictions = [p for p in predictions if p.get('success', False)]
|
| 452 |
-
model_names = [pred['model'].split(' ')[1] if len(pred['model'].split(' ')) > 1 else pred['model'] for pred in valid_predictions]
|
| 453 |
-
confidences = [pred['confidence'] for pred in valid_predictions]
|
| 454 |
-
|
| 455 |
-
colors_conf = ['#ff6b35' if pred['is_malignant'] else '#44ff44' for pred in valid_predictions]
|
| 456 |
-
bars2 = ax2.bar(range(len(valid_predictions)), confidences, color=colors_conf, alpha=0.8)
|
| 457 |
-
|
| 458 |
-
ax2.set_xlabel('Modelos')
|
| 459 |
-
ax2.set_ylabel('Confianza')
|
| 460 |
-
ax2.set_title('🎯 Confianza por Modelo')
|
| 461 |
-
ax2.set_xticks(range(len(valid_predictions)))
|
| 462 |
-
ax2.set_xticklabels(model_names, rotation=45)
|
| 463 |
-
ax2.grid(True, alpha=0.3)
|
| 464 |
-
ax2.set_ylim(0, 1)
|
| 465 |
-
|
| 466 |
-
for i, bar in enumerate(bars2):
|
| 467 |
-
height = bar.get_height()
|
| 468 |
-
ax2.text(bar.get_x() + bar.get_width()/2., height + 0.01,
|
| 469 |
-
f'{height:.1%}', ha='center', va='bottom', fontsize=9)
|
| 470 |
-
|
| 471 |
-
plt.tight_layout()
|
| 472 |
-
|
| 473 |
-
buf = io.BytesIO()
|
| 474 |
-
plt.savefig(buf, format='png', dpi=300, bbox_inches='tight')
|
| 475 |
-
buf.seek(0)
|
| 476 |
-
chart_b64 = base64.b64encode(buf.getvalue()).decode()
|
| 477 |
-
plt.close()
|
| 478 |
-
|
| 479 |
-
return f'<img src="data:image/png;base64,{chart_b64}" style="width:100%; max-width:800px;">'
|
| 480 |
-
|
| 481 |
-
except Exception as e:
|
| 482 |
-
print(f"Error creando gráfico: {e}")
|
| 483 |
-
return "<p>❌ Error generando gráfico de probabilidades</p>"
|
| 484 |
-
|
| 485 |
-
def create_heatmap(predictions):
|
| 486 |
-
"""Crear mapa de calor de probabilidades por modelo"""
|
| 487 |
-
try:
|
| 488 |
-
valid_predictions = [p for p in predictions if p.get('success', False)]
|
| 489 |
-
|
| 490 |
-
if not valid_predictions:
|
| 491 |
-
return "<p>No hay datos suficientes para el mapa de calor</p>"
|
| 492 |
-
|
| 493 |
-
prob_matrix_list = []
|
| 494 |
-
model_names_for_heatmap = []
|
| 495 |
-
for pred in valid_predictions:
|
| 496 |
-
if isinstance(pred['probabilities'], np.ndarray) and len(pred['probabilities']) == 7 and not np.isnan(pred['probabilities']).any():
|
| 497 |
-
prob_matrix_list.append(pred['probabilities'])
|
| 498 |
-
model_names_for_heatmap.append(pred['model'])
|
| 499 |
-
else:
|
| 500 |
-
print(f"Advertencia: Probabilidades no válidas para heatmap de {pred['model']}: {pred['probabilities']}")
|
| 501 |
-
|
| 502 |
-
if not prob_matrix_list:
|
| 503 |
-
return "<p>No hay datos válidos para el mapa de calor después de filtrar.</p>"
|
| 504 |
-
|
| 505 |
-
prob_matrix = np.array(prob_matrix_list)
|
| 506 |
-
|
| 507 |
-
fig, ax = plt.subplots(figsize=(10, len(model_names_for_heatmap) * 0.8))
|
| 508 |
-
|
| 509 |
-
im = ax.imshow(prob_matrix, cmap='RdYlGn_r', aspect='auto', vmin=0, vmax=1)
|
| 510 |
-
|
| 511 |
-
ax.set_xticks(np.arange(7))
|
| 512 |
-
ax.set_yticks(np.arange(len(model_names_for_heatmap)))
|
| 513 |
-
ax.set_xticklabels([cls.split('(')[1].rstrip(')') for cls in CLASSES])
|
| 514 |
-
ax.set_yticklabels(model_names_for_heatmap)
|
| 515 |
-
|
| 516 |
-
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
|
| 517 |
-
|
| 518 |
-
for i in range(len(model_names_for_heatmap)):
|
| 519 |
-
for j in range(7):
|
| 520 |
-
text = ax.text(j, i, f'{prob_matrix[i, j]:.2f}',
|
| 521 |
-
ha="center", va="center", color="white" if prob_matrix[i, j] > 0.5 else "black",
|
| 522 |
-
fontsize=8)
|
| 523 |
-
|
| 524 |
-
ax.set_title("Mapa de Calor: Probabilidades por Modelo y Clase")
|
| 525 |
-
fig.tight_layout()
|
| 526 |
-
|
| 527 |
-
cbar = plt.colorbar(im, ax=ax)
|
| 528 |
-
cbar.set_label('Probabilidad', rotation=270, labelpad=15)
|
| 529 |
-
|
| 530 |
-
buf = io.BytesIO()
|
| 531 |
-
plt.savefig(buf, format='png', dpi=300, bbox_inches='tight')
|
| 532 |
-
buf.seek(0)
|
| 533 |
-
heatmap_b64 = base64.b64encode(buf.getvalue()).decode()
|
| 534 |
-
plt.close()
|
| 535 |
-
|
| 536 |
-
return f'<img src="data:image/png;base64,{heatmap_b64}" style="width:100%; max-width:800px;">'
|
| 537 |
-
|
| 538 |
-
except Exception as e:
|
| 539 |
-
print(f"Error creando mapa de calor: {e}")
|
| 540 |
-
return "<p>❌ Error generando mapa de calor</p>"
|
| 541 |
-
|
| 542 |
-
def analizar_lesion(img):
|
| 543 |
-
"""Función principal para analizar la lesión"""
|
| 544 |
-
try:
|
| 545 |
-
if img is None:
|
| 546 |
-
return "<h3>⚠️ Por favor, carga una imagen</h3>"
|
| 547 |
-
|
| 548 |
-
if not loaded_models or all(m.get('type') == 'dummy' for m in loaded_models.values()):
|
| 549 |
-
return "<h3>❌ Error del Sistema</h3><p>No hay modelos disponibles. Por favor, recarga la aplicación.</p>"
|
| 550 |
-
|
| 551 |
-
if img.mode != 'RGB':
|
| 552 |
-
img = img.convert('RGB')
|
| 553 |
-
|
| 554 |
-
predictions = []
|
| 555 |
-
|
| 556 |
-
for model_name, model_data in loaded_models.items():
|
| 557 |
-
if model_data.get('type') != 'dummy':
|
| 558 |
-
pred = predict_with_model(img, model_data)
|
| 559 |
-
if pred.get('success', False):
|
| 560 |
-
predictions.append(pred)
|
| 561 |
-
|
| 562 |
-
if not predictions:
|
| 563 |
-
return "<h3>❌ Error</h3><p>No se pudieron obtener predicciones de ningún modelo.</p>"
|
| 564 |
-
|
| 565 |
-
# Análisis de consenso
|
| 566 |
-
class_votes = {}
|
| 567 |
-
confidence_sum = {}
|
| 568 |
-
|
| 569 |
-
for pred in predictions:
|
| 570 |
-
class_name = pred['class']
|
| 571 |
-
confidence = pred['confidence']
|
| 572 |
-
|
| 573 |
-
if class_name not in class_votes:
|
| 574 |
-
class_votes[class_name] = 0
|
| 575 |
-
confidence_sum[class_name] = 0
|
| 576 |
-
|
| 577 |
-
class_votes[class_name] += 1
|
| 578 |
-
confidence_sum[class_name] += confidence
|
| 579 |
-
|
| 580 |
-
# Manejar el caso donde no hay votos por alguna razón (aunque predictions ya valida que hay)
|
| 581 |
-
if not class_votes:
|
| 582 |
-
return "<h3>❌ Error en el Consenso</h3><p>No se pudieron consolidar los votos de los modelos.</p>"
|
| 583 |
-
|
| 584 |
-
consensus_class = max(class_votes.keys(), key=lambda x: class_votes[x])
|
| 585 |
-
avg_confidence = confidence_sum[consensus_class] / class_votes[consensus_class]
|
| 586 |
-
|
| 587 |
-
consensus_idx = CLASSES.index(consensus_class)
|
| 588 |
-
is_malignant = consensus_idx in MALIGNANT_INDICES
|
| 589 |
-
risk_info = RISK_LEVELS[consensus_idx]
|
| 590 |
-
|
| 591 |
-
probability_chart = create_probability_chart(predictions, consensus_class)
|
| 592 |
-
heatmap = create_heatmap(predictions)
|
| 593 |
-
|
| 594 |
-
html_report = f"""
|
| 595 |
-
<div style="font-family: Arial, sans-serif; max-width: 1200px; margin: 0 auto;">
|
| 596 |
-
<h2 style="color: #2c3e50; text-align: center;">🏥 Análisis Completo de Lesión Cutánea</h2>
|
| 597 |
-
|
| 598 |
-
<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 20px; border-radius: 10px; margin: 20px 0;">
|
| 599 |
-
<h3 style="margin: 0; text-align: center;">📋 Resultado de Consenso</h3>
|
| 600 |
-
<p style="font-size: 18px; text-align: center; margin: 10px 0;"><strong>{consensus_class}</strong></p>
|
| 601 |
-
<p style="text-align: center; margin: 5px 0;">Confianza Promedio: <strong>{avg_confidence:.1%}</strong></p>
|
| 602 |
-
<p style="text-align: center; margin: 5px 0;">Consenso: <strong>{class_votes[consensus_class]}/{len(predictions)} modelos</strong></p>
|
| 603 |
-
</div>
|
| 604 |
-
|
| 605 |
-
<div style="background: {risk_info['color']}; color: white; padding: 15px; border-radius: 8px; margin: 15px 0;">
|
| 606 |
-
<h4 style="margin: 0;">⚠️ Nivel de Riesgo: {risk_info['level']}</h4>
|
| 607 |
-
<p style="margin: 5px 0;"><strong>{risk_info['urgency']}</strong></p>
|
| 608 |
-
<p style="margin: 5px 0;">Tipo: {'🔴 Potencialmente maligna' if is_malignant else '🟢 Probablemente benigna'}</p>
|
| 609 |
-
</div>
|
| 610 |
-
|
| 611 |
-
<div style="background: #e3f2fd; padding: 15px; border-radius: 8px; margin: 15px 0;">
|
| 612 |
-
<h4 style="color: #1976d2;">🤖 Resultados Individuales por Modelo</h4>
|
| 613 |
-
<p style="font-size: 0.9em; color: #555;">
|
| 614 |
-
A continuación se detallan las predicciones de cada modelo. Es importante destacar que los <strong>modelos entrenados específicamente en lesiones de piel (Categoría: Especializados) suelen ser más fiables</strong> para este tipo de análisis que los modelos generales.
|
| 615 |
-
</p>
|
| 616 |
-
"""
|
| 617 |
-
|
| 618 |
-
# RESULTADOS INDIVIDUALES DETALLADOS - Separados por categoría
|
| 619 |
-
|
| 620 |
-
# Especializados
|
| 621 |
-
html_report += """
|
| 622 |
-
<h5 style="color: #007bff; border-bottom: 1px solid #007bff; padding-bottom: 5px; margin-top: 20px;">
|
| 623 |
-
Modelos Especializados en Lesiones de Piel
|
| 624 |
-
</h5>
|
| 625 |
-
"""
|
| 626 |
-
specialized_models_found = False
|
| 627 |
-
for i, pred in enumerate(predictions):
|
| 628 |
-
if pred['success'] and pred['category'] == 'especializados':
|
| 629 |
-
specialized_models_found = True
|
| 630 |
-
model_risk = RISK_LEVELS[pred['predicted_idx']]
|
| 631 |
-
malignant_status = "🔴 Maligna" if pred['is_malignant'] else "🟢 Benigna"
|
| 632 |
-
|
| 633 |
-
html_report += f"""
|
| 634 |
-
<div style="margin: 15px 0; padding: 15px; background: white; border-radius: 8px; border-left: 5px solid {'#ff6b35' if pred['is_malignant'] else '#44ff44'}; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
|
| 635 |
-
<div style="display: flex; justify-content: space-between; align-items: center; margin-bottom: 10px;">
|
| 636 |
-
<h5 style="margin: 0; color: #333;">{pred['model']}</h5>
|
| 637 |
-
<span style="background: {model_risk['color']}; color: white; padding: 4px 8px; border-radius: 4px; font-size: 12px;">{model_risk['level']}</span>
|
| 638 |
-
</div>
|
| 639 |
-
|
| 640 |
-
<div style="display: grid; grid-template-columns: 1fr 1fr 1fr; gap: 10px; font-size: 14px;">
|
| 641 |
-
<div><strong>Diagnóstico:</strong><br>{pred['class']}</div>
|
| 642 |
-
<div><strong>Confianza:</strong><br>{pred['confidence']:.1%}</div>
|
| 643 |
-
<div><strong>Clasificación:</strong><br>{malignant_status}</div>
|
| 644 |
-
</div>
|
| 645 |
-
|
| 646 |
-
<div style="margin-top: 10px;">
|
| 647 |
-
<strong>Top 3 Probabilidades:</strong><br>
|
| 648 |
-
<div style="font-size: 12px; color: #666;">
|
| 649 |
-
"""
|
| 650 |
-
|
| 651 |
-
top_indices = np.argsort(pred['probabilities'])[-3:][::-1]
|
| 652 |
-
for idx in top_indices:
|
| 653 |
-
prob = pred['probabilities'][idx]
|
| 654 |
-
if prob > 0.01:
|
| 655 |
-
html_report += f"• {CLASSES[idx].split('(')[1].rstrip(')')}: {prob:.1%}<br>"
|
| 656 |
-
|
| 657 |
-
html_report += f"""
|
| 658 |
-
</div>
|
| 659 |
-
<div style="margin-top: 8px; font-size: 12px; color: #888;">
|
| 660 |
-
<strong>Recomendación:</strong> {model_risk['urgency']}
|
| 661 |
-
</div>
|
| 662 |
-
</div>
|
| 663 |
-
</div>
|
| 664 |
-
"""
|
| 665 |
-
if not specialized_models_found:
|
| 666 |
-
html_report += "<p style='color: #888;'>No se cargaron modelos especializados o fallaron al predecir.</p>"
|
| 667 |
-
|
| 668 |
-
# Generales
|
| 669 |
-
html_report += """
|
| 670 |
-
<h5 style="color: #6c757d; border-bottom: 1px solid #6c757d; padding-bottom: 5px; margin-top: 20px;">
|
| 671 |
-
Modelos Generales de Visión
|
| 672 |
-
</h5>
|
| 673 |
-
<p style="font-size: 0.85em; color: #777;">
|
| 674 |
-
Estos modelos son pre-entrenados en grandes datasets de imágenes generales (como ImageNet). Aunque no están optimizados específicamente para lesiones cutáneas, contribuyen al consenso general con su capacidad para reconocer patrones visuales. Sus predicciones son un complemento útil, pero pueden ser menos precisas que las de los modelos especializados.
|
| 675 |
-
</p>
|
| 676 |
-
"""
|
| 677 |
-
general_models_found = False
|
| 678 |
-
for i, pred in enumerate(predictions):
|
| 679 |
-
if pred['success'] and pred['category'] == 'generales':
|
| 680 |
-
general_models_found = True
|
| 681 |
-
model_risk = RISK_LEVELS[pred['predicted_idx']]
|
| 682 |
-
malignant_status = "🔴 Maligna" if pred['is_malignant'] else "🟢 Benigna"
|
| 683 |
-
|
| 684 |
-
html_report += f"""
|
| 685 |
-
<div style="margin: 15px 0; padding: 15px; background: white; border-radius: 8px; border-left: 5px solid {'#ff6b35' if pred['is_malignant'] else '#44ff44'}; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
|
| 686 |
-
<div style="display: flex; justify-content: space-between; align-items: center; margin-bottom: 10px;">
|
| 687 |
-
<h5 style="margin: 0; color: #333;">{pred['model']}</h5>
|
| 688 |
-
<span style="background: {model_risk['color']}; color: white; padding: 4px 8px; border-radius: 4px; font-size: 12px;">{model_risk['level']}</span>
|
| 689 |
-
</div>
|
| 690 |
-
|
| 691 |
-
<div style="display: grid; grid-template-columns: 1fr 1fr 1fr; gap: 10px; font-size: 14px;">
|
| 692 |
-
<div><strong>Diagnóstico:</strong><br>{pred['class']}</div>
|
| 693 |
-
<div><strong>Confianza:</strong><br>{pred['confidence']:.1%}</div>
|
| 694 |
-
<div><strong>Clasificación:</strong><br>{malignant_status}</div>
|
| 695 |
-
</div>
|
| 696 |
-
|
| 697 |
-
<div style="margin-top: 10px;">
|
| 698 |
-
<strong>Top 3 Probabilidades:</strong><br>
|
| 699 |
-
<div style="font-size: 12px; color: #666;">
|
| 700 |
-
"""
|
| 701 |
-
|
| 702 |
-
top_indices = np.argsort(pred['probabilities'])[-3:][::-1]
|
| 703 |
-
for idx in top_indices:
|
| 704 |
-
prob = pred['probabilities'][idx]
|
| 705 |
-
if prob > 0.01:
|
| 706 |
-
html_report += f"• {CLASSES[idx].split('(')[1].rstrip(')')}: {prob:.1%}<br>"
|
| 707 |
-
|
| 708 |
-
html_report += f"""
|
| 709 |
-
</div>
|
| 710 |
-
<div style="margin-top: 8px; font-size: 12px; color: #888;">
|
| 711 |
-
<strong>Recomendación:</strong> {model_risk['urgency']}
|
| 712 |
-
</div>
|
| 713 |
-
</div>
|
| 714 |
-
</div>
|
| 715 |
-
"""
|
| 716 |
-
if not general_models_found:
|
| 717 |
-
html_report += "<p style='color: #888;'>No se cargaron modelos generales o fallaron al predecir.</p>"
|
| 718 |
-
|
| 719 |
-
html_report += f"""
|
| 720 |
-
</div>
|
| 721 |
-
|
| 722 |
-
<div style="background: #f8f9fa; padding: 15px; border-radius: 8px; margin: 15px 0;">
|
| 723 |
-
<h4 style="color: #495057;">📊 Análisis Estadístico</h4>
|
| 724 |
-
<div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 15px;">
|
| 725 |
-
<div>
|
| 726 |
-
{probability_chart}
|
| 727 |
-
</div>
|
| 728 |
-
<div>
|
| 729 |
-
{heatmap}
|
| 730 |
-
</div>
|
| 731 |
-
</div>
|
| 732 |
-
</div>
|
| 733 |
-
|
| 734 |
-
<div style="background: #fff3cd; color: #856404; padding: 15px; border-radius: 8px; margin: 15px 0; border: 1px solid #ffeeba;">
|
| 735 |
-
<h4 style="margin-top: 0;">Disclaimer Importante:</h4>
|
| 736 |
-
<p style="font-size: 0.9em; margin-bottom: 5px;">
|
| 737 |
-
Esta herramienta es un <strong>prototipo de investigación</strong> y no debe ser utilizada como un diagnóstico médico definitivo. Los resultados son generados por modelos de inteligencia artificial y pueden contener errores.
|
| 738 |
-
</p>
|
| 739 |
-
<p style="font-size: 0.9em; margin-bottom: 5px;">
|
| 740 |
-
<strong>Siempre consulte a un profesional médico cualificado</strong> para cualquier inquietud sobre su salud. La automedicación o el autodiagnóstico basado en esta herramienta puede ser perjudicial.
|
| 741 |
-
</p>
|
| 742 |
-
<p style="font-size: 0.9em; margin-bottom: 0;">
|
| 743 |
-
La precisión de los modelos puede variar. Los modelos especializados en piel tienden a ser más fiables para estas tareas específicas.
|
| 744 |
-
</p>
|
| 745 |
-
</div>
|
| 746 |
-
</div>
|
| 747 |
-
"""
|
| 748 |
-
return html_report
|
| 749 |
-
|
| 750 |
-
except Exception as e:
|
| 751 |
-
error_message = f"<h3>❌ Error Inesperado en el Análisis:</h3><p>Se produjo un error durante el procesamiento: {str(e)}</p><p>Por favor, intenta con otra imagen o recarga la aplicación.</p>"
|
| 752 |
-
print(error_message)
|
| 753 |
-
return error_message
|
| 754 |
-
|
| 755 |
-
|
| 756 |
-
# --- INTERFAZ GRADIO ---
|
| 757 |
-
# Componentes de entrada y salida
|
| 758 |
-
image_input = gr.Image(type="pil", label="Sube una imagen de la lesión cutánea")
|
| 759 |
-
output_html = gr.HTML(label="Informe de Análisis")
|
| 760 |
-
|
| 761 |
-
# Títulos y descripción para la interfaz
|
| 762 |
-
title = "Skin Lesion Analysis AI"
|
| 763 |
-
description = """
|
| 764 |
-
<h1 style="text-align: center; color: #2c3e50;">🩺 Analizador de Lesiones Cutáneas impulsado por IA 🩺</h1>
|
| 765 |
-
<p style="text-align: center; font-size: 1.1em; color: #555;">
|
| 766 |
-
Esta herramienta utiliza una batería de modelos de Visión por Computadora (tanto especializados en lesiones de piel como generales) para analizar imágenes y ofrecer un consenso sobre el tipo de lesión.
|
| 767 |
-
Proporciona un informe detallado con diagnósticos individuales de cada modelo y un consenso general, incluyendo un nivel de riesgo.
|
| 768 |
-
</p>
|
| 769 |
-
<p style="text-align: center; font-size: 1.1em; color: #555;">
|
| 770 |
-
<strong>Instrucciones:</strong> Sube una imagen clara de la lesión cutánea (óptimamente con buena iluminación y sin reflejos).
|
| 771 |
-
</p>
|
| 772 |
-
<p style="text-align: center; font-size: 0.9em; color: #888;">
|
| 773 |
-
⚠️ **Importante:** Esta herramienta es solo para **fines de investigación y educativos**. No reemplaza el consejo médico profesional. Siempre consulta a un dermatólogo para un diagnóstico y tratamiento precisos.
|
| 774 |
-
</p>
|
| 775 |
-
"""
|
| 776 |
-
article = """
|
| 777 |
-
<div style="text-align: center; padding: 20px; background-color: #f0f2f5; border-top: 1px solid #e0e2e5;">
|
| 778 |
-
<h3 style="color: #333;">¿Cómo funciona?</h3>
|
| 779 |
-
<p style="color: #666;">
|
| 780 |
-
El sistema carga múltiples modelos de aprendizaje profundo (Convolutional Neural Networks y Vision Transformers) entrenados en diversos datasets, incluyendo conjuntos de datos médicos de lesiones cutáneas (como HAM10000 e ISIC) y datasets generales de imágenes (como ImageNet).
|
| 781 |
-
Cada modelo procesa la imagen de forma independiente y genera una predicción de probabilidad para cada una de las 7 clases de lesiones de piel más comunes.
|
| 782 |
-
Posteriormente, se realiza un análisis de consenso para consolidar las predicciones, ponderando la confianza de cada modelo y dando preferencia a los modelos entrenados específicamente para el dominio de la piel.
|
| 783 |
-
Finalmente, se genera un informe visual con gráficos de barras y mapas de calor para facilitar la interpretación de los resultados.
|
| 784 |
-
</p>
|
| 785 |
-
<h4 style="color: #333;">Clases de Lesiones Analizadas:</h4>
|
| 786 |
-
<ul style="list-style-type: none; padding: 0; color: #666; display: inline-block; text-align: left;">
|
| 787 |
-
<li><strong>AKIEC:</strong> Queratosis actínica / Carcinoma de Bowen</li>
|
| 788 |
-
<li><strong>BCC:</strong> Carcinoma de células basales</li>
|
| 789 |
-
<li><strong>BKL:</strong> Lesión queratósica benigna (verruga seborreica, queratosis actínica, liquen plano)</li>
|
| 790 |
-
<li><strong>DF:</strong> Dermatofibroma</li>
|
| 791 |
-
<li><strong>MEL:</strong> Melanoma maligno</li>
|
| 792 |
-
<li><strong>NV:</strong> Nevus melanocítico (Lunar)</li>
|
| 793 |
-
<li><strong>VASC:</strong> Lesión vascular (angiomas, telangiectasias)</li>
|
| 794 |
-
</ul>
|
| 795 |
-
<p style="font-size: 0.8em; color: #999; margin-top: 20px;">
|
| 796 |
-
Desarrollado con ❤️ para investigación en IA y salud.
|
| 797 |
-
</p>
|
| 798 |
-
</div>
|
| 799 |
-
"""
|
| 800 |
-
|
| 801 |
-
# Lanzar la interfaz Gradio
|
| 802 |
gr.Interface(
|
| 803 |
-
fn=
|
| 804 |
-
inputs=
|
| 805 |
-
outputs=
|
| 806 |
-
title=
|
| 807 |
-
description=
|
| 808 |
-
|
| 809 |
-
|
| 810 |
-
|
| 811 |
-
|
| 812 |
-
|
|
|
|
|
|
| 10 |
import warnings
|
| 11 |
import os
|
| 12 |
|
| 13 |
# Suppress warnings
warnings.filterwarnings("ignore")

print("🔍 Starting Skin Lesion Analysis System...")

# --- VERIFIED MODEL CONFIGURATIONS ---
# Two tiers: "specialized" models trained on skin-lesion datasets
# (HAM10000 / ISIC) and "general" ImageNet-style vision backbones.
# Every entry MUST carry 'name', 'id', 'type', 'accuracy', 'description'
# and 'emoji' — load_model_safe() and the report rendering read all of them,
# and a missing 'name' key makes the loader fail for that model.
MODEL_CONFIGS = {
    "specialized": [
        {
            'name': 'Syaha Skin Cancer',
            'id': 'syaha/skin_cancer_detection_model',
            'type': 'custom',
            'accuracy': 0.82,
            'description': 'CNN trained on HAM10000 dataset',
            'emoji': '🩺'
        },
        {
            'name': 'VRJBro Detection',
            'id': 'VRJBro/skin-cancer-detection',
            'type': 'custom',
            'accuracy': 0.85,
            'description': 'Specialized detector (2024)',
            'emoji': '🎯'
        },
        {
            'name': 'ViT Skin Cancer',
            'id': 'Anwarkh1/Skin_Cancer-Image_Classification',
            'type': 'vit',
            'accuracy': 0.89,
            'description': 'Multi-class skin lesion classifier',
            'emoji': '🧠'
        },
        {
            'name': 'SMOTE Enhanced',
            'id': 'jhoppanne/SkinCancerClassifier_smote-V0',
            'type': 'custom',
            'accuracy': 0.86,
            'description': 'ISIC 2024 model using SMOTE for class imbalance',
            'emoji': '⚖️'
        },
        {
            'name': 'ViT ISIC Binary',
            'id': 'ahishamm/vit-base-binary-isic-sharpened-patch-32',
            'type': 'vit',
            'accuracy': 0.89,
            'description': 'ViT model for binary ISIC lesion classification (benign/malignant)',
            'emoji': '🔬'
        },
        {
            'name': 'ViT ISIC Multi-class',
            'id': 'ahishamm/vit-base-isic-patch-16',
            'type': 'vit',
            'accuracy': 0.79,
            'description': 'ViT model for multi-class ISIC lesion classification',
            'emoji': '🔍'
        }
    ],
    "general": [
        {
            'name': 'ViT Base General',
            'id': 'google/vit-base-patch16-224',
            'type': 'vit',
            'accuracy': 0.78,
            'description': 'ViT base pre-trained on ImageNet-1k.',
            'emoji': '📈'
        },
        {
            # NOTE(review): 'name' restored — missing in source; confirm label.
            'name': 'ResNet-50 General',
            'id': 'microsoft/resnet-50',
            'type': 'custom',
            'accuracy': 0.77,
            'description': 'Classic ResNet-50, robust and high-performing.',
            'emoji': '⚙️'
        },
        {
            # NOTE(review): 'name' restored — missing in source; confirm label.
            'name': 'DeiT Base General',
            'id': 'facebook/deit-base-patch16-224',
            'type': 'vit',
            'accuracy': 0.79,
            'description': 'Data-efficient Image Transformer, efficient and accurate.',
            'emoji': '💡'
        },
        {
            # NOTE(review): 'name' restored — missing in source; confirm label.
            'name': 'MobileNet V2 General',
            'id': 'google/mobilenet_v2_1.0_224',
            'type': 'custom',
            'accuracy': 0.72,
            'description': 'Lightweight model for mobile or low-resource environments.',
            'emoji': '📱'
        },
        {
            # NOTE(review): 'name' restored — missing in source; confirm label.
            'name': 'Swin Tiny General',
            'id': 'microsoft/swin-tiny-patch4-window7-224',
            'type': 'custom',
            'accuracy': 0.81,
            'description': 'Swin Transformer (Tiny), efficient and powerful.',
            'emoji': '🌀'
        },
        {
            'name': 'ViT Base General (Fallback)',
            'id': 'google/vit-base-patch16-224-in21k',
            'type': 'vit',
            'accuracy': 0.75,
            'description': 'Generic ViT fallback model',
            'emoji': '🔄'
        }
    ]
}
|
| 121 |
|
| 122 |
# --- SAFE MODEL LOADING ---
loaded_models = {}      # model name -> {'processor', 'model', 'config', 'category'}
model_performance = {}  # model name -> reported accuracy (from config)

def load_model_safe(config):
    """Safely load one model described by `config`, trying several revisions.

    Args:
        config: dict with at least 'id', 'type', 'name', 'emoji'
            (and optionally 'category', set by the loading loop).

    Returns:
        dict with 'processor', 'model', 'config', 'category' on success,
        or None if every loading strategy failed (errors are printed,
        never raised to the caller).
    """
    try:
        model_id = config['id']
        model_type = config['type']
        print(f"🔄 Loading {config['emoji']} {config['name']}...")

        # Some repos only resolve under a specific revision; try each in turn.
        revisions_to_try = ["main", "no_float16_weights", None]
        processor = None
        model = None
        load_successful = False

        for revision in revisions_to_try:
            try:
                if revision:
                    print(f"  Trying revision: {revision}")
                    processor = AutoImageProcessor.from_pretrained(model_id, revision=revision)
                    model = AutoModelForImageClassification.from_pretrained(model_id, revision=revision)
                else:
                    processor = AutoImageProcessor.from_pretrained(model_id)
                    model = AutoModelForImageClassification.from_pretrained(model_id)
                load_successful = True
                break
            except Exception as e_rev:
                print(f"  Failed with revision '{revision}': {e_rev}")
                # Last resort for ViT repos: the explicit ViT classes.
                if model_type == 'vit' and revision is None:
                    try:
                        processor = ViTImageProcessor.from_pretrained(model_id)
                        model = ViTForImageClassification.from_pretrained(model_id)
                        load_successful = True
                        break
                    except Exception as e_vit:
                        print(f"  Failed with ViTImageProcessor/ViTForImageClassification: {e_vit}")
                continue

        if not load_successful:
            # RuntimeError instead of bare Exception; still caught below.
            raise RuntimeError("Failed to load model with all revisions.")

        model.eval()

        # Smoke test: a blank 224x224 image must run through without raising.
        test_input = processor(Image.new('RGB', (224, 224), color='white'), return_tensors="pt")
        with torch.no_grad():
            model(**test_input)

        print(f"✅ {config['emoji']} {config['name']} loaded successfully")

        return {
            'processor': processor,
            'model': model,
            'config': config,
            'category': config.get('category', 'general')
        }

    except Exception as e:
        print(f"❌ {config['emoji']} {config['name']} failed: {e}")
        return None
|
| 182 |
|
| 183 |
print("\n📦 Loading models...")
for category, configs in MODEL_CONFIGS.items():
    for config in configs:
        # Tag each config with its tier so downstream code can weight it.
        config['category'] = category
        model_data = load_model_safe(config)
        if model_data:
            loaded_models[config['name']] = model_data
            model_performance[config['name']] = config.get('accuracy', 0.8)

if not loaded_models:
    print("❌ No model could be loaded. Using fallback models...")
    fallback_models = [
        'google/vit-base-patch16-224-in21k',
        'microsoft/resnet-50'
    ]

    for fallback_id in fallback_models:
        try:
            print(f"🔄 Trying fallback: {fallback_id}")
            processor = AutoImageProcessor.from_pretrained(fallback_id)
            model = AutoModelForImageClassification.from_pretrained(fallback_id)
            model.eval()

            fallback_key = f'Fallback-{fallback_id.split("/")[-1]}'
            loaded_models[fallback_key] = {
                'processor': processor,
                'model': model,
                'config': {'name': f'Fallback {fallback_id}', 'emoji': '🏥'},
                'category': 'general'
            }
            # Consistency fix: the main loop records an accuracy for every
            # loaded model, but fallbacks were never registered. Use a
            # conservative default so model_performance stays complete.
            model_performance[fallback_key] = 0.75
            print(f"✅ Fallback model {fallback_id} loaded")
            break
        except Exception as e:
            print(f"❌ Fallback {fallback_id} failed: {e}")
            continue
|
| 218 |
|
| 219 |
# --- SKIN LESION CLASSES ---
# Fixed HAM10000-style taxonomy; index order is load-bearing: RISK_LEVELS
# and MALIGNANT_INDICES are keyed by position in this list.
CLASSES = [
    "Actinic Keratosis / Bowen (AKIEC)",  # 0
    "Basal Cell Carcinoma (BCC)",         # 1
    "Benign Keratosis (BKL)",             # 2
    "Dermatofibroma (DF)",                # 3
    "Malignant Melanoma (MEL)",           # 4
    "Melanocytic Nevus (NV)",             # 5
    "Vascular Lesion (VASC)"              # 6
]

# Clinical triage metadata, indexed in lockstep with CLASSES.
RISK_LEVELS = {
    0: dict(level='High', color='#ff6b35', urgency='Referral in 48h'),
    1: dict(level='Critical', color='#cc0000', urgency='Immediate referral'),
    2: dict(level='Low', color='#44ff44', urgency='Routine check'),
    3: dict(level='Low', color='#44ff44', urgency='Routine check'),
    4: dict(level='Critical', color='#990000', urgency='URGENT - Oncology'),
    5: dict(level='Low', color='#66ff66', urgency='Follow-up in 6 months'),
    6: dict(level='Moderate', color='#ffaa00', urgency='Check-up in 3 months')
}

# Class indices considered malignant (AKIEC, BCC, MEL).
MALIGNANT_INDICES = [0, 1, 4]
|
| 241 |
|
| 242 |
# --- PREDICTION FUNCTION ---
def predict_with_model(image, model_data):
    """Run one loaded model on `image` and map its output to the 7 classes.

    Args:
        image: PIL.Image of the lesion (any size; resized to 224x224).
        model_data: dict from load_model_safe() with 'processor', 'model',
            'config', 'category'.

    Returns:
        On success: dict with 'model', 'class', 'confidence', 'is_malignant',
        'predicted_idx', 'success'=True, 'category'.
        On failure: {'model', 'success': False, 'error'}.
    """
    # Bug fix: resolve config BEFORE the try block — previously it was bound
    # inside, so a failure on that first line made the except handler raise
    # NameError and mask the real error.
    config = model_data.get('config', {'name': 'unknown', 'emoji': '❓'})
    try:
        image_resized = image.resize((224, 224), Image.LANCZOS)

        processor = model_data['processor']
        model = model_data['model']
        inputs = processor(image_resized, return_tensors="pt")

        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits if hasattr(outputs, 'logits') else outputs[0]
            probabilities = F.softmax(logits, dim=-1).cpu().numpy()[0]

        # Handling models with unexpected output dimensions
        if len(probabilities) == 7:
            # Already in the HAM10000 7-class layout.
            mapped_probs = probabilities
        elif len(probabilities) == 2:
            # Binary benign/malignant head: spread the mass heuristically
            # over the 7 classes, then renormalize.
            mapped_probs = np.zeros(7)
            mapped_probs[4] = probabilities[1] * 0.5   # MEL
            mapped_probs[1] = probabilities[1] * 0.3   # BCC
            mapped_probs[0] = probabilities[1] * 0.2   # AKIEC
            mapped_probs[5] = probabilities[0] * 0.6   # NV
            mapped_probs[2] = probabilities[0] * 0.2   # BKL
            mapped_probs[3] = probabilities[0] * 0.1   # DF
            mapped_probs[6] = probabilities[0] * 0.1   # VASC
            mapped_probs /= np.sum(mapped_probs)
        else:
            # Unknown head size (e.g. ImageNet-1k): uninformative uniform.
            mapped_probs = np.ones(7) / 7

        predicted_idx = int(np.argmax(mapped_probs))
        confidence = float(mapped_probs[predicted_idx])

        return {
            'model': f"{config['emoji']} {config['name']}",
            # 'class'/'confidence' are consumed by analyze_lesion's consensus.
            'class': CLASSES[predicted_idx],
            'confidence': confidence,
            'is_malignant': predicted_idx in MALIGNANT_INDICES,
            'predicted_idx': predicted_idx,
            'success': True,
            'category': model_data['category']
        }

    except Exception as e:
        print(f"❌ Error in {config['name']}: {e}")
        return {'model': config['name'], 'success': False, 'error': str(e)}
|
| 290 |
+
|
| 291 |
# --- CONSENSUS ANALYSIS FUNCTION ---
def analyze_lesion(img):
    """Run every loaded model on `img` and render an HTML consensus report.

    Args:
        img: PIL.Image from the Gradio input, or None.

    Returns:
        HTML string with the consensus diagnosis, risk level, and one line
        per contributing model.
    """
    if img is None:
        return "<h3>⚠️ Please upload an image</h3>"

    predictions = []
    for model_name, model_data in loaded_models.items():
        if model_data.get('category') != 'dummy':
            pred = predict_with_model(img, model_data)
            if pred.get('success'):
                predictions.append(pred)

    if not predictions:
        return "<h3>❌ No valid predictions</h3>"

    # Tally votes and accumulate confidence per predicted class.
    class_votes, confidence_sum = {}, {}
    for pred in predictions:
        c = pred['class']
        conf = pred['confidence']
        class_votes[c] = class_votes.get(c, 0) + 1
        confidence_sum[c] = confidence_sum.get(c, 0) + conf

    # Bug fix: break vote ties by accumulated confidence instead of
    # arbitrary dict insertion order (non-tied results are unchanged).
    consensus_class = max(class_votes, key=lambda c: (class_votes[c], confidence_sum[c]))
    avg_conf = confidence_sum[consensus_class] / class_votes[consensus_class]
    consensus_idx = CLASSES.index(consensus_class)
    risk_info = RISK_LEVELS[consensus_idx]

    return f"""
    <h2>🏥 Skin Lesion Analysis Report</h2>
    <h3>Consensus Diagnosis: {consensus_class}</h3>
    <p>Average Confidence: <b>{avg_conf:.1%}</b></p>
    <p>Risk Level: <b style='color:{risk_info['color']}'>{risk_info['level']}</b></p>
    <p>Recommendation: {risk_info['urgency']}</p>
    <hr>
    <h4>Model Details:</h4>
    {''.join([f"<p>{p['model']}: {p['class']} ({p['confidence']:.1%})</p>" for p in predictions])}
    <hr>
    <p style='color:gray;'>⚠️ This AI tool is for educational and research purposes only. Always consult a dermatologist for accurate medical diagnosis.</p>
    """
|
| 330 |
+
|
| 331 |
# --- GRADIO INTERFACE ---
# Single-function UI: image in, HTML report out.
demo = gr.Interface(
    fn=analyze_lesion,
    inputs=gr.Image(type="pil", label="Upload a Skin Lesion Image"),
    outputs=gr.HTML(label="AI Analysis Report"),
    title="Skin Lesion Analysis AI",
    description="""
    <h2 style="text-align:center;">🩺 AI-Powered Skin Lesion Analyzer 🩺</h2>
    <p style="text-align:center;">Upload a clear skin lesion image. The system runs several deep learning models (both skin-specialized and general vision models) and provides a consensus diagnosis with confidence and risk level.</p>
    <p style="text-align:center; color:gray;">⚠️ Research prototype only. Not a substitute for professional medical advice.</p>
    """,
    theme="soft"
)
demo.launch(debug=True)
|