dcavadia commited on
Commit
b46360a
·
1 Parent(s): a76fe9c

update project structure

Browse files
app.py CHANGED
@@ -1,216 +1,102 @@
1
- import json
2
- import numpy as np
3
- import gradio as gr
4
- import onnxruntime as ort
5
- from PIL import Image
6
- from torchvision import transforms
7
- import pandas as pd
8
- import time
9
- import os
10
-
11
- # ----------------------------
12
- # Model + metadata
13
- # ----------------------------
14
- ORT_PROVIDERS = ["CPUExecutionProvider"] # add "CUDAExecutionProvider" if available
15
- ort_session = ort.InferenceSession("NFNetL0-0.961.onnx", providers=ORT_PROVIDERS)
16
-
17
- with open("data.json", "r", encoding="utf-8") as f:
18
- data = json.load(f)
19
- CLASSES = list(data) # ordered list of class names
20
-
21
- def empty_df():
22
- return pd.DataFrame({"item": CLASSES, "probability": [0] * len(CLASSES)})
23
-
24
- # ----------------------------
25
- # Utils
26
- # ----------------------------
27
- def probabilities_to_ints(probabilities, total_sum=100):
28
- probabilities = np.array(probabilities)
29
- positive_values = np.maximum(probabilities, 0)
30
- total_positive = positive_values.sum()
31
- if total_positive == 0:
32
- return np.zeros_like(probabilities, dtype=int)
33
- scaled = positive_values / total_positive * total_sum
34
- rounded = np.round(scaled).astype(int)
35
- diff = total_sum - rounded.sum()
36
- if diff != 0:
37
- max_idx = int(np.argmax(positive_values))
38
- rounded = rounded.flatten()
39
- rounded[max_idx] += diff
40
- rounded = rounded.reshape(scaled.shape)
41
- return rounded
42
-
43
- MEAN = [0.7611, 0.5869, 0.5923]
44
- STD = [0.1266, 0.1487, 0.1619]
45
- TFMS = transforms.Compose([
46
- transforms.Resize((100, 100)),
47
- transforms.ToTensor(),
48
- transforms.Normalize(mean=MEAN, std=STD),
49
- ])
50
-
51
- def preprocess(pil_img: Image.Image):
52
- return TFMS(pil_img).unsqueeze(0).numpy()
53
-
54
- # ----------------------------
55
- # Inference function
56
- # ----------------------------
57
- def predict(image):
58
- # Handle clicks with no image gracefully
59
- if image is None:
60
- return ("Cargue una imagen y presione Analizar.", "", "", "", "", "", empty_df(), "")
61
- if isinstance(image, Image.Image):
62
- pil = image.convert("RGB")
63
- else:
64
- try:
65
- pil = Image.fromarray(image).convert("RGB")
66
- except Exception:
67
- return ("Imagen inválida", "", "", "", "", "", empty_df(), "")
68
-
69
- t0 = time.time()
70
- input_tensor = preprocess(pil)
71
- input_name = ort_session.get_inputs()[0].name
72
- output = ort_session.run(None, {input_name: input_tensor})
73
-
74
- logits = output[0].squeeze()
75
- pred_idx = int(np.argmax(logits))
76
- pred_name = CLASSES[pred_idx]
77
-
78
- # Softmax probabilities
79
- exp = np.exp(logits - np.max(logits))
80
- probs = exp / exp.sum()
81
- conf_text = f"{float(probs[pred_idx]) * 100:.1f}%"
82
-
83
- ints = probabilities_to_ints(probs * 100.0, total_sum=100)
84
- df = pd.DataFrame({"item": CLASSES, "probability": ints.astype(int)}).sort_values(
85
- "probability", ascending=True
86
- )
87
-
88
- details = data[pred_name]
89
- descripcion = details.get("description", "")
90
- sintomas = details.get("symptoms", "")
91
- causas = details.get("causes", "")
92
- tratamiento = details.get("treatment-1", "")
93
-
94
- latency_ms = int((time.time() - t0) * 1000)
95
- return (pred_name, conf_text, descripcion, sintomas, causas, tratamiento, df, f"{latency_ms} ms")
96
-
97
- # ----------------------------
98
- # Theme (compatible across Gradio versions)
99
- # ----------------------------
100
- try:
101
- theme = gr.themes.Soft(primary_hue="rose", secondary_hue="slate")
102
- except Exception:
103
- theme = None # fallback to default theme
104
-
105
- # CSS polish; tint bars via CSS for Gradio 4.27
106
- CUSTOM_CSS = """
107
- .header {display:flex; align-items:center; gap:12px;}
108
- .badge {font-size:12px; padding:4px 8px; border-radius:12px; background:#f1f5f9; color:#334155;}
109
- .pred-card {font-size:18px;}
110
- .footer {font-size:12px; color:#64748b; text-align:center; padding:12px 0;}
111
- button, .gradio-container .gr-box, .gradio-container .gr-panel { border-radius: 10px !important; }
112
- /* Uniform bar color in Vega-Lite (Gradio 4.27) */
113
- .vega-embed .mark-rect, .vega-embed .mark-bar, .vega-embed .role-mark rect { fill: #ef4444 !important; }
114
  """
 
115
 
116
- # ----------------------------
117
- # UI
118
- # ----------------------------
119
- with gr.Blocks(theme=theme, css=CUSTOM_CSS) as demo:
120
- with gr.Row():
121
- with gr.Column(scale=6):
122
- gr.Markdown(
123
- """
124
- <div class="header">
125
- <h1 style="margin:0;">Clasificación de Enfermedades de la Piel</h1>
126
- <span class="badge">Demo • No diagnóstico médico</span>
127
- </div>
128
- <p style="margin-top:6px;">
129
- Sube una imagen dermatoscópica para ver la clase predicha, la confianza y la distribución de probabilidades.
130
- </p>
131
- """
132
- )
133
- with gr.Column(scale=1, min_width=140):
134
- try:
135
- dark_toggle = gr.ThemeMode(label="Modo", value="system")
136
- except Exception:
137
- gr.Markdown("")
138
-
139
- with gr.Row(equal_height=True):
140
- # Left column: input + actions
141
- with gr.Column(scale=5):
142
- image = gr.Image(type="numpy", label="Imagen de la lesión", height=420, sources=["upload", "clipboard"])
143
- with gr.Row():
144
- analyze_btn = gr.Button("Analizar", variant="primary") # Always enabled
145
- clear_btn = gr.Button("Limpiar")
146
- example_paths = [
147
- "examples/ak.jpg",
148
- "examples/bcc.jpg",
149
- "examples/df.jpg",
150
- "examples/melanoma.jpg",
151
- "examples/nevus.jpg",
152
- ]
153
- example_paths = [p for p in example_paths if os.path.exists(p)]
154
- if example_paths:
155
- gr.Examples(examples=example_paths, inputs=image, label="Ejemplos rápidos")
156
- latency = gr.Label(label="Latencia aproximada")
157
-
158
- # Right column: results
159
- with gr.Column(scale=5):
160
- with gr.Group():
161
- with gr.Row():
162
- nombre = gr.Label(label="Predicción principal", elem_classes=["pred-card"])
163
- confianza = gr.Label(label="Confianza")
164
- # Default BarPlot; CSS applies color
165
- prob_plot = gr.BarPlot(
166
- value=empty_df(),
167
- x="item",
168
- y="probability",
169
- title="Distribución de probabilidad (Top‑k)",
170
- x_title="Clase",
171
- y_title="Probabilidad",
172
- vertical=False,
173
- tooltip=["item", "probability"],
174
- width=520,
175
- height=320,
176
- )
177
- with gr.Tabs():
178
- with gr.TabItem("Detalles"):
179
- with gr.Accordion("Descripción", open=True):
180
- descripcion = gr.Textbox(lines=4, interactive=False)
181
- with gr.Accordion("Síntomas", open=False):
182
- sintomas = gr.Textbox(lines=4, interactive=False)
183
- with gr.Accordion("Causas", open=False):
184
- causas = gr.Textbox(lines=4, interactive=False)
185
- with gr.Accordion("Tratamiento", open=False):
186
- tratamiento = gr.Textbox(lines=4, interactive=False)
187
- with gr.TabItem("Acerca del modelo"):
188
- gr.Markdown(
189
- "- Arquitectura: CNN exportado a ONNX.<br>"
190
- "- Entrenamiento: dataset dermatoscópico (ver documentación).<br>"
191
- "- Nota: Esta herramienta es solo con fines educativos y no reemplaza una evaluación médica."
192
- )
193
 
194
- gr.Markdown("<div class='footer'>Versión del modelo: 1.0 • Última actualización: 2025‑08 • Universidad Central de Venezuela</div>")
195
-
196
- # ----------------------------
197
- # Wiring: original-like behavior
198
- # ----------------------------
199
- outputs = [nombre, confianza, descripcion, sintomas, causas, tratamiento, prob_plot, latency]
200
-
201
- # Analyze click runs prediction; predict() handles None safely
202
- analyze_btn.click(fn=predict, inputs=[image], outputs=outputs, show_progress="full")
203
-
204
- # Clear resets input and outputs - Fixed version
205
- def clear_all():
206
- # Return values for: image, nombre, confianza, descripcion, sintomas, causas, tratamiento, prob_plot, latency
207
- return (None, "", "", "", "", "", "", empty_df(), "")
208
-
209
- clear_btn.click(
210
- fn=clear_all,
211
- inputs=[],
212
- outputs=[image, nombre, confianza, descripcion, sintomas, causas, tratamiento, prob_plot, latency]
 
 
 
 
 
 
213
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
 
215
  if __name__ == "__main__":
216
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  """
2
+ MelanoScope AI - Enterprise-ready Skin Lesion Classification Application
3
 
4
+ A production-ready deep learning application for dermatoscopic image analysis
5
+ using ONNX Runtime and Gradio for web interface deployment.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
+ Author: Daniel Cavadia
8
+ Institution: Universidad Central de Venezuela
9
+ Version: 1.0
10
+ """
11
+ import logging
12
+ import sys
13
+ from pathlib import Path
14
+
15
+ # Add src to Python path
16
+ sys.path.insert(0, str(Path(__file__).parent / "src"))
17
+
18
+ from src.config.settings import LogConfig, AppConfig, EnvConfig
19
+ from src.core.model import MelanoScopeModel
20
+ from src.ui.components import MelanoScopeUI
21
+
22
+ def setup_logging() -> None:
23
+ """Configure application logging."""
24
+ log_level = getattr(logging, LogConfig.LOG_LEVEL.upper(), logging.INFO)
25
+
26
+ logging.basicConfig(
27
+ level=log_level,
28
+ format=LogConfig.LOG_FORMAT,
29
+ handlers=[
30
+ logging.StreamHandler(sys.stdout),
31
+ ]
32
  )
33
+
34
+ # Add file handler in production
35
+ if not EnvConfig.DEBUG:
36
+ try:
37
+ file_handler = logging.FileHandler(LogConfig.LOG_FILE)
38
+ file_handler.setFormatter(logging.Formatter(LogConfig.LOG_FORMAT))
39
+ logging.getLogger().addHandler(file_handler)
40
+ except Exception as e:
41
+ logging.warning(f"Could not create log file handler: {e}")
42
+
43
+ def create_application():
44
+ """
45
+ Create and configure the MelanoScope AI application.
46
+
47
+ Returns:
48
+ Configured Gradio interface
49
+ """
50
+ logger = logging.getLogger(__name__)
51
+
52
+ try:
53
+ logger.info(f"Initializing {AppConfig.TITLE} v{AppConfig.VERSION}")
54
+
55
+ # Initialize model
56
+ logger.info("Loading model and medical data...")
57
+ model = MelanoScopeModel()
58
+
59
+ # Log model information
60
+ model_info = model.get_model_info()
61
+ logger.info(f"Model loaded with {model_info['num_classes']} classes")
62
+
63
+ # Initialize UI
64
+ logger.info("Creating user interface...")
65
+ ui = MelanoScopeUI(model, model.classes)
66
+ interface = ui.create_interface()
67
+
68
+ logger.info("Application initialized successfully")
69
+ return interface
70
+
71
+ except Exception as e:
72
+ logger.error(f"Failed to initialize application: {e}")
73
+ raise RuntimeError(f"Application initialization failed: {e}")
74
+
75
+ def main():
76
+ """Main entry point for the application."""
77
+ # Set up logging
78
+ setup_logging()
79
+ logger = logging.getLogger(__name__)
80
+
81
+ try:
82
+ # Create application
83
+ app = create_application()
84
+
85
+ # Launch application
86
+ logger.info("Launching MelanoScope AI interface...")
87
+ app.launch(
88
+ server_name="0.0.0.0" if not EnvConfig.DEBUG else "127.0.0.1",
89
+ server_port=7860,
90
+ share=False,
91
+ debug=EnvConfig.DEBUG,
92
+ show_error=EnvConfig.DEBUG
93
+ )
94
+
95
+ except KeyboardInterrupt:
96
+ logger.info("Application shutdown requested")
97
+ except Exception as e:
98
+ logger.error(f"Application failed: {e}")
99
+ sys.exit(1)
100
 
101
  if __name__ == "__main__":
102
+ main()
src/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MelanoScope AI - Enterprise Skin Lesion Classification System
3
+
4
+ A production-ready deep learning application for dermatoscopic analysis.
5
+ """
6
+
7
+ __version__ = "1.0.0"
8
+ __author__ = "Daniel Cavadia"
9
+ __institution__ = "Universidad Central de Venezuela"
10
+
11
+ from .core.model import MelanoScopeModel
12
+ from .ui.components import MelanoScopeUI
13
+ from .config.settings import ModelConfig, UIConfig, AppConfig
14
+
15
+ __all__ = [
16
+ "MelanoScopeModel",
17
+ "MelanoScopeUI",
18
+ "ModelConfig",
19
+ "UIConfig",
20
+ "AppConfig"
21
+ ]
src/config/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Configuration module for MelanoScope AI."""
2
+
3
+ from .settings import ModelConfig, UIConfig, AppConfig, LogConfig, EnvConfig
4
+
5
+ __all__ = [
6
+ "ModelConfig",
7
+ "UIConfig",
8
+ "AppConfig",
9
+ "LogConfig",
10
+ "EnvConfig"
11
+ ]
src/config/settings.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Configuration settings for MelanoScope AI application.
3
+ Centralizes all constants and configuration parameters.
4
+ """
5
+ import os
6
+ from typing import List, Dict, Any
7
+ from pathlib import Path
8
+
9
+ # Project paths
10
+ PROJECT_ROOT = Path(__file__).parent.parent.parent
11
+ DATA_FILE = PROJECT_ROOT / "data.json"
12
+ MODEL_FILE = PROJECT_ROOT / "NFNetL0-0.961.onnx"
13
+ EXAMPLES_DIR = PROJECT_ROOT / "examples"
14
+
15
+ # Model configuration
16
+ class ModelConfig:
17
+ """Model-related configuration parameters."""
18
+
19
+ # ONNX Runtime providers (in order of preference)
20
+ ORT_PROVIDERS: List[str] = ["CPUExecutionProvider"]
21
+
22
+ # Image preprocessing parameters
23
+ IMAGE_SIZE: tuple[int, int] = (100, 100)
24
+ NORMALIZATION_MEAN: List[float] = [0.7611, 0.5869, 0.5923]
25
+ NORMALIZATION_STD: List[float] = [0.1266, 0.1487, 0.1619]
26
+
27
+ # Inference parameters
28
+ PROBABILITY_PRECISION: int = 1 # Decimal places for confidence display
29
+ PROBABILITY_SUM: int = 100 # Total sum for probability distribution
30
+
31
+ # UI configuration
32
+ class UIConfig:
33
+ """User interface configuration parameters."""
34
+
35
+ # Theme settings
36
+ THEME_PRIMARY_HUE: str = "rose"
37
+ THEME_SECONDARY_HUE: str = "slate"
38
+
39
+ # Component dimensions
40
+ IMAGE_HEIGHT: int = 420
41
+ PLOT_WIDTH: int = 520
42
+ PLOT_HEIGHT: int = 320
43
+ TEXTBOX_LINES: int = 4
44
+
45
+ # Layout settings
46
+ LEFT_COLUMN_SCALE: int = 5
47
+ RIGHT_COLUMN_SCALE: int = 5
48
+ THEME_TOGGLE_MIN_WIDTH: int = 140
49
+
50
+ # Application metadata
51
+ class AppConfig:
52
+ """Application metadata and information."""
53
+
54
+ TITLE: str = "MelanoScope AI - Clasificación de Enfermedades de la Piel"
55
+ VERSION: str = "1.0"
56
+ LAST_UPDATE: str = "2025-08"
57
+ INSTITUTION: str = "Universidad Central de Venezuela"
58
+ DISCLAIMER: str = "Demo • No diagnóstico médico"
59
+
60
+ # Medical disclaimer
61
+ MEDICAL_DISCLAIMER: str = (
62
+ "Esta herramienta es solo con fines educativos y no reemplaza "
63
+ "una evaluación médica."
64
+ )
65
+
66
+ # Logging configuration
67
+ class LogConfig:
68
+ """Logging configuration parameters."""
69
+
70
+ LOG_LEVEL: str = os.getenv("LOG_LEVEL", "INFO")
71
+ LOG_FORMAT: str = (
72
+ "%(asctime)s | %(name)s | %(levelname)s | %(message)s"
73
+ )
74
+ LOG_FILE: str = "melanoscope.log"
75
+
76
+ # Environment settings
77
+ class EnvConfig:
78
+ """Environment-specific configuration."""
79
+
80
+ DEBUG: bool = os.getenv("DEBUG", "False").lower() == "true"
81
+ ENVIRONMENT: str = os.getenv("ENVIRONMENT", "production")
src/core/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Core functionality for MelanoScope AI."""
2
+
3
+ from .model import MelanoScopeModel
4
+ from .preprocessing import ImagePreprocessor
5
+ from .utils import probabilities_to_ints, create_empty_dataframe, format_confidence
6
+
7
+ __all__ = [
8
+ "MelanoScopeModel",
9
+ "ImagePreprocessor",
10
+ "probabilities_to_ints",
11
+ "create_empty_dataframe",
12
+ "format_confidence"
13
+ ]
src/core/model.py ADDED
@@ -0,0 +1,252 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Model inference module for MelanoScope AI.
3
+ Handles ONNX model loading and inference operations.
4
+ """
5
+ import json
6
+ import logging
7
+ import time
8
+ from typing import Dict, Any, List, Optional, Tuple
9
+ from pathlib import Path
10
+ import numpy as np
11
+ import onnxruntime as ort
12
+ from PIL import Image
13
+
14
+ from ..config.settings import ModelConfig, DATA_FILE, MODEL_FILE
15
+ from .preprocessing import ImagePreprocessor
16
+ from .utils import probabilities_to_ints, format_confidence
17
+
18
+ # Configure logger
19
+ logger = logging.getLogger(__name__)
20
+
21
+ class MelanoScopeModel:
22
+ """
23
+ MelanoScope AI model for skin lesion classification.
24
+
25
+ Handles model loading, inference, and result processing.
26
+ """
27
+
28
+ def __init__(self):
29
+ """Initialize the model and load medical condition data."""
30
+ self.preprocessor = ImagePreprocessor()
31
+ self.session: Optional[ort.InferenceSession] = None
32
+ self.classes: List[str] = []
33
+ self.medical_data: Dict[str, Any] = {}
34
+
35
+ # Load model and data
36
+ self._load_model()
37
+ self._load_medical_data()
38
+
39
+ logger.info(f"MelanoScopeModel initialized with {len(self.classes)} classes")
40
+
41
+ def _load_model(self) -> None:
42
+ """Load the ONNX model for inference."""
43
+ try:
44
+ if not MODEL_FILE.exists():
45
+ raise FileNotFoundError(f"Model file not found: {MODEL_FILE}")
46
+
47
+ self.session = ort.InferenceSession(
48
+ str(MODEL_FILE),
49
+ providers=ModelConfig.ORT_PROVIDERS
50
+ )
51
+
52
+ # Log model information
53
+ input_info = self.session.get_inputs()[0]
54
+ logger.info(f"Model loaded successfully")
55
+ logger.debug(f"Input shape: {input_info.shape}, Input type: {input_info.type}")
56
+
57
+ except Exception as e:
58
+ logger.error(f"Failed to load model: {e}")
59
+ raise RuntimeError(f"Model loading failed: {e}")
60
+
61
+ def _load_medical_data(self) -> None:
62
+ """Load medical condition data and class names."""
63
+ try:
64
+ if not DATA_FILE.exists():
65
+ raise FileNotFoundError(f"Data file not found: {DATA_FILE}")
66
+
67
+ with open(DATA_FILE, "r", encoding="utf-8") as f:
68
+ self.medical_data = json.load(f)
69
+
70
+ self.classes = list(self.medical_data.keys())
71
+ logger.info(f"Loaded medical data for {len(self.classes)} conditions")
72
+
73
+ except Exception as e:
74
+ logger.error(f"Failed to load medical data: {e}")
75
+ raise RuntimeError(f"Medical data loading failed: {e}")
76
+
77
+ def predict(self, image_input: Any) -> Tuple[str, str, str, str, str, str, Any, str]:
78
+ """
79
+ Perform inference on input image.
80
+
81
+ Args:
82
+ image_input: Input image (PIL Image, numpy array, or None)
83
+
84
+ Returns:
85
+ Tuple containing (prediction, confidence, description, symptoms,
86
+ causes, treatment, probability_df, latency)
87
+ """
88
+ # Handle empty input
89
+ if image_input is None:
90
+ logger.warning("Received None image input")
91
+ return self._create_empty_result("Cargue una imagen y presione Analizar.")
92
+
93
+ try:
94
+ # Start timing
95
+ start_time = time.time()
96
+
97
+ # Preprocess image
98
+ input_tensor = self.preprocessor.preprocess(image_input)
99
+ if input_tensor is None:
100
+ return self._create_empty_result("Imagen inválida")
101
+
102
+ # Run inference
103
+ prediction_result = self._run_inference(input_tensor)
104
+ if prediction_result is None:
105
+ return self._create_empty_result("Error en la inferencia")
106
+
107
+ # Process results
108
+ pred_name, confidence, prob_df = prediction_result
109
+ medical_info = self._get_medical_info(pred_name)
110
+
111
+ # Calculate latency
112
+ latency_ms = int((time.time() - start_time) * 1000)
113
+ latency_str = f"{latency_ms} ms"
114
+
115
+ logger.info(f"Prediction completed: {pred_name} ({confidence}) in {latency_ms}ms")
116
+
117
+ return (
118
+ pred_name,
119
+ confidence,
120
+ medical_info["description"],
121
+ medical_info["symptoms"],
122
+ medical_info["causes"],
123
+ medical_info["treatment"],
124
+ prob_df,
125
+ latency_str
126
+ )
127
+
128
+ except Exception as e:
129
+ logger.error(f"Prediction failed: {e}")
130
+ return self._create_empty_result(f"Error: {str(e)}")
131
+
132
+ def _run_inference(self, input_tensor: np.ndarray) -> Optional[Tuple[str, str, Any]]:
133
+ """
134
+ Run model inference on preprocessed input.
135
+
136
+ Args:
137
+ input_tensor: Preprocessed image tensor
138
+
139
+ Returns:
140
+ Tuple of (prediction_name, confidence_string, probability_dataframe)
141
+ """
142
+ try:
143
+ if self.session is None:
144
+ raise RuntimeError("Model not loaded")
145
+
146
+ # Get input name
147
+ input_name = self.session.get_inputs()[0].name
148
+
149
+ # Run inference
150
+ output = self.session.run(None, {input_name: input_tensor})
151
+ logits = output[0].squeeze()
152
+
153
+ # Get prediction
154
+ pred_idx = int(np.argmax(logits))
155
+ pred_name = self.classes[pred_idx]
156
+
157
+ # Calculate softmax probabilities
158
+ exp_logits = np.exp(logits - np.max(logits))
159
+ probabilities = exp_logits / exp_logits.sum()
160
+
161
+ # Format confidence
162
+ confidence = format_confidence(probabilities[pred_idx])
163
+
164
+ # Create probability dataframe
165
+ prob_ints = probabilities_to_ints(probabilities * 100.0)
166
+ prob_df = self._create_probability_dataframe(prob_ints)
167
+
168
+ logger.debug(f"Inference completed: {pred_name} with confidence {confidence}")
169
+ return pred_name, confidence, prob_df
170
+
171
+ except Exception as e:
172
+ logger.error(f"Inference failed: {e}")
173
+ return None
174
+
175
+ def _create_probability_dataframe(self, probabilities: np.ndarray) -> Any:
176
+ """Create a sorted probability dataframe for visualization."""
177
+ try:
178
+ import pandas as pd
179
+
180
+ df = pd.DataFrame({
181
+ "item": self.classes,
182
+ "probability": probabilities.astype(int)
183
+ }).sort_values("probability", ascending=True)
184
+
185
+ return df
186
+
187
+ except Exception as e:
188
+ logger.error(f"Error creating probability dataframe: {e}")
189
+ # Return empty dataframe as fallback
190
+ import pandas as pd
191
+ return pd.DataFrame({"item": self.classes, "probability": [0] * len(self.classes)})
192
+
193
+ def _get_medical_info(self, condition_name: str) -> Dict[str, str]:
194
+ """
195
+ Get medical information for a specific condition.
196
+
197
+ Args:
198
+ condition_name: Name of the medical condition
199
+
200
+ Returns:
201
+ Dictionary containing medical information
202
+ """
203
+ try:
204
+ condition_data = self.medical_data.get(condition_name, {})
205
+
206
+ return {
207
+ "description": condition_data.get("description", ""),
208
+ "symptoms": condition_data.get("symptoms", ""),
209
+ "causes": condition_data.get("causes", ""),
210
+ "treatment": condition_data.get("treatment-1", "")
211
+ }
212
+
213
+ except Exception as e:
214
+ logger.error(f"Error getting medical info for {condition_name}: {e}")
215
+ return {"description": "", "symptoms": "", "causes": "", "treatment": ""}
216
+
217
+ def _create_empty_result(self, message: str) -> Tuple[str, str, str, str, str, str, Any, str]:
218
+ """Create an empty result tuple with error message."""
219
+ try:
220
+ import pandas as pd
221
+ empty_df = pd.DataFrame({"item": self.classes, "probability": [0] * len(self.classes)})
222
+ except:
223
+ empty_df = None
224
+
225
+ return (message, "", "", "", "", "", empty_df, "")
226
+
227
+ def get_model_info(self) -> Dict[str, Any]:
228
+ """
229
+ Get information about the loaded model.
230
+
231
+ Returns:
232
+ Dictionary containing model metadata
233
+ """
234
+ info = {
235
+ "classes": self.classes,
236
+ "num_classes": len(self.classes),
237
+ "model_file": str(MODEL_FILE),
238
+ "providers": ModelConfig.ORT_PROVIDERS
239
+ }
240
+
241
+ if self.session:
242
+ try:
243
+ input_info = self.session.get_inputs()[0]
244
+ info.update({
245
+ "input_shape": input_info.shape,
246
+ "input_type": input_info.type,
247
+ "input_name": input_info.name
248
+ })
249
+ except Exception as e:
250
+ logger.warning(f"Could not get model input info: {e}")
251
+
252
+ return info
src/core/preprocessing.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Image preprocessing module for MelanoScope AI.
3
+ Handles image transformations and normalization.
4
+ """
5
+ import logging
6
+ from typing import Union, Optional
7
+ import numpy as np
8
+ from PIL import Image
9
+ from torchvision import transforms
10
+
11
+ from ..config.settings import ModelConfig
12
+
13
+ # Configure logger
14
+ logger = logging.getLogger(__name__)
15
+
16
+ class ImagePreprocessor:
17
+ """Handles image preprocessing for model inference."""
18
+
19
+ def __init__(self):
20
+ """Initialize the preprocessor with configured transforms."""
21
+ self.transforms = self._create_transform_pipeline()
22
+ logger.info("ImagePreprocessor initialized")
23
+
24
+ def _create_transform_pipeline(self) -> transforms.Compose:
25
+ """
26
+ Create the image transformation pipeline.
27
+
28
+ Returns:
29
+ Composed torchvision transforms
30
+ """
31
+ try:
32
+ transform_pipeline = transforms.Compose([
33
+ transforms.Resize(ModelConfig.IMAGE_SIZE),
34
+ transforms.ToTensor(),
35
+ transforms.Normalize(
36
+ mean=ModelConfig.NORMALIZATION_MEAN,
37
+ std=ModelConfig.NORMALIZATION_STD
38
+ ),
39
+ ])
40
+ logger.debug("Transform pipeline created successfully")
41
+ return transform_pipeline
42
+ except Exception as e:
43
+ logger.error(f"Error creating transform pipeline: {e}")
44
+ raise
45
+
46
+ def preprocess(self, image_input: Union[Image.Image, np.ndarray]) -> Optional[np.ndarray]:
47
+ """
48
+ Preprocess image for model inference.
49
+
50
+ Args:
51
+ image_input: PIL Image or numpy array
52
+
53
+ Returns:
54
+ Preprocessed image tensor as numpy array, or None if preprocessing fails
55
+
56
+ Raises:
57
+ ValueError: If image input is invalid
58
+ """
59
+ try:
60
+ # Convert input to PIL Image
61
+ pil_image = self._convert_to_pil(image_input)
62
+ if pil_image is None:
63
+ return None
64
+
65
+ # Apply transforms and add batch dimension
66
+ tensor = self.transforms(pil_image).unsqueeze(0).numpy()
67
+
68
+ logger.debug(f"Image preprocessed to shape: {tensor.shape}")
69
+ return tensor
70
+
71
+ except Exception as e:
72
+ logger.error(f"Error preprocessing image: {e}")
73
+ return None
74
+
75
+ def _convert_to_pil(self, image_input: Union[Image.Image, np.ndarray]) -> Optional[Image.Image]:
76
+ """
77
+ Convert various image formats to PIL Image.
78
+
79
+ Args:
80
+ image_input: Image in PIL or numpy format
81
+
82
+ Returns:
83
+ PIL Image in RGB mode, or None if conversion fails
84
+ """
85
+ try:
86
+ if isinstance(image_input, Image.Image):
87
+ return image_input.convert("RGB")
88
+ else:
89
+ # Assume numpy array
90
+ pil_image = Image.fromarray(image_input).convert("RGB")
91
+ return pil_image
92
+
93
+ except Exception as e:
94
+ logger.error(f"Error converting image to PIL format: {e}")
95
+ return None
96
+
97
+ def get_transform_info(self) -> dict:
98
+ """
99
+ Get information about the preprocessing transforms.
100
+
101
+ Returns:
102
+ Dictionary containing transform parameters
103
+ """
104
+ return {
105
+ "image_size": ModelConfig.IMAGE_SIZE,
106
+ "normalization_mean": ModelConfig.NORMALIZATION_MEAN,
107
+ "normalization_std": ModelConfig.NORMALIZATION_STD
108
+ }
src/core/utils.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utility functions for MelanoScope AI.
3
+ Contains helper functions and probability calculations.
4
+ """
5
+ import logging
6
+ from typing import List, Dict, Any, Union
7
+ import numpy as np
8
+ import pandas as pd
9
+
10
+ from ..config.settings import ModelConfig
11
+
12
+ # Configure logger
13
+ logger = logging.getLogger(__name__)
14
+
15
+ def probabilities_to_ints(
16
+ probabilities: np.ndarray,
17
+ total_sum: int = ModelConfig.PROBABILITY_SUM
18
+ ) -> np.ndarray:
19
+ """
20
+ Convert probability array to integer percentages that sum to total_sum.
21
+
22
+ Args:
23
+ probabilities: Array of probability values
24
+ total_sum: Target sum for the integer percentages
25
+
26
+ Returns:
27
+ Array of integers that sum to total_sum
28
+
29
+ Raises:
30
+ ValueError: If probabilities contain invalid values
31
+ """
32
+ try:
33
+ probabilities = np.array(probabilities)
34
+
35
+ # Ensure non-negative values
36
+ positive_values = np.maximum(probabilities, 0)
37
+ total_positive = positive_values.sum()
38
+
39
+ if total_positive == 0:
40
+ logger.warning("All probabilities are zero or negative")
41
+ return np.zeros_like(probabilities, dtype=int)
42
+
43
+ # Scale to target sum
44
+ scaled = positive_values / total_positive * total_sum
45
+ rounded = np.round(scaled).astype(int)
46
+
47
+ # Adjust for rounding errors
48
+ diff = total_sum - rounded.sum()
49
+ if diff != 0:
50
+ max_idx = int(np.argmax(positive_values))
51
+ rounded = rounded.flatten()
52
+ rounded[max_idx] += diff
53
+ rounded = rounded.reshape(scaled.shape)
54
+
55
+ logger.debug(f"Converted probabilities to integers summing to {total_sum}")
56
+ return rounded
57
+
58
+ except Exception as e:
59
+ logger.error(f"Error converting probabilities to integers: {e}")
60
+ raise ValueError(f"Invalid probability values: {e}")
61
+
62
+ def create_empty_dataframe(classes: List[str]) -> pd.DataFrame:
63
+ """
64
+ Create an empty probability dataframe with zero values.
65
+
66
+ Args:
67
+ classes: List of class names
68
+
69
+ Returns:
70
+ DataFrame with items and zero probabilities
71
+ """
72
+ logger.debug(f"Creating empty dataframe for {len(classes)} classes")
73
+ return pd.DataFrame({
74
+ "item": classes,
75
+ "probability": [0] * len(classes)
76
+ })
77
+
78
+ def format_confidence(probability: float, precision: int = ModelConfig.PROBABILITY_PRECISION) -> str:
79
+ """
80
+ Format probability as percentage string.
81
+
82
+ Args:
83
+ probability: Probability value between 0 and 1
84
+ precision: Number of decimal places
85
+
86
+ Returns:
87
+ Formatted percentage string
88
+ """
89
+ try:
90
+ percentage = probability * 100
91
+ return f"{percentage:.{precision}f}%"
92
+ except Exception as e:
93
+ logger.error(f"Error formatting confidence: {e}")
94
+ return "0.0%"
95
+
96
+ def validate_image_input(image: Any) -> bool:
97
+ """
98
+ Validate that image input is not None and has valid structure.
99
+
100
+ Args:
101
+ image: Image input to validate
102
+
103
+ Returns:
104
+ True if image is valid, False otherwise
105
+ """
106
+ if image is None:
107
+ logger.warning("Image input is None")
108
+ return False
109
+
110
+ try:
111
+ # Additional validation could be added here
112
+ # e.g., check image dimensions, format, etc.
113
+ return True
114
+ except Exception as e:
115
+ logger.error(f"Error validating image input: {e}")
116
+ return False
src/ui/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ """User interface components for MelanoScope AI."""
2
+
3
+ from .components import MelanoScopeUI
4
+ from .styles import create_theme, get_custom_css
5
+
6
+ __all__ = [
7
+ "MelanoScopeUI",
8
+ "create_theme",
9
+ "get_custom_css"
10
+ ]
src/ui/components.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ UI components for MelanoScope AI.
3
+ Contains Gradio interface component definitions.
4
+ """
5
+ import os
6
+ import logging
7
+ from typing import List, Any, Optional
8
+ import gradio as gr
9
+
10
+ from ..config.settings import UIConfig, EXAMPLES_DIR
11
+ from ..core.utils import create_empty_dataframe
12
+ from .styles import get_custom_css, create_theme, get_header_html, get_footer_html, get_model_info_html
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
class MelanoScopeUI:
    """Handles the user interface components and layout.

    Builds the complete Gradio Blocks UI for the classifier: an input
    column (image upload, analyze/clear buttons, quick examples, latency
    readout) and a results column (top prediction, confidence,
    probability bar chart, and medical-information tabs).
    """

    def __init__(self, model_instance, classes: List[str]):
        """
        Initialize UI components.

        Args:
            model_instance: Initialized model instance for predictions;
                its ``predict`` method is wired to the Analyze button.
            classes: List of class names used to seed the empty
                probability dataframe shown before any prediction.
        """
        self.model = model_instance
        self.classes = classes
        # Resolve theme and CSS once; reused on every create_interface() call.
        self.theme = create_theme()
        self.css = get_custom_css()

        logger.info("MelanoScopeUI initialized")

    def create_interface(self) -> gr.Blocks:
        """
        Create the complete Gradio interface.

        Returns:
            Configured Gradio Blocks interface

        Raises:
            Exception: re-raised (after logging) when any sub-section or
                event handler fails to build.
        """
        try:
            with gr.Blocks(theme=self.theme, css=self.css) as interface:
                # Header section
                self._create_header()

                # Main content area
                with gr.Row(equal_height=True):
                    # Left column: input and controls
                    self._create_input_column()

                    # Right column: results and information
                    self._create_results_column()

                # Footer
                self._create_footer()

                # Set up event handlers (components must already exist, and
                # wiring must happen inside the Blocks context)
                self._setup_event_handlers()

            logger.info("Interface created successfully")
            return interface

        except Exception as e:
            logger.error(f"Failed to create interface: {e}")
            raise

    def _create_header(self) -> None:
        """Create the header section with title and theme toggle."""
        with gr.Row():
            with gr.Column(scale=6):
                gr.Markdown(get_header_html())

            with gr.Column(scale=1, min_width=UIConfig.THEME_TOGGLE_MIN_WIDTH):
                try:
                    # NOTE(review): gr.ThemeMode is not a documented Gradio
                    # component — confirm the installed version exposes it;
                    # otherwise the except branch renders an empty cell.
                    self.dark_toggle = gr.ThemeMode(label="Modo", value="system")
                except Exception:
                    gr.Markdown("")  # Fallback for older Gradio versions

    def _create_input_column(self) -> None:
        """Create the left column with image input and controls."""
        with gr.Column(scale=UIConfig.LEFT_COLUMN_SCALE):
            # Image input (numpy array — presumably what model.predict
            # expects; confirm against the model wrapper)
            self.image_input = gr.Image(
                type="numpy",
                label="Imagen de la lesión",
                height=UIConfig.IMAGE_HEIGHT,
                sources=["upload", "clipboard"]
            )

            # Action buttons
            with gr.Row():
                self.analyze_btn = gr.Button("Analizar", variant="primary")
                self.clear_btn = gr.Button("Limpiar")

            # Examples section
            self._create_examples_section()

            # Latency display
            self.latency_output = gr.Label(label="Latencia aproximada")

    def _create_examples_section(self) -> None:
        """Create the examples section if example files exist."""
        try:
            # NOTE(review): EXAMPLES_DIR is imported at module level but
            # these paths are hard-coded — consider deriving them from
            # EXAMPLES_DIR so both stay in sync.
            example_files = [
                "examples/ak.jpg",
                "examples/bcc.jpg",
                "examples/df.jpg",
                "examples/melanoma.jpg",
                "examples/nevus.jpg",
            ]

            # Filter existing files so missing examples don't break the UI
            existing_examples = [f for f in example_files if os.path.exists(f)]

            if existing_examples:
                gr.Examples(
                    examples=existing_examples,
                    inputs=self.image_input,
                    label="Ejemplos rápidos"
                )
                logger.debug(f"Created examples with {len(existing_examples)} files")
            else:
                logger.warning("No example files found")

        except Exception as e:
            # Examples are a convenience; failure here is non-fatal.
            logger.warning(f"Failed to create examples section: {e}")

    def _create_results_column(self) -> None:
        """Create the right column with results and information."""
        with gr.Column(scale=UIConfig.RIGHT_COLUMN_SCALE):
            # Prediction results
            self._create_prediction_results()

            # Information tabs
            self._create_information_tabs()

    def _create_prediction_results(self) -> None:
        """Create the prediction results section."""
        with gr.Group():
            # Main prediction and confidence
            with gr.Row():
                self.prediction_output = gr.Label(
                    label="Predicción principal",
                    elem_classes=["pred-card"]
                )
                self.confidence_output = gr.Label(label="Confianza")

            # Probability distribution chart, pre-filled with zeros
            self.probability_plot = gr.BarPlot(
                value=create_empty_dataframe(self.classes),
                x="item",
                y="probability",
                title="Distribución de probabilidad (Top‑k)",
                x_title="Clase",
                y_title="Probabilidad",
                vertical=False,
                tooltip=["item", "probability"],
                width=UIConfig.PLOT_WIDTH,
                height=UIConfig.PLOT_HEIGHT,
            )

    def _create_information_tabs(self) -> None:
        """Create the tabbed information section."""
        with gr.Tabs():
            # Medical details tab
            with gr.TabItem("Detalles"):
                self._create_medical_details()

            # Model information tab
            with gr.TabItem("Acerca del modelo"):
                gr.Markdown(get_model_info_html())

    def _create_medical_details(self) -> None:
        """Create the medical details accordions (description open by default)."""
        with gr.Accordion("Descripción", open=True):
            self.description_output = gr.Textbox(
                lines=UIConfig.TEXTBOX_LINES,
                interactive=False
            )

        with gr.Accordion("Síntomas", open=False):
            self.symptoms_output = gr.Textbox(
                lines=UIConfig.TEXTBOX_LINES,
                interactive=False
            )

        with gr.Accordion("Causas", open=False):
            self.causes_output = gr.Textbox(
                lines=UIConfig.TEXTBOX_LINES,
                interactive=False
            )

        with gr.Accordion("Tratamiento", open=False):
            self.treatment_output = gr.Textbox(
                lines=UIConfig.TEXTBOX_LINES,
                interactive=False
            )

    def _create_footer(self) -> None:
        """Create the footer section."""
        gr.Markdown(get_footer_html())

    def _setup_event_handlers(self) -> None:
        """Set up event handlers for interactive components.

        Raises:
            Exception: re-raised (after logging) when wiring fails.
        """
        try:
            # Collect all output components; the order must match the tuple
            # returned by model.predict and by _clear_all (minus the image).
            outputs = [
                self.prediction_output,
                self.confidence_output,
                self.description_output,
                self.symptoms_output,
                self.causes_output,
                self.treatment_output,
                self.probability_plot,
                self.latency_output
            ]

            # Analyze button click
            self.analyze_btn.click(
                fn=self.model.predict,
                inputs=[self.image_input],
                outputs=outputs,
                show_progress="full"
            )

            # Clear button click (also resets the image input)
            self.clear_btn.click(
                fn=self._clear_all,
                inputs=[],
                outputs=[self.image_input] + outputs
            )

            logger.debug("Event handlers set up successfully")

        except Exception as e:
            logger.error(f"Failed to set up event handlers: {e}")
            raise

    def _clear_all(self) -> tuple:
        """
        Clear all inputs and outputs.

        Returns:
            Tuple of 9 cleared values, one per component wired to the
            Clear button: image, prediction, confidence, description,
            symptoms, causes, treatment, probability_plot, latency.
        """
        try:
            empty_df = create_empty_dataframe(self.classes)

            # Return cleared values for: image, prediction, confidence, description,
            # symptoms, causes, treatment, probability_plot, latency
            return (None, "", "", "", "", "", "", empty_df, "")

        except Exception as e:
            logger.error(f"Error clearing interface: {e}")
            return (None, "", "", "", "", "", "", None, "")
src/ui/styles.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ UI styling and theming for MelanoScope AI.
3
+ Contains CSS styles and theme configurations.
4
+ """
5
+ from typing import Optional
6
+ import logging
7
+
8
+ from ..config.settings import UIConfig
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
def get_custom_css() -> str:
    """Return the custom CSS used to style the Gradio interface.

    Covers the header layout and badge, prediction card, footer, rounded
    corners, uniform Vega-Lite bar colors, and button hover effects.
    """
    css = """
    .header {
        display: flex;
        align-items: center;
        gap: 12px;
    }
    .badge {
        font-size: 12px;
        padding: 4px 8px;
        border-radius: 12px;
        background: #f1f5f9;
        color: #334155;
    }
    .pred-card {
        font-size: 18px;
    }
    .footer {
        font-size: 12px;
        color: #64748b;
        text-align: center;
        padding: 12px 0;
    }
    button, .gradio-container .gr-box, .gradio-container .gr-panel {
        border-radius: 10px !important;
    }
    /* Uniform bar color in Vega-Lite charts */
    .vega-embed .mark-rect, .vega-embed .mark-bar, .vega-embed .role-mark rect {
        fill: #ef4444 !important;
    }
    /* Improve spacing and readability */
    .gradio-container {
        font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
    }
    .gr-button {
        transition: all 0.2s ease;
    }
    .gr-button:hover {
        transform: translateY(-1px);
        box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
    }
    """
    return css
60
+
61
def create_theme():
    """Build the application's Gradio theme.

    Returns:
        A ``gr.themes.Soft`` instance configured from ``UIConfig`` hues,
        or ``None`` when theme creation fails (Gradio then falls back to
        its default theme).
    """
    try:
        import gradio as gr

        soft_theme = gr.themes.Soft(
            primary_hue=UIConfig.THEME_PRIMARY_HUE,
            secondary_hue=UIConfig.THEME_SECONDARY_HUE,
        )
        logger.debug("Theme created successfully")
        return soft_theme
    except Exception as e:
        logger.warning(f"Failed to create theme, using default: {e}")
        return None
82
+
83
def get_header_html() -> str:
    """Return the HTML for the app header: title, disclaimer badge, intro text."""
    from ..config.settings import AppConfig

    header = f"""
    <div class="header">
        <h1 style="margin:0;">{AppConfig.TITLE}</h1>
        <span class="badge">{AppConfig.DISCLAIMER}</span>
    </div>
    <p style="margin-top:6px;">
        Sube una imagen dermatoscópica para ver la clase predicha,
        la confianza y la distribución de probabilidades.
    </p>
    """
    return header
102
+
103
def get_footer_html() -> str:
    """Return the HTML footer: model version, last update date, institution."""
    from ..config.settings import AppConfig

    segments = (
        f"Versión del modelo: {AppConfig.VERSION}",
        f"Última actualización: {AppConfig.LAST_UPDATE}",
        AppConfig.INSTITUTION,
    )
    return "<div class='footer'>" + " • ".join(segments) + "</div>"
119
+
120
def get_model_info_html() -> str:
    """Return the HTML bullet list shown in the model-information tab."""
    from ..config.settings import AppConfig

    bullet_lines = [
        "- Arquitectura: CNN exportado a ONNX.<br>",
        "- Entrenamiento: dataset dermatoscópico (ver documentación).<br>",
        f"- Nota: {AppConfig.MEDICAL_DISCLAIMER}",
    ]
    return "".join(bullet_lines)