# Add GradCAM target layer selection and UI defaults integration (commit 67f1c25)
import json
from pathlib import Path
from typing import Any
import gradio as gr
from src.inference import (
CASE_OPTIONS,
DEFAULT_CASE_NAME,
DEFAULT_GRADCAM_TARGET_LAYER,
DEVICE,
GRADCAM_TARGET_LAYER_OPTIONS,
batch_predict_with_xdl_stream,
)
# Optional JSON file with user-editable UI defaults, expected at
# <project_root>/config/ui_defaults.json (two directory levels up from this module).
UI_DEFAULTS_PATH = Path(__file__).resolve().parent.parent / "config" / "ui_defaults.json"
# Built-in defaults applied when the JSON file is missing, unreadable, or a
# value fails validation (see _load_ui_defaults).
UI_DEFAULTS_FALLBACK = {
    "selected_case": DEFAULT_CASE_NAME,
    "confidence_threshold": 0.60,
    "smoothgrad_samples": 50,
    "smoothgrad_noise": 0.05,
    "gradcam_target_layer": DEFAULT_GRADCAM_TARGET_LAYER,
    "save_xdl_results": False,
    "save_xdl_dir": "xdl_results",
}
# (display label, internal value) pairs for the GradCAM layer dropdown.
# NOTE(review): values are presumably a subset of GRADCAM_TARGET_LAYER_OPTIONS
# from src.inference — verify they stay in sync when layers change.
GRADCAM_TARGET_LAYER_DROPDOWN_CHOICES = [
    ("DenseBlock 3 (Default, balanced)", "denseblock3"),
    ("Transition 2 (Broad, stable)", "transition2"),
    ("Transition 1 (Earlier, detailed/noisier)", "transition1"),
    ("DenseBlock 4 (Late, center-heavy)", "denseblock4"),
    ("Transition 3 (Late, center-heavy)", "transition3"),
    ("Norm5 Last (Legacy behavior)", "norm5_last"),
]
# Inline stylesheet injected into the page via gr.HTML in build_demo();
# styles the centered shell, the hero header card, and the bordered panels.
CUSTOM_CSS = """
.app-shell {
max-width: 1120px;
margin: 0 auto;
}
.hero {
border: 1px solid #d1d5db;
background: linear-gradient(135deg, #f0fdfa 0%, #ecfeff 45%, #f8fafc 100%);
border-radius: 14px;
padding: 16px 18px;
margin-bottom: 12px;
}
.hero h1 {
margin: 0;
font-size: 24px;
color: #0f172a;
}
.hero p {
margin: 6px 0 0 0;
color: #334155;
font-size: 14px;
}
.panel {
border: 1px solid #e2e8f0;
border-radius: 12px;
background: #ffffff;
padding: 12px;
}
"""
def _as_float(value: Any, fallback: float) -> float:
try:
return float(value)
except (TypeError, ValueError):
return float(fallback)
def _as_int(value: Any, fallback: int) -> int:
try:
return int(value)
except (TypeError, ValueError):
return int(fallback)
def _as_bool(value: Any, fallback: bool) -> bool:
if isinstance(value, bool):
return value
if isinstance(value, str):
return value.strip().lower() in {"1", "true", "yes", "y", "on"}
if value is None:
return fallback
return bool(value)
def _load_ui_defaults() -> dict[str, Any]:
    """Load and sanitize UI defaults from config/ui_defaults.json.

    Starts from UI_DEFAULTS_FALLBACK and overlays any known keys found in the
    JSON file; every value is then validated/clamped so the Gradio widgets in
    build_demo() always receive in-range input.

    Returns:
        A dict with the same keys as UI_DEFAULTS_FALLBACK, all values valid.
    """
    defaults = dict(UI_DEFAULTS_FALLBACK)
    try:
        raw_text = UI_DEFAULTS_PATH.read_text(encoding="utf-8")
        raw = json.loads(raw_text)
        if isinstance(raw, dict):
            # Only overlay keys we know about; ignore extras in the file.
            for key in defaults:
                if key in raw:
                    defaults[key] = raw[key]
    except (OSError, ValueError):
        # Best-effort config load: a missing, unreadable, or malformed file
        # keeps the built-in fallbacks. (json.JSONDecodeError and
        # UnicodeDecodeError are both ValueError subclasses.)
        pass
    # Case must be one of the known options from src.inference.
    selected_case = str(defaults.get("selected_case", DEFAULT_CASE_NAME))
    defaults["selected_case"] = selected_case if selected_case in CASE_OPTIONS else DEFAULT_CASE_NAME
    # Clamp to [0, 1] to match the threshold widget's range.
    defaults["confidence_threshold"] = min(
        1.0,
        max(0.0, _as_float(defaults.get("confidence_threshold"), UI_DEFAULTS_FALLBACK["confidence_threshold"])),
    )
    # Clamp to [1, 1000] to match the SmoothGrad samples widget's declared
    # bounds (the previous version only enforced the lower bound).
    defaults["smoothgrad_samples"] = min(
        1000,
        max(1, _as_int(defaults.get("smoothgrad_samples"), UI_DEFAULTS_FALLBACK["smoothgrad_samples"])),
    )
    # Clamp to [0, 1] to match the noise widget's range.
    defaults["smoothgrad_noise"] = min(
        1.0,
        max(0.0, _as_float(defaults.get("smoothgrad_noise"), UI_DEFAULTS_FALLBACK["smoothgrad_noise"])),
    )
    # Normalize and validate the GradCAM layer against the known option set.
    gradcam_target_layer = str(defaults.get("gradcam_target_layer", DEFAULT_GRADCAM_TARGET_LAYER)).strip().lower()
    defaults["gradcam_target_layer"] = (
        gradcam_target_layer if gradcam_target_layer in GRADCAM_TARGET_LAYER_OPTIONS else DEFAULT_GRADCAM_TARGET_LAYER
    )
    defaults["save_xdl_results"] = _as_bool(defaults.get("save_xdl_results"), UI_DEFAULTS_FALLBACK["save_xdl_results"])
    # An empty or null directory falls back to the default folder name.
    defaults["save_xdl_dir"] = str(defaults.get("save_xdl_dir") or UI_DEFAULTS_FALLBACK["save_xdl_dir"])
    return defaults
def _toggle_save_dir(enabled: bool):
    """Show and enable the save-directory textbox when saving is on."""
    flag = bool(enabled)
    return gr.update(visible=flag, interactive=flag)
def build_demo() -> gr.Blocks:
    """Construct the Gradio UI and wire its events.

    Layout: a styled header, an input/settings row, and a results row.
    Returns the Blocks app with queuing enabled (required for the streaming
    batch-inference handler).
    """
    # Defaults come from config/ui_defaults.json, validated with fallbacks.
    ui_defaults = _load_ui_defaults()
    with gr.Blocks(title="XDL Colitis Demo") as demo:
        # Header card; CUSTOM_CSS is injected inline via a <style> tag so no
        # external stylesheet is needed.
        gr.HTML(
            f"""
            <style>{CUSTOM_CSS}</style>
            <div class="app-shell">
            <div class="hero">
            <h1>XDL Colitis Workbench</h1>
            <p>Detected device: <b>{DEVICE.type}</b>. Upload a directory or enter a local folder path, then run batch inference.</p>
            </div>
            </div>
            """
        )
        with gr.Row(elem_classes=["app-shell"]):
            # --- Left panel: image input ---
            with gr.Column(scale=2, elem_classes=["panel"]):
                gr.Markdown("### 1) Image Input")
                selected_case = gr.Dropdown(
                    choices=CASE_OPTIONS,
                    value=ui_defaults["selected_case"],
                    label="Problem Case",
                    info="Choose the model group that matches your diagnosis scenario.",
                )
                # Two input routes: a directory upload, or a server-local path.
                upload_input = gr.File(
                    file_count="directory",
                    file_types=["image"],
                    type="filepath",
                    label="Upload Image Folder",
                )
                folder_path = gr.Textbox(
                    label="Local Folder Path (Optional)",
                    placeholder="/absolute/path/to/folder/with/images",
                )
            # --- Right panel: inference settings ---
            with gr.Column(scale=1, elem_classes=["panel"]):
                gr.Markdown("### 2) Inference Settings")
                threshold = gr.Number(
                    value=ui_defaults["confidence_threshold"],
                    minimum=0.0,
                    maximum=1.0,
                    step=0.01,
                    precision=2,
                    label="Confidence Threshold",
                    info="Range: 0.00 to 1.00",
                )
                smoothgrad_samples = gr.Number(
                    value=ui_defaults["smoothgrad_samples"],
                    minimum=1,
                    maximum=1000,
                    step=1,
                    precision=0,
                    label="SmoothGrad Samples",
                    info="Higher values improve stability but increase runtime.",
                )
                smoothgrad_noise = gr.Number(
                    value=ui_defaults["smoothgrad_noise"],
                    minimum=0.0,
                    maximum=1.0,
                    step=0.01,
                    precision=2,
                    label="SmoothGrad Noise Level",
                    info="Typical range: 0.01 to 0.20",
                )
                # Choices are (label, value) pairs; the stored value is the
                # lowercase internal layer name (see module constants above).
                gradcam_target_layer = gr.Dropdown(
                    choices=GRADCAM_TARGET_LAYER_DROPDOWN_CHOICES,
                    value=ui_defaults["gradcam_target_layer"],
                    label="GradCAM Target Layer",
                    info="Try `transition2` or `denseblock3` if CAM looks too centered.",
                )
                save_xdl_results = gr.Checkbox(
                    label="Save XDL Results Locally",
                    value=ui_defaults["save_xdl_results"],
                )
                # Only visible/editable while the checkbox above is ticked;
                # initial state mirrors the loaded default.
                save_xdl_dir = gr.Textbox(
                    label="Save Folder",
                    value=ui_defaults["save_xdl_dir"],
                    placeholder="xdl_results",
                    visible=bool(ui_defaults["save_xdl_results"]),
                    interactive=bool(ui_defaults["save_xdl_results"]),
                )
                run_btn = gr.Button("Run Batch Inference", variant="primary")
        # --- Results row ---
        with gr.Row(elem_classes=["app-shell"]):
            with gr.Column(elem_classes=["panel"]):
                gr.Markdown("### 3) Results")
                summary_out = gr.HTML(label="Summary")
                table_out = gr.Dataframe(
                    headers=["filename", "status", "predicted_label", "confidence_or_error"],
                    datatype=["str", "str", "str", "str"],
                    interactive=False,
                    label="Per-image Results",
                )
                gallery_out = gr.Gallery(
                    label="Compact XDL Results (Original | GradCAM | SmoothGrad)",
                    columns=2,
                )
        # Toggling the checkbox shows/hides the save-folder textbox.
        save_xdl_results.change(
            fn=_toggle_save_dir,
            inputs=[save_xdl_results],
            outputs=[save_xdl_dir],
        )
        # Input order must match batch_predict_with_xdl_stream's parameter
        # order — note gradcam_target_layer is last, after the save options.
        run_btn.click(
            fn=batch_predict_with_xdl_stream,
            inputs=[
                upload_input,
                selected_case,
                folder_path,
                threshold,
                smoothgrad_samples,
                smoothgrad_noise,
                save_xdl_results,
                save_xdl_dir,
                gradcam_target_layer,
            ],
            outputs=[summary_out, table_out, gallery_out],
        )
    # queue() enables the generator-based streaming handler to push updates.
    return demo.queue()