import gradio as gr
import torch
import onnxruntime as ort
import numpy as np
from PIL import Image
from torchvision import transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt

# One-hot metadata columns in the exact order the ONNX model expects.
# Do not reorder: each column's position in this list is its index in the
# metadata input vector.
_metadata_columns = [
    "age",
    "usePesticide_I", "usePesticide_False", "usePesticide_True",
    "gender_M", "gender_F", "gender_O",
    "familySkinCancerHistory_False", "familySkinCancerHistory_True", "familySkinCancerHistory_I",
    "familyCancerHistory_True", "familyCancerHistory_False", "familyCancerHistory_I",
    "fitzpatrickSkinType_2.0", "fitzpatrickSkinType_1.0", "fitzpatrickSkinType_4.0",
    "fitzpatrickSkinType_3.0", "fitzpatrickSkinType_5.0",
    "macroBodyRegion_CHEST", "macroBodyRegion_NOSE", "macroBodyRegion_LIP",
    "macroBodyRegion_BACK", "macroBodyRegion_FOREARM", "macroBodyRegion_ARM",
    "macroBodyRegion_LEG", "macroBodyRegion_FACE", "macroBodyRegion_HAND",
    "macroBodyRegion_SCALP", "macroBodyRegion_NECK", "macroBodyRegion_FOOT",
    "macroBodyRegion_EAR", "macroBodyRegion_THIGH", "macroBodyRegion_ABDOMEN",
    "hasItched_True", "hasItched_False", "hasItched_I",
    "hasGrown_I", "hasGrown_False", "hasGrown_True",
    "hasHurt_True", "hasHurt_False", "hasHurt_I",
    "hasChanged_I", "hasChanged_False", "hasChanged_True",
    "hasBled_False", "hasBled_True", "hasBled_I",
    "hasElevation_I", "hasElevation_False", "hasElevation_True",
]
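# A small sanity check on the column list above, added for clarity; it is
# an assumption, not part of the original model contract. It presumes every
# True/False/"I" answer ("I" presumably encodes an unknown/ignored answer
# in the training data) has a matching one-hot column, which guards against
# typos if _metadata_columns is ever edited.
for _key in ("familyCancerHistory", "familySkinCancerHistory", "hasBled",
             "hasHurt", "hasItched", "hasGrown", "hasChanged", "hasElevation"):
    for _val in ("True", "False", "I"):
        assert f"{_key}_{_val}" in _metadata_columns, f"missing column: {_key}_{_val}"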
try:
    ort_session = ort.InferenceSession("./pad25_mobilenetv3_folder_1.onnx")
    print("ONNX model loaded successfully.")
except Exception as e:
    print(f"Error loading ONNX model: {e}")
    ort_session = None

LABELS = ['ACK', 'BCC', 'MEL', 'NEV', 'SCC', 'SEK']


def create_plot(probs_history, steps_labels):
    """Plot how each class probability evolves as features are added."""
    fig, ax = plt.subplots(figsize=(10, 6))

    class_data = {label: [] for label in LABELS}
    for step_probs in probs_history:
        for label, prob in step_probs.items():
            class_data[label].append(prob * 100)

    # Identify the top 3 classes based on their final probability.
    final_probs = {label: values[-1] for label, values in class_data.items()}
    top_classes = sorted(final_probs, key=final_probs.get, reverse=True)[:3]

    annotations = {}

    # Plot every class: highlight the top 3, fade the rest.
    for name, values in class_data.items():
        x_vals = range(len(values))
        if name in top_classes:
            line, = ax.plot(x_vals, values, label=name, linewidth=2, marker='o')
            color = line.get_color()
            # Collect text annotations per x position so overlaps can be
            # resolved before drawing.
            for x, y in zip(x_vals, values):
                annotations.setdefault(x, []).append((y, f"{y:.1f}", color))
        else:
            # Low-probability classes, drawn faded.
            ax.plot(x_vals, values, label=name, alpha=0.3, linewidth=1)

    # Place annotations bottom-up at each x, pushing labels apart so they
    # do not overlap.
    for x in sorted(annotations.keys()):
        points = sorted(annotations[x], key=lambda p: p[0])
        min_dist = 5
        last_text_y = -100
        for y, text, color in points:
            text_y = y + 3
            if text_y < last_text_y + min_dist:
                text_y = last_text_y + min_dist
            ax.text(x, text_y, text, ha='center', fontweight='bold',
                    fontsize=10, color=color)
            last_text_y = text_y

    ax.set_xticks(range(len(steps_labels)))
    ax.set_xticklabels(steps_labels, rotation=30, ha='right')
    ax.set_ylabel("Probability (%)")
    ax.set_xlabel("Incrementally Added Features")
    ax.set_ylim(0, 115)  # headroom above 100% for the text annotations
    ax.grid(True, linestyle='--', alpha=0.3)
    ax.legend(loc='upper right', bbox_to_anchor=(1.10, 1),
              borderaxespad=0., framealpha=0.8)
    plt.tight_layout()
    return fig


def predict(image, age, region, cancer_history, skin_cancer_history,
            bleed, hurt, itch, grown, changed, elevation):
    if ort_session is None:
        return "Model not loaded", None

    # Each step adds one feature on top of the previous ones; the model is
    # re-run after every addition so the probability trajectory can be plotted.
    steps = [
        ("Baseline (Image only)", {}),
        (f"Age ({age})", {"age": age}),
        (f"Region ({region})", {"macroBodyRegion": region}),
    ]

    symptoms_map = {
        "Cancer History": ("familyCancerHistory", cancer_history),
        "Skin Cancer History": ("familySkinCancerHistory", skin_cancer_history),
        "Bleed": ("hasBled", bleed),
        "Hurt": ("hasHurt", hurt),
        "Itch": ("hasItched", itch),
        "Grew": ("hasGrown", grown),
        "Changed": ("hasChanged", changed),
        "Elevation": ("hasElevation", elevation),
    }
    for label, (key, val) in symptoms_map.items():
        steps.append((f"{label} ({val})", {key: val}))

    probs_history = []
    steps_labels = []

    if image is not None:
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        image_pil = Image.open(image).convert('RGB')
        image_tensor = transform(image_pil).unsqueeze(0)
    else:
        # No image provided: feed an all-zero tensor so the metadata-only
        # trajectory can still be computed.
        image_tensor = torch.zeros(1, 3, 224, 224)

    def set_feature(vector, feature_name, value):
        """Set the one-hot column for a categorical feature, if it exists."""
        col_name = f"{feature_name}_{value}"
        if col_name in _metadata_columns:
            vector[_metadata_columns.index(col_name)] = 1.0

    accumulated_features = {}
    for step_name, new_features in steps:
        # The UI exposes the literal string "None" as its "not provided"
        # option, so unanswered features are skipped rather than encoded.
        skip_feature = any(
            value == "None" or value is None or value == []
            for value in new_features.values()
        )
        if skip_feature:
            continue

        steps_labels.append(step_name)
        accumulated_features.update(new_features)

        metadata_vector = np.zeros(len(_metadata_columns), dtype=np.float32)

        # Age is the only numeric feature; it stays NaN until provided.
        if "age" in _metadata_columns:
            if accumulated_features.get("age") is not None:
                metadata_vector[_metadata_columns.index("age")] = float(accumulated_features["age"])
            else:
                metadata_vector[_metadata_columns.index("age")] = np.nan

        if accumulated_features.get("macroBodyRegion"):
            set_feature(metadata_vector, "macroBodyRegion",
                        accumulated_features["macroBodyRegion"])

        symptom_keys = ["familyCancerHistory", "familySkinCancerHistory",
                        "hasBled", "hasHurt", "hasItched", "hasGrown",
                        "hasChanged", "hasElevation"]
        for key in symptom_keys:
            val = accumulated_features.get(key)
            if val is not None and val != "None":
                set_feature(metadata_vector, key, val)

        metadata_tensor = torch.tensor(metadata_vector, dtype=torch.float32).unsqueeze(0)
        ort_inputs = {
            ort_session.get_inputs()[0].name: image_tensor.numpy(),
            ort_session.get_inputs()[1].name: metadata_tensor.numpy()
        }
        ort_outs = ort_session.run(None, ort_inputs)

        # The model outputs raw scores; softmax turns them into probabilities.
        logits = ort_outs[0][0]
        probs = F.softmax(torch.tensor(logits), dim=0).numpy()
        probs_dict = {LABELS[i]: float(probs[i]) for i in range(len(LABELS))}
        probs_history.append(probs_dict)

    final_result = probs_history[-1]
    plot = create_plot(probs_history, steps_labels)
    return final_result, plot


def clear_func():
    # One value per component in the clear button's outputs list:
    # image, age, region, the eight radios, the label output, and the plot.
    return (None, None, None, "None", "None", "None", "None", "None",
            "None", "None", "None", None, None)
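# For reference, a minimal sketch of the model's expected I/O, assuming the
# ONNX export takes one image input of shape (1, 3, 224, 224) and one
# metadata input of shape (1, len(_metadata_columns)), as predict() feeds it.
# (Commented out so no extra inference runs at startup.)
#
#   if ort_session is not None:
#       names = [inp.name for inp in ort_session.get_inputs()]
#       dummy_img = np.zeros((1, 3, 224, 224), dtype=np.float32)
#       dummy_meta = np.zeros((1, len(_metadata_columns)), dtype=np.float32)
#       logits = ort_session.run(None, {names[0]: dummy_img, names[1]: dummy_meta})[0]
#       print(logits.shape)  # expected: (1, 6), one score per entry in LABELS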
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown("# PRISM: A Clinically Interpretable Stepwise Framework "
                    "for Multimodal Skin Cancer Diagnosis")
    with gr.Row():
        with gr.Column():
            image = gr.Image(type="filepath", height=534, label="Input Image")
        with gr.Column():
            age = gr.Number(label="Age", value=None)
            region = gr.Dropdown(
                multiselect=False, allow_custom_value=False, label="Region",
                choices=[None, 'ARM', 'NECK', 'FACE', 'HAND', 'FOREARM',
                         'CHEST', 'NOSE', 'LEG', 'THIGH', 'SCALP', 'EAR',
                         'BACK', 'FOOT', 'ABDOMEN', 'LIP'])
            with gr.Row():
                with gr.Column():
                    cancer_history = gr.Radio(label="Cancer history",
                                              choices=["True", "False", "None"], value="None")
                    skin_cancer_history = gr.Radio(label="Skin cancer history",
                                                   choices=["True", "False", "None"], value="None")
                    bleed = gr.Radio(label="Bled", choices=["True", "False", "None"], value="None")
                    hurt = gr.Radio(label="Pain", choices=["True", "False", "None"], value="None")
                with gr.Column():
                    itch = gr.Radio(label="Itch", choices=["True", "False", "None"], value="None")
                    grown = gr.Radio(label="Grew", choices=["True", "False", "None"], value="None")
                    changed = gr.Radio(label="Changed", choices=["True", "False", "None"], value="None")
                    elevation = gr.Radio(label="Elevation", choices=["True", "False", "None"], value="None")

    examples = [
        ["assets/examples/98540_74812_0_SCC.png", 91.0, "NECK", "False", "False",
         "False", "False", "True", "None", "None", "None"],
        ["assets/examples/23312_80156_1_BCC.png", 78.0, "NOSE", "True", "False",
         "True", "True", "True", "False", "False", "True"],
        ["assets/examples/33586_53648_1_ACK.png", 43.0, "FOREARM", "True", "False",
         "True", "True", "True", "False", "False", "True"],
        ["assets/examples/61243_97612_0_SEK.png", 73.0, "ARM", "False", "False",
         "False", "False", "False", "False", "False", "True"],
        ["assets/examples/83727_22982_0_NEV.png", 38.0, "THIGH", "False", "True",
         "False", "False", "True", "True", "True", "False"],
        ["assets/examples/86611_83131_0_MEL.png", 69.0, "FOREARM", "False", "True",
         "False", "True", "False", "False", "False", "False"],
    ]

    with gr.Row():
        with gr.Column():
            output_plot = gr.Plot(label="Incremental Prediction Change")
        with gr.Column():
            output = gr.Label(label="Output", num_top_classes=6)

    gr.Examples(examples=examples,
                inputs=[image, age, region, cancer_history, skin_cancer_history,
                        bleed, hurt, itch, grown, changed, elevation])

    with gr.Row():
        with gr.Column():
            submit = gr.Button("Submit")
            submit.click(predict,
                         inputs=[image, age, region, cancer_history, skin_cancer_history,
                                 bleed, hurt, itch, grown, changed, elevation],
                         outputs=[output, output_plot])
            clear = gr.Button("Clear")
            clear.click(clear_func, inputs=[],
                        outputs=[image, age, region, cancer_history, skin_cancer_history,
                                 bleed, hurt, itch, grown, changed, elevation,
                                 output, output_plot])

demo.launch()
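# Note: demo.launch() serves on localhost by default and blocks until the
# server stops. Gradio also supports demo.launch(share=True) for a temporary
# public link when the app is not hosted on Hugging Face Spaces.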