Upload app.py with huggingface_hub
app.py
CHANGED
@@ -1,10 +1,12 @@
 """
-
 """
 import gradio as gr
 import torch
 from PIL import Image
 from transformers import ViTImageProcessor, ViTForImageClassification

 CLASSES = ['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc']
 CLASS_NAMES = {
@@ -17,19 +19,29 @@ CLASS_NAMES = {
     'vasc': 'Vascular lesions'
 }

 # Load model
 print("Loading BiomedCLIP model...")
 device = torch.device('mps' if torch.backends.mps.is_available() else 'cpu')
 processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')
-model = ViTForImageClassification.from_pretrained('best_model_biomedclip_maximal')
 model = model.to(device)
 model.eval()
 print(f"BiomedCLIP model loaded on {device}!")

 def predict(image):
-    """Make prediction
     if image is None:
-        return {}, ""

     # Preprocess
     inputs = processor(images=image, return_tensors="pt")
@@ -40,7 +52,7 @@
     outputs = model(**inputs)
     probs = torch.nn.functional.softmax(outputs.logits, dim=-1)[0]

-    # Get
     top_prob = float(probs.max())
     top_idx = int(probs.argmax())
     top_class = CLASS_NAMES[CLASSES[top_idx]]
@@ -48,86 +60,216 @@
     # Format results
     results = {CLASS_NAMES[CLASSES[i]]: float(probs[i]) for i in range(len(CLASSES))}

-    #
     if top_prob >= 0.80:
-        confidence_msg = f"
     elif top_prob >= 0.60:
-        confidence_msg = f"
     else:
-

-

 # Create interface
-with gr.Blocks(title="
     gr.Markdown("""
-# 🔬

-
     """)

     with gr.Row():
-        with gr.Column():
-            image_input = gr.Image(type="pil", label="Upload
             analyze_btn = gr.Button("🔍 Analyze Image", variant="primary", size="lg")

-        with gr.Column():
-            output = gr.Label(num_top_classes=7, label="
-            confidence_output = gr.Markdown(label="Confidence

     gr.Markdown("""
-

-
-- Trained on HAM10000 dataset (10,015 dermoscopic images)
-- **Test Accuracy**: 51.16%
-- **Training**: 30 epochs with 384x384 resolution images
-- Specialized for biomedical image analysis

-

-
-- There are **7 different types** of skin lesions to distinguish
-- Random guessing would achieve only **14.3%** accuracy (1 in 7)
-- Our model at **51.16%** performs **3.6x better than random chance**
-- This represents **73% of the theoretical maximum improvement** over guessing
-- Even expert dermatologists sometimes struggle with these distinctions without biopsy

-

-
-2. **Basal Cell Carcinoma (bcc)** 🟠 - Most common skin cancer, highly treatable
-3. **Actinic Keratoses (akiec)** 🟡 - Pre-cancerous lesions from sun damage
-4. **Benign Keratosis (bkl)** 🟢 - Non-cancerous skin lesions
-5. **Melanocytic Nevi (nv)** 🔵 - Common moles, usually benign
-6. **Dermatofibroma (df)** 🟣 - Benign fibrous skin nodules
-7. **Vascular Lesions (vasc)** 🟤 - Blood vessel abnormalities

-

-
-
-- **60-80%**: Moderate confidence - model sees strong patterns
-- **Below 60%**: Low confidence - uncertain, needs expert review

-

-

-

-**
--
--
-- Monitoring suspicious lesions
-- Any concerning skin changes

-
     """)

     # Connect button
-    analyze_btn.click(
-

 if __name__ == "__main__":
     demo.launch()
 """
+Medical Image AI Lab - Educational Demo
+Learn how computer vision models analyze and misclassify dermoscopy images
 """
 import gradio as gr
 import torch
 from PIL import Image
 from transformers import ViTImageProcessor, ViTForImageClassification
+import numpy as np

 CLASSES = ['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc']
 CLASS_NAMES = {
     'vasc': 'Vascular lesions'
 }

+CLASS_DESCRIPTIONS = {
+    'akiec': '⚠️ Pre-cancerous lesions from sun damage',
+    'bcc': '🔴 Most common skin cancer (highly treatable)',
+    'bkl': '✅ Non-cancerous skin lesions',
+    'df': '🟣 Benign fibrous nodules',
+    'mel': '🚨 Most dangerous skin cancer',
+    'nv': '🔵 Common moles (usually benign)',
+    'vasc': '🟤 Blood vessel abnormalities'
+}
+
 # Load model
 print("Loading BiomedCLIP model...")
 device = torch.device('mps' if torch.backends.mps.is_available() else 'cpu')
 processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')
+model = ViTForImageClassification.from_pretrained('best_model_biomedclip_maximal', local_files_only=True)
 model = model.to(device)
 model.eval()
 print(f"BiomedCLIP model loaded on {device}!")

 def predict(image):
+    """Make prediction and return educational insights"""
     if image is None:
+        return {}, "", ""

     # Preprocess
     inputs = processor(images=image, return_tensors="pt")
     outputs = model(**inputs)
     probs = torch.nn.functional.softmax(outputs.logits, dim=-1)[0]

+    # Get predictions
     top_prob = float(probs.max())
     top_idx = int(probs.argmax())
     top_class = CLASS_NAMES[CLASSES[top_idx]]
     # Format results
     results = {CLASS_NAMES[CLASSES[i]]: float(probs[i]) for i in range(len(CLASSES))}

+    # Educational analysis
+    sorted_probs = sorted(enumerate(probs), key=lambda x: x[1], reverse=True)
+    second_best_idx = sorted_probs[1][0]
+    second_best_prob = float(sorted_probs[1][1])
+
+    # Confidence analysis
     if top_prob >= 0.80:
+        confidence_msg = f"### 🎯 High Confidence Prediction ({top_prob*100:.1f}%)\n\n"
+        confidence_msg += f"**Model strongly believes:** {top_class}\n\n"
+        confidence_msg += "**Learning Point:** High confidence doesn't always mean correct! The model might be overconfident due to:\n"
+        confidence_msg += "- Training on similar-looking samples\n"
+        confidence_msg += "- Overfitting to specific visual patterns\n"
+        confidence_msg += "- Limited dataset diversity"
     elif top_prob >= 0.60:
+        confidence_msg = f"### ⚖️ Moderate Confidence ({top_prob*100:.1f}%)\n\n"
+        confidence_msg += f"**Top prediction:** {top_class}\n"
+        confidence_msg += f"**Runner-up:** {CLASS_NAMES[CLASSES[second_best_idx]]} ({second_best_prob*100:.1f}%)\n\n"
+        confidence_msg += "**Learning Point:** The model is uncertain between multiple classes. This reveals:\n"
+        confidence_msg += "- Visual similarity between lesion types\n"
+        confidence_msg += "- Challenges in feature extraction\n"
+        confidence_msg += "- Why medical AI requires expert validation"
+    else:
+        confidence_msg = f"### 🤔 Low Confidence ({top_prob*100:.1f}%)\n\n"
+        confidence_msg += f"**Best guess:** {top_class}\n"
+        confidence_msg += f"**But also considering:** {CLASS_NAMES[CLASSES[second_best_idx]]} ({second_best_prob*100:.1f}%)\n\n"
+        confidence_msg += "**Learning Point:** The model struggles with this image! Possible reasons:\n"
+        confidence_msg += "- Image quality issues\n"
+        confidence_msg += "- Unusual presentation\n"
+        confidence_msg += "- Out-of-distribution sample\n"
+        confidence_msg += "- Dataset bias (underrepresented class)"
+
+    # Educational insights
+    entropy = -sum(p * np.log(p + 1e-10) for p in probs if p > 0.01)
+    max_entropy = np.log(7) # log of number of classes
+    normalized_entropy = entropy / max_entropy
+
+    insights = f"### 📊 Model Behavior Analysis\n\n"
+    insights += f"**Prediction Entropy:** {entropy:.3f} (max: {max_entropy:.3f})\n"
+    insights += f"**Uncertainty Score:** {normalized_entropy:.1%}\n\n"
+
+    if normalized_entropy > 0.8:
+        insights += "⚠️ **High uncertainty** - Model is very confused between multiple classes\n\n"
+        insights += "**What this teaches us:**\n"
+        insights += "- Some lesions have overlapping visual features\n"
+        insights += "- Class boundaries in medical imaging are often fuzzy\n"
+        insights += "- This is why dermatologists use additional context (patient history, location, etc.)"
+    elif normalized_entropy < 0.3:
+        insights += "✅ **Low uncertainty** - Model has a clear preferred class\n\n"
+        insights += "**What this teaches us:**\n"
+        insights += "- The image has distinctive features the model recognizes\n"
+        insights += "- However, low uncertainty ≠ correct prediction!\n"
+        insights += "- Models can be confidently wrong (calibration problem)"
     else:
+        insights += "⚖️ **Moderate uncertainty** - Model sees multiple possibilities\n\n"
+        insights += "**What this teaches us:**\n"
+        insights += "- Real-world classification is rarely binary\n"
+        insights += "- Probability distributions > single predictions\n"
+        insights += "- Why ensemble methods and expert review matter"

+    insights += f"\n**Top 3 Predictions:**\n"
+    for i in range(min(3, len(sorted_probs))):
+        idx = sorted_probs[i][0]
+        prob = float(sorted_probs[i][1])
+        insights += f"{i+1}. {CLASS_NAMES[CLASSES[idx]]}: {prob*100:.1f}%\n"
+
+    return results, confidence_msg, insights

 # Create interface
+with gr.Blocks(title="Medical Image AI Lab", theme="soft") as demo:
     gr.Markdown("""
+# 🔬 Medical Image AI Lab
+### Learn How Computer Vision Models Analyze and Misclassify Dermoscopy Images

+**This is an educational demo for ML/AI students, researchers, and educators.**
+Explore how a real computer vision model trained on skin lesion data makes predictions—and where it fails.
     """)

     with gr.Row():
+        with gr.Column(scale=1):
+            image_input = gr.Image(type="pil", label="📸 Upload a Dermoscopy Image")
             analyze_btn = gr.Button("🔍 Analyze Image", variant="primary", size="lg")
+
+            gr.Markdown("""
+### 💡 Educational Value
+
+**What You'll Learn:**
+- How ML models handle ambiguous medical images
+- The difference between confidence and correctness
+- Why medical AI is challenging
+- Dataset bias and class imbalance effects
+- Model uncertainty and calibration
+
+**For Educators:**
+Use this to teach confusion matrices, ROC curves, calibration,
+and the gap between benchmark performance and real-world deployment.
+            """)

+        with gr.Column(scale=1):
+            output = gr.Label(num_top_classes=7, label="🎯 Model Predictions")
+            confidence_output = gr.Markdown(label="Model Confidence Analysis")
+            insights_output = gr.Markdown(label="Educational Insights")
+
+    gr.Markdown("""
+---
+
+## 📚 Understanding the Model
+
+### Model Architecture
+- **Base:** Vision Transformer (ViT) with BiomedCLIP weights
+- **Training:** 30 epochs on HAM10000 dataset (10,015 images)
+- **Test Accuracy:** 51.16%
+
+### Why 51% is Actually Meaningful
+
+**Context matters:**
+- Random guessing: 14.3% (1 in 7 classes)
+- This model: 51.16% (**3.6x better than random**)
+- Represents 73% of maximum possible improvement over random
+
+**Real-world complexity:**
+- Even expert dermatologists disagree on diagnoses without biopsy
+- Visual similarity between some lesion types is extreme
+- Dataset has significant class imbalance (e.g., 67% melanocytic nevi vs <1% dermatofibroma)
+
+### Common Failure Modes (Learning Opportunities!)
+
+1. **Class Imbalance Bias**
+Model tends to predict common classes (nevi) more often
+
+2. **Visual Similarity Confusion**
+Melanoma vs nevi, BCC vs other lesions—very hard to distinguish
+
+3. **Domain Shift**
+Different cameras, lighting, or skin types can confuse the model
+
+4. **Overconfidence**
+The model can be 90% confident and still wrong (calibration problem)
+
+### 7 Lesion Categories
+
+    """)
+
+    for cls_id, cls_name in CLASS_NAMES.items():
+        gr.Markdown(f"**{cls_name}** — {CLASS_DESCRIPTIONS[cls_id]}")

     gr.Markdown("""
+---
+
+## 🎓 For Students & Researchers
+
+### Experiments You Can Try
+
+1. **Test on edge cases:** Upload images with poor lighting, blur, or unusual angles
+2. **Compare similar lesions:** See how the model handles visually similar classes
+3. **Analyze confidence:** Does high confidence correlate with correctness?
+4. **Class bias testing:** Upload multiple examples of rare vs common classes
+
+### Questions to Explore
+
+- How does image quality affect predictions?
+- Which classes get confused most often?
+- When is the model most/least confident?
+- How would you improve this model?

+### Next Steps for Learning

+- Study the HAM10000 dataset distribution
+- Implement explainability (Grad-CAM, attention maps)
+- Try data augmentation strategies
+- Experiment with ensemble methods
+- Research medical AI validation standards

+---

+## ⚠️ Important Disclaimer

+**This tool is for EDUCATIONAL and RESEARCH purposes ONLY.**

+- ❌ **NOT a medical device**
+- ❌ **NOT for clinical diagnosis**
+- ❌ **NOT for treatment decisions**
+- ❌ **NOT a substitute for professional medical advice**

+This demo shows how ML models work and fail in medical imaging contexts.
+It is designed to teach AI limitations, not to provide medical guidance.

+**For actual medical concerns, always consult a board-certified dermatologist.**

+---

+## 📖 Additional Resources

+- **Dataset:** [HAM10000 on Kaggle](https://www.kaggle.com/kmader/skin-cancer-mnist-ham10000)
+- **Paper:** Tschandl et al. (2018) "The HAM10000 dataset"
+- **Learn More:** [Understanding Medical AI Challenges](https://www.nature.com/articles/s41591-020-0842-6)

+Built for ML education | Not for medical use | Model accuracy: 51.16% on test set
     """)

     # Connect button
+    analyze_btn.click(
+        fn=predict,
+        inputs=image_input,
+        outputs=[output, confidence_output, insights_output]
+    )
+    image_input.change(
+        fn=predict,
+        inputs=image_input,
+        outputs=[output, confidence_output, insights_output]
+    )

 if __name__ == "__main__":
     demo.launch()
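
As a quick sanity check on the figures quoted in the demo text (7 classes, a 14.3% random baseline, 51.16% test accuracy), here is a minimal sketch of the baseline arithmetic; the variable names are illustrative only:

num_classes = 7
random_baseline = 1 / num_classes      # ~0.143, the 14.3% quoted in the app
test_accuracy = 0.5116                 # test accuracy reported in the app
print(f"Random-guess baseline: {random_baseline:.1%}")                # -> 14.3%
print(f"Model vs. random: {test_accuracy / random_baseline:.1f}x")    # -> 3.6x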
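
For the suggested experiment on whether high confidence correlates with correctness, a minimal confidence-bin sketch, assuming you collect (confidence, correct) pairs yourself by running predict() on labelled images; the records list and bin edges below are illustrative placeholders, not outputs of this Space:

import numpy as np

# Hypothetical (confidence, was_correct) pairs from a labelled evaluation set;
# replace with results gathered from predict() on your own images.
records = [(0.92, True), (0.88, False), (0.71, True), (0.55, False), (0.45, True)]

# The same thresholds the app uses for high / moderate / low confidence.
bins = [(0.80, 1.01), (0.60, 0.80), (0.00, 0.60)]
for lo, hi in bins:
    hits = [correct for conf, correct in records if lo <= conf < hi]
    acc = np.mean(hits) if hits else float("nan")
    print(f"confidence [{lo:.2f}, {hi:.2f}): n={len(hits)}, accuracy={acc:.2f}")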