Commit · af6cc87 · Deployment ready (excluding model)

Files changed:
- .gitignore +10 -0
- Dockerfile +31 -0
- README.md +328 -0
- README_AI.md +511 -0
- client/app.js +445 -0
- client/index.html +322 -0
- client/styles.css +975 -0
- data/1.jpg +0 -0
- data/2.jpeg +0 -0
- data/3.jpeg +0 -0
- notebooks/mobileNetUnetAttention.py +844 -0
- server/main.py +440 -0
- server/requirements.txt +22 -0

.gitignore · ADDED · @@ -0,0 +1,10 @@

__pycache__/
*.py[cod]
*$py.class
.DS_Store
.env
venv/
env/
.idea/
.vscode/
models/*.pth

Dockerfile · ADDED · @@ -0,0 +1,31 @@

# Use official Python runtime as a parent image
FROM python:3.10-slim

# Set working directory
WORKDIR /app

# Install system dependencies required for OpenCV
RUN apt-get update && apt-get install -y \
    libgl1-mesa-glx \
    libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies
COPY server/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code
COPY server/ ./server/
COPY client/ ./client/
COPY models/ ./models/

# Create a non-root user for security (recommended for HF Spaces)
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

# Expose the port (Hugging Face Spaces uses 7860 by default)
EXPOSE 7860

# Run the FastAPI server on port 7860
CMD ["uvicorn", "server.main:app", "--host", "0.0.0.0", "--port", "7860"]
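
For local testing outside Spaces, the image can be built and run with standard Docker commands; the `dermai` tag below is just an arbitrary name chosen for illustration:

```bash
# Build from the repository root (where the Dockerfile lives)
docker build -t dermai .

# Run it, mapping the Spaces port to localhost
docker run --rm -p 7860:7860 dermai
# then open http://localhost:7860
```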

README.md · ADDED · @@ -0,0 +1,328 @@

---
title: DermAI Skin Lesion Segmentation
emoji: 🔬
colorFrom: purple
colorTo: pink
sdk: docker
app_port: 7860
---

# DermAI - Web Application
## Skin Lesion Segmentation System

A modern web application for AI-powered skin lesion segmentation using an Attention U-Net deep learning model.

---

## 🚀 Quick Start

### Prerequisites

- Python 3.8+
- pip (Python package manager)

### Installation

1. **Clone/Navigate to the project:**
   ```bash
   cd "/home/raid/Desktop/isic2018 skin cancer app"
   ```

2. **Activate the virtual environment:**
   ```bash
   source ~/cv-env/bin/activate
   ```

3. **Install dependencies:**
   ```bash
   pip install -r server/requirements.txt
   ```

4. **Add your trained model:**
   ```bash
   # Copy your trained model to:
   models/model.pth
   ```

5. **Run the server:**
   ```bash
   cd server
   python main.py
   ```

   Or with uvicorn:
   ```bash
   uvicorn server.main:app --reload --host 0.0.0.0 --port 8000
   ```

6. **Open in browser:**
   ```
   http://localhost:8000
   ```

---

## 📁 Project Structure

```
isic2018-skin-cancer-app/
├── client/                  # Frontend (HTML/CSS/JS)
│   ├── index.html           # Main HTML page
│   ├── styles.css           # CSS styles (dark medical theme)
│   └── app.js               # JavaScript (file upload, API calls)
│
├── server/                  # Backend (FastAPI)
│   ├── main.py              # FastAPI application & model
│   └── requirements.txt     # Python dependencies
│
├── models/                  # Trained model weights
│   └── model.pth            # (add your trained model here)
│
├── notebooks/               # Training notebooks
│   └── mobileNetUnetAttention.py
│
├── README.md                # This file
└── README_AI.md             # AI/Model documentation
```

---

## 🖥️ Features

### Frontend
- **Modern Medical Theme**: Dark mode with purple/pink gradient accents
- **Drag & Drop Upload**: Easy image upload with drag-and-drop support
- **Real-time Results**: Instant visualization of segmentation results
- **Responsive Design**: Works on desktop, tablet, and mobile
- **Download Results**: Export combined analysis as a PNG image

### Backend (API)
- **FastAPI Framework**: High-performance async Python server
- **CORS Enabled**: Cross-origin requests supported
- **Health Check**: API status monitoring endpoint
- **Image Validation**: Supports JPEG, PNG, WebP formats

---

## 🔌 API Endpoints

### Health Check
```http
GET /api/health
```
**Response:**
```json
{
  "status": "healthy",
  "model_loaded": true,
  "device": "cuda"
}
```

### Segmentation
```http
POST /api/segment
Content-Type: multipart/form-data

file: <image_file>
```
**Response:**
```json
{
  "success": true,
  "mask_base64": "iVBORw0KGgo...",
  "overlay_base64": "iVBORw0KGgo...",
  "confidence": 85.5,
  "lesion_area_percent": 12.3
}
```

---

## 🎨 Design System

### Color Palette

| Color | HSL | Usage |
|-------|-----|-------|
| Primary Purple | `hsl(250, 89%, 65%)` | Buttons, accents |
| Accent Pink | `hsl(330, 81%, 60%)` | Highlights, gradients |
| Success Green | `hsl(142, 71%, 45%)` | Positive indicators |
| Background Dark | `hsl(240, 20%, 4%)` | Main background |

### Typography
- **Primary Font**: Inter (Google Fonts)
- **Monospace Font**: JetBrains Mono
- **Headings**: 700-800 weight
- **Body**: 400-500 weight

### Effects
- **Glassmorphism**: Blur + transparency on cards
- **Gradient Orbs**: Animated background blobs
- **Smooth Transitions**: 250ms ease animations
- **Hover States**: Lift + glow effects

---

## ⌨️ Keyboard Shortcuts

| Shortcut | Action |
|----------|--------|
| `Ctrl/Cmd + O` | Open file dialog |
| `Ctrl/Cmd + S` | Download results |
| `Escape` | Reset analysis |

---

## 🛠️ Configuration

### Server Configuration (server/main.py)

```python
# Model path
MODEL_PATH = "../models/model.pth"

# Image size (must match training)
IMG_SIZE = 256

# Device (auto-detected)
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
```

### Running on a Different Port

```bash
uvicorn server.main:app --port 3000
```

### Production Mode

```bash
uvicorn server.main:app --host 0.0.0.0 --port 8000 --workers 4
```

---

## 🧪 Testing

### API Test with cURL

```bash
# Health check
curl http://localhost:8000/api/health

# Segmentation
curl -X POST http://localhost:8000/api/segment \
  -F "file=@test_image.jpg"
```

### API Test with Python

```python
import requests

# Health check
response = requests.get("http://localhost:8000/api/health")
print(response.json())

# Segmentation
with open("test_image.jpg", "rb") as f:
    response = requests.post(
        "http://localhost:8000/api/segment",
        files={"file": f}
    )
print(response.json())
```

---

## 📦 Dependencies

### Python (Backend)
```
fastapi>=0.104.0
uvicorn>=0.24.0
python-multipart>=0.0.6
torch>=2.0.0
torchvision>=0.15.0
opencv-python>=4.8.0
Pillow>=10.0.0
albumentations>=1.3.0
numpy>=1.24.0
pydantic>=2.0.0
```

### Frontend
- Vanilla HTML5
- Vanilla CSS3
- Vanilla JavaScript (ES6+)
- Google Fonts (Inter, JetBrains Mono)

---

## 🔒 Security Notes

- All image processing is done locally (no external API calls)
- Images are processed in memory and not stored
- CORS is enabled for development (restrict in production; see the sketch below)
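
A minimal sketch of what restricting CORS could look like in `server/main.py`, using FastAPI's standard `CORSMiddleware` (the origin below is a placeholder, not the project's actual configuration):

```python
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Replace the permissive dev setting with an explicit allow-list in production
app.add_middleware(
    CORSMiddleware,
    allow_origins=["https://your-domain.example"],  # placeholder origin
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
)
```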

---

## 🐛 Troubleshooting

### Model not found
```
⚠ Model file not found at models/model.pth
```
**Solution**: Add your trained model to the `models/` directory.

### CUDA out of memory
```
RuntimeError: CUDA out of memory
```
**Solution**: Reduce batch size or use CPU:
```python
DEVICE = torch.device('cpu')
```

### Port already in use
```
OSError: [Errno 98] Address already in use
```
**Solution**: Kill the existing process or use a different port:
```bash
lsof -i :8000   # Find process
kill -9 <PID>   # Kill it
```

### Static files not serving
**Solution**: Ensure the client directory exists and contains index.html:
```bash
ls -la client/
```

---

## 📄 License

This project is for **research and educational purposes only**.
⚠️ **Medical Disclaimer**: This tool should not be used as a substitute for professional medical advice, diagnosis, or treatment.

---

## 👥 Contributing

1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Submit a pull request

---

## 📞 Support

For issues and questions, please open a GitHub issue.

---

*Version 1.0.0 | Built with FastAPI, PyTorch & ❤️*

README_AI.md · ADDED · @@ -0,0 +1,511 @@

# DermAI - AI Model Documentation
## Skin Lesion Segmentation using Attention U-Net

This document provides a comprehensive overview of the AI/Deep Learning components used in the DermAI skin cancer lesion segmentation application.

---

## Table of Contents
1. [Overview](#overview)
2. [Model Architecture](#model-architecture)
3. [Dataset](#dataset)
4. [Training Pipeline](#training-pipeline)
5. [Loss Functions](#loss-functions)
6. [Data Augmentation](#data-augmentation)
7. [Inference Pipeline](#inference-pipeline)
8. [Performance Metrics](#performance-metrics)
9. [Technical Specifications](#technical-specifications)

---

## Overview

The AI system in DermAI is designed for **semantic segmentation** of skin lesions in dermoscopic images. The goal is to precisely identify and delineate the boundaries of potentially cancerous skin lesions, assisting dermatologists in their diagnostic workflow.

### Key Features
- **Encoder-Decoder Architecture**: U-Net style architecture with skip connections
- **Transfer Learning**: MobileNetV2 pre-trained on ImageNet as the encoder
- **Attention Mechanism**: Attention gates to focus on lesion regions
- **Deep Supervision**: Multi-scale loss for better gradient flow
- **Boundary Refinement**: Dedicated head for precise edge detection

---

## Model Architecture

### 1. Encoder: MobileNetV2

The encoder uses **MobileNetV2** pre-trained on ImageNet, which provides:

- **Efficient Feature Extraction**: Inverted residual blocks with linear bottlenecks
- **Depthwise Separable Convolutions**: Reduces computational cost
- **Pre-trained Weights**: Transfer learning from ImageNet for robust feature representations

#### Architecture Details:
```
Input: 256 × 256 × 3 (RGB image)
├── Layer 0-1:   16 channels   (skip connection 1)
├── Layer 2-3:   24 channels   (skip connection 2)
├── Layer 4-6:   32 channels   (skip connection 3)
├── Layer 7-13:  96 channels   (skip connection 4)
└── Layer 14-18: 1280 channels (bottleneck)
```
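
As a rough illustration (not the repository's exact code), the skip taps above could be collected from torchvision's `mobilenet_v2`; the block indices below are an assumption matching the channel counts in the table:

```python
import torch
import torchvision

# Pre-trained MobileNetV2; .features is a sequential stack of 19 blocks
backbone = torchvision.models.mobilenet_v2(weights="IMAGENET1K_V1").features

def encode(x, tap_indices=(1, 3, 6, 13)):
    """Run the encoder, collecting skip features at the chosen block indices."""
    skips = []
    for i, block in enumerate(backbone):
        x = block(x)
        if i in tap_indices:
            skips.append(x)  # 16, 24, 32, 96 channels respectively
    return x, skips          # x is the 1280-channel bottleneck

bottleneck, skips = encode(torch.randn(1, 3, 256, 256))
```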

### 2. Decoder: U-Net Style with Skip Connections

The decoder progressively upsamples the feature maps while integrating skip connections from the encoder:

```
Bottleneck (1280 channels)
    ↓ ConvTranspose2d
Decoder Stage 1 (256 channels) + Skip[4] with Attention Gate
    ↓ ConvTranspose2d
Decoder Stage 2 (128 channels) + Skip[3] with Attention Gate
    ↓ ConvTranspose2d
Decoder Stage 3 (64 channels) + Skip[2] with Attention Gate
    ↓ ConvTranspose2d
Decoder Stage 4 (32 channels) + Skip[1] with Attention Gate
    ↓ ConvTranspose2d
Final Stage (16 channels)
    ↓ Conv2d + Sigmoid
Output: 256 × 256 × 1 (binary mask)
```

### 3. Attention Gates

Attention gates are applied to each skip connection to help the decoder focus on relevant features:

```python
class AttentionGate(nn.Module):
    """
    Args:
        F_g: Channels in gating signal (from decoder)
        F_l: Channels in skip connection (from encoder)
        F_int: Intermediate channels

    Operation:
        1. Project gating signal: W_g(g) → features
        2. Project skip connection: W_x(x) → features
        3. Combine: ReLU(W_g + W_x)
        4. Generate attention map: Sigmoid(psi)
        5. Apply attention: x * attention_map
    """
```
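
The actual module lives in `notebooks/mobileNetUnetAttention.py`; a minimal PyTorch sketch of the gate this docstring describes might look like:

```python
import torch.nn as nn

class AttentionGate(nn.Module):
    """Additive attention gate (Oktay et al., 2018) -- illustrative sketch."""

    def __init__(self, F_g, F_l, F_int):
        super().__init__()
        self.W_g = nn.Sequential(nn.Conv2d(F_g, F_int, kernel_size=1), nn.BatchNorm2d(F_int))
        self.W_x = nn.Sequential(nn.Conv2d(F_l, F_int, kernel_size=1), nn.BatchNorm2d(F_int))
        self.psi = nn.Sequential(nn.Conv2d(F_int, 1, kernel_size=1), nn.BatchNorm2d(1), nn.Sigmoid())
        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        # g: gating signal from the decoder; x: skip connection
        # (same spatial size is assumed here; upsampling g first is common)
        attention_map = self.psi(self.relu(self.W_g(g) + self.W_x(x)))  # [B, 1, H, W] in (0, 1)
        return x * attention_map  # re-weight the skip features
```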

The attention mechanism learns to:
- **Highlight lesion regions**: Focus on areas with suspicious features
- **Suppress background**: Ignore hair, skin texture, and other noise
- **Improve boundary detection**: Pay attention to lesion edges

### 4. Boundary Refinement Head

A dedicated convolutional head specifically for refining lesion boundaries:

```
Input: 16 channels (from decoder)
    ↓ Conv2d(16 → 64) + ReLU
    ↓ Conv2d(64 → 64) + ReLU
    ↓ Conv2d(64 → 1) + Sigmoid
Output: Boundary-refined mask
```

The final output is the average of the main segmentation output and the boundary-refined output.
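
A sketch of such a head under the channel counts above (kernel sizes are an assumption; the repository's exact layers may differ):

```python
import torch.nn as nn

boundary_head = nn.Sequential(
    nn.Conv2d(16, 64, kernel_size=3, padding=1), nn.ReLU(inplace=True),
    nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.ReLU(inplace=True),
    nn.Conv2d(64, 1, kernel_size=1), nn.Sigmoid(),
)

# final_mask = 0.5 * (main_output + boundary_head(decoder_features))
```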

### 5. Deep Supervision (Training Only)

During training, auxiliary outputs are generated at multiple decoder scales:

| Stage | Resolution | Weight |
|-------|------------|--------|
| Decoder 1 | 16×16 | 0.5 |
| Decoder 2 | 32×32 | 0.3 |
| Decoder 3 | 64×64 | 0.2 |
| Decoder 4 | 128×128 | 0.1 |
| Final | 256×256 | 1.0 |

Benefits:
- Better gradient flow to early decoder layers
- Faster convergence
- Improved feature learning at multiple scales

---

## Dataset

### ISIC 2018 Challenge Dataset

The model is trained on the **International Skin Imaging Collaboration (ISIC) 2018** dataset:

| Split | Images | Purpose |
|-------|--------|---------|
| Training | 2595 | Model training |
| Validation | 100 | Hyperparameter tuning |
| Test | 1000 | Final evaluation |

### Data Characteristics
- **Image Type**: Dermoscopic images of skin lesions
- **Resolution**: Variable (resized to 256×256 for training)
- **Annotations**: Binary segmentation masks (pixel-level)
- **Lesion Types**: Melanoma, nevus, seborrheic keratosis, etc.

---

## Training Pipeline

### 1. Training Loop

```python
for epoch in range(EPOCHS):
    # Phase 1: Frozen encoder (first 5 epochs)
    if epoch < ENCODER_FREEZE_EPOCHS:
        # Only train decoder layers
        frozen_layers = 7  # First 7 encoder layers frozen
    else:
        # Phase 2: Full fine-tuning
        unfreeze_encoder()

    # Train epoch
    for batch in train_loader:
        optimizer.zero_grad()

        # Forward pass with deep supervision
        main_output, ds_outputs = model(images)

        # Compute combined loss
        loss = deep_supervision_loss(main_output, ds_outputs, masks)

        loss.backward()
        optimizer.step()

    # Validate and update LR scheduler
    scheduler.step(val_loss)

    # Early stopping check
    if no_improvement_for(patience=7):
        break
```

*(Illustrative pseudocode: `unfreeze_encoder` and `no_improvement_for` stand in for the notebook's actual helper logic.)*

### 2. Hyperparameters

| Parameter | Value | Description |
|-----------|-------|-------------|
| Image Size | 256×256 | Input resolution |
| Batch Size | 32 | Training batch size |
| Learning Rate | 1e-4 | Initial learning rate |
| Optimizer | Adam | Optimizer algorithm |
| Epochs | 50 | Maximum epochs |
| Early Stopping Patience | 7 | Epochs without improvement |
| Encoder Freeze Epochs | 5 | Epochs with frozen encoder |

### 3. Learning Rate Scheduler

```python
scheduler = ReduceLROnPlateau(
    optimizer,
    mode='min',      # Reduce when val_loss stops decreasing
    patience=3,      # Wait 3 epochs before reducing
    factor=0.5,      # Reduce LR by half
    min_lr=1e-7      # Minimum learning rate
)
```

---

## Loss Functions

### Combined Loss Function

The model is trained using a weighted combination of three loss functions:

```
Total Loss = 0.3 × BCE + 0.4 × Dice + 0.3 × Tversky
```

### 1. Binary Cross-Entropy (BCE) Loss

Measures pixel-wise classification accuracy:

```
BCE = -1/N Σ [y·log(p) + (1-y)·log(1-p)]
```

**Purpose**: Ensures accurate per-pixel predictions

### 2. Dice Loss

Measures overlap between prediction and ground truth:

```
Dice = 2·|P ∩ G| / (|P| + |G|)
Dice Loss = 1 - Dice
```

**Purpose**: Handles class imbalance, optimizes for segmentation quality

### 3. Tversky Loss

A generalization of Dice loss with asymmetric penalization:

```
Tversky = TP / (TP + α·FN + β·FP)
Tversky Loss = 1 - Tversky
```

Configuration:
- **α = 0.7**: Higher penalty for False Negatives (missing lesion parts)
- **β = 0.3**: Lower penalty for False Positives

**Purpose**: Addresses class imbalance by penalizing missed lesions more than false alarms
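
A minimal sketch of this weighted combination, assuming predictions are already sigmoid probabilities (as the architecture above produces):

```python
import torch
import torch.nn.functional as F

def dice_loss(p, y, eps=1e-6):
    inter = (p * y).sum()
    return 1 - (2 * inter + eps) / (p.sum() + y.sum() + eps)

def tversky_loss(p, y, alpha=0.7, beta=0.3, eps=1e-6):
    tp = (p * y).sum()
    fn = ((1 - p) * y).sum()  # missed lesion pixels, weighted by alpha
    fp = (p * (1 - y)).sum()  # false alarms, weighted by beta
    return 1 - (tp + eps) / (tp + alpha * fn + beta * fp + eps)

def combined_loss(p, y):
    bce = F.binary_cross_entropy(p, y)
    return 0.3 * bce + 0.4 * dice_loss(p, y) + 0.3 * tversky_loss(p, y)
```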

### Deep Supervision Loss

For multi-scale outputs during training:

```
Total_DS = L_main + Σ(w_i × L_i)

Where:
- L_main: Loss on final output (weight = 1.0)
- L_i: Loss on auxiliary output i
- w_i: Weight for scale i (0.5, 0.3, 0.2, 0.1)
```
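
With the weights from the deep-supervision table, a wrapper around `combined_loss` (sketched above) could look like this; downsampling the ground-truth mask to each auxiliary resolution is an assumption about how the notebook matches shapes:

```python
import torch.nn.functional as F

def deep_supervision_loss(main_output, ds_outputs, target,
                          weights=(0.5, 0.3, 0.2, 0.1)):
    total = combined_loss(main_output, target)  # final output, weight 1.0
    for w, out in zip(weights, ds_outputs):
        # resize the ground truth to this auxiliary scale (16x16 ... 128x128)
        t = F.interpolate(target, size=out.shape[-2:], mode="nearest")
        total = total + w * combined_loss(out, t)
    return total
```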

---

## Data Augmentation

The training pipeline uses **Albumentations** for advanced data augmentation:

### Geometric Augmentations

| Augmentation | Parameters | Probability |
|--------------|------------|-------------|
| HorizontalFlip | - | 0.5 |
| VerticalFlip | - | 0.5 |
| RandomRotate90 | - | 0.5 |
| Affine | translate=±10%, scale=0.8-1.2, rotate=±45° | 0.5 |
| ElasticTransform | alpha=120, sigma=6 | 0.3 |

**Rationale**: Skin lesions can appear at any orientation; dermoscopy captures images from various angles.

### Color Augmentations

| Augmentation | Parameters | Probability |
|--------------|------------|-------------|
| ColorJitter | brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1 | 0.5 |
| RandomBrightnessContrast | limit=0.2 | 0.5 |
| HueSaturationValue | hue=±10, sat=±20, val=±10 | 0.3 |

**Rationale**: Handles variations in camera settings, lighting conditions, and skin tones.

### Noise & Blur Augmentations

| Augmentation | Parameters | Probability |
|--------------|------------|-------------|
| GaussianBlur | blur_limit=3-5 | 0.2 |
| GaussNoise | std=0.02-0.1 | 0.2 |

**Rationale**: Simulates image quality variations and camera noise.

### Cutout/Dropout

| Augmentation | Parameters | Probability |
|--------------|------------|-------------|
| CoarseDropout | 1-8 holes, size=16-32 pixels | 0.5 |

**Rationale**: Forces the model to learn from partial information, improving robustness to occlusions (hair, rulers, artifacts).

### Normalization

All images are normalized using ImageNet statistics:
```python
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
```
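
Assembled into a single pipeline, the transforms above could be written roughly as follows (parameter names follow current Albumentations conventions; `GaussNoise` and `CoarseDropout` are omitted here because their argument names vary across library versions):

```python
import albumentations as A
from albumentations.pytorch import ToTensorV2

train_transform = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.VerticalFlip(p=0.5),
    A.RandomRotate90(p=0.5),
    A.Affine(translate_percent=(-0.1, 0.1), scale=(0.8, 1.2), rotate=(-45, 45), p=0.5),
    A.ElasticTransform(alpha=120, sigma=6, p=0.3),
    A.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1, p=0.5),
    A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
    A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.3),
    A.GaussianBlur(blur_limit=(3, 5), p=0.2),
    A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ToTensorV2(),
])

# usage: spatial transforms are applied to the mask as well
# augmented = train_transform(image=image, mask=mask)
```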

---

## Inference Pipeline

### Preprocessing

```python
import cv2
import numpy as np
import torch

# ImageNet statistics (see Normalization above)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])

def preprocess_image(image):
    # 1. Convert to RGB
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # 2. Resize to 256×256
    image_resized = cv2.resize(image_rgb, (256, 256))

    # 3. Normalize with ImageNet stats
    image_normalized = (image_resized / 255.0 - mean) / std

    # 4. Convert to tensor [1, 3, 256, 256]
    tensor = torch.from_numpy(image_normalized).permute(2, 0, 1).unsqueeze(0)

    return tensor.float()
```

### Inference

```python
def predict(model, image_tensor):
    model.eval()
    with torch.no_grad():
        output = model(image_tensor)           # [1, 1, 256, 256]
        mask = output.cpu().numpy().squeeze()  # [256, 256]
    return mask
```

### Postprocessing

```python
def postprocess_mask(mask, original_size):
    # 1. Apply threshold
    binary_mask = (mask > 0.5).astype(np.float32)

    # 2. Resize to original image size
    mask_resized = cv2.resize(binary_mask,
                              (original_size[1], original_size[0]),
                              interpolation=cv2.INTER_LINEAR)

    return mask_resized
```

### Metrics Calculation

```python
# Confidence Score: Mean probability of lesion pixels
confidence = np.mean(mask[mask > 0.5]) if np.any(mask > 0.5) else 0.0

# Lesion Area: Percentage of image covered by lesion
lesion_area = np.mean(mask > 0.5) * 100
```

---

## Performance Metrics

### Primary Metrics

| Metric | Description | Target |
|--------|-------------|--------|
| **Dice Coefficient** | Overlap between prediction and ground truth | > 0.85 |
| **Accuracy** | Per-pixel classification accuracy | > 0.95 |
| **IoU (Jaccard)** | Intersection over Union | > 0.80 |

### Dice Coefficient Calculation

```python
def dice_coefficient(pred, target, threshold=0.5):
    pred_binary = (pred > threshold).float()
    target_binary = (target > threshold).float()

    intersection = (pred_binary * target_binary).sum()
    dice = (2 * intersection) / (pred_binary.sum() + target_binary.sum())

    return dice.item()
```

### Actual Training Results (Epoch 39)

| Split | Dice | Accuracy | Loss |
|-------|------|----------|------|
| Validation | 0.8864 | 0.9405 | 0.1470 |

*The model achieved its best validation loss at epoch 39.*

---

## Technical Specifications

### Model Size

| Component | Parameters |
|-----------|------------|
| MobileNetV2 Encoder | ~3.4M |
| Attention Gates | ~0.2M |
| Decoder | ~2.0M |
| Boundary Head | ~0.1M |
| **Total Parameters** | **5,637,018** |
| **Trainable Parameters** | **5,581,530** |

### Computational Requirements

| Metric | Value |
|--------|-------|
| Input Size | 256 × 256 × 3 |
| FLOPs | ~1.2 GFLOPs |
| GPU Memory (Inference) | ~500 MB |
| Inference Time (GPU) | ~15 ms |
| Inference Time (CPU) | ~200 ms |

### Dependencies

| Library | Version | Purpose |
|---------|---------|---------|
| PyTorch | ≥2.0.0 | Deep learning framework |
| torchvision | ≥0.15.0 | Pre-trained models |
| Albumentations | ≥1.3.0 | Data augmentation |
| OpenCV | ≥4.8.0 | Image processing |
| NumPy | ≥1.24.0 | Numerical operations |

---

## Model Files

### Required Files

```
models/
└── model.pth    # Trained model weights (~23 MB)
```

### Loading the Model

```python
import torch

from server.main import AttentionUNetMobileNet

# Initialize model
model = AttentionUNetMobileNet(deep_supervision=False)

# Load trained weights
model.load_state_dict(
    torch.load('models/model.pth', map_location='cpu'),
    strict=False
)

# Set to evaluation mode
model.eval()
```

---

## Future Improvements

1. **Multi-class Segmentation**: Extend to classify lesion types
2. **Ensemble Models**: Combine multiple architectures for robustness
3. **Test-Time Augmentation**: Average predictions over augmented inputs
4. **Model Quantization**: Reduce model size for mobile deployment
5. **Uncertainty Estimation**: Provide confidence intervals for predictions

---

## References

1. **U-Net**: Ronneberger, O., Fischer, P., & Brox, T. (2015). U-Net: Convolutional Networks for Biomedical Image Segmentation.
2. **Attention U-Net**: Oktay, O., et al. (2018). Attention U-Net: Learning Where to Look for the Pancreas.
3. **MobileNetV2**: Sandler, M., et al. (2018). MobileNetV2: Inverted Residuals and Linear Bottlenecks.
4. **ISIC 2018**: Codella, N., et al. (2018). Skin Lesion Analysis Toward Melanoma Detection 2018.
5. **Tversky Loss**: Salehi, S.S.M., et al. (2017). Tversky Loss Function for Image Segmentation.

---

## License

This AI model is for **research and educational purposes only**. It should not be used as a substitute for professional medical diagnosis.

---

client/app.js · ADDED · @@ -0,0 +1,445 @@

/**
 * DermAI - Skin Lesion Segmentation Application
 * Frontend JavaScript
 * ============================================
 */

// ===== DOM ELEMENTS =====
const uploadArea = document.getElementById('upload-area');
const fileInput = document.getElementById('file-input');
const uploadBtn = document.getElementById('upload-btn');
const resultsSection = document.getElementById('results-section');
const loadingOverlay = document.getElementById('loading-overlay');
const statusBadge = document.getElementById('status-badge');

// Result elements
const originalImage = document.getElementById('original-image');
const maskImage = document.getElementById('mask-image');
const overlayImage = document.getElementById('overlay-image');
const confidenceValue = document.getElementById('confidence-value');
const areaValue = document.getElementById('area-value');

// Action buttons
const newAnalysisBtn = document.getElementById('new-analysis-btn');
const downloadBtn = document.getElementById('download-btn');

// ===== CONFIGURATION =====
const API_BASE_URL = window.location.origin;

// ===== STATE =====
let currentResults = null;
let currentOriginalImage = null;

// ===== INITIALIZATION =====
document.addEventListener('DOMContentLoaded', () => {
    initializeEventListeners();
    checkAPIHealth();
});

function initializeEventListeners() {
    // Upload area click
    uploadArea.addEventListener('click', (e) => {
        if (e.target !== uploadBtn) {
            fileInput.click();
        }
    });

    // Upload button click
    uploadBtn.addEventListener('click', (e) => {
        e.stopPropagation();
        fileInput.click();
    });

    // File input change
    fileInput.addEventListener('change', handleFileSelect);

    // Drag and drop
    uploadArea.addEventListener('dragover', handleDragOver);
    uploadArea.addEventListener('dragleave', handleDragLeave);
    uploadArea.addEventListener('drop', handleDrop);

    // Action buttons
    newAnalysisBtn.addEventListener('click', resetAnalysis);
    downloadBtn.addEventListener('click', downloadResults);

    // Smooth scroll for nav links
    document.querySelectorAll('.nav-link').forEach(link => {
        link.addEventListener('click', (e) => {
            const href = link.getAttribute('href');
            if (href.startsWith('#')) {
                e.preventDefault();
                const target = document.querySelector(href);
                if (target) {
                    target.scrollIntoView({ behavior: 'smooth' });
                }
            }
        });
    });
}

// ===== API HEALTH CHECK =====
async function checkAPIHealth() {
    try {
        const response = await fetch(`${API_BASE_URL}/api/health`);
        const data = await response.json();

        if (data.status === 'healthy') {
            statusBadge.classList.add('online');
            statusBadge.classList.remove('offline');
            statusBadge.querySelector('.status-text').textContent =
                data.model_loaded ? 'Model Ready' : 'API Online';
        } else {
            throw new Error('API not healthy');
        }
    } catch (error) {
        console.error('API health check failed:', error);
        statusBadge.classList.add('offline');
        statusBadge.classList.remove('online');
        statusBadge.querySelector('.status-text').textContent = 'Offline';
    }
}

// ===== DRAG AND DROP HANDLERS =====
function handleDragOver(e) {
    e.preventDefault();
    e.stopPropagation();
    uploadArea.classList.add('dragover');
}

function handleDragLeave(e) {
    e.preventDefault();
    e.stopPropagation();
    uploadArea.classList.remove('dragover');
}

function handleDrop(e) {
    e.preventDefault();
    e.stopPropagation();
    uploadArea.classList.remove('dragover');

    const files = e.dataTransfer.files;
    if (files.length > 0) {
        processFile(files[0]);
    }
}

// ===== FILE HANDLING =====
function handleFileSelect(e) {
    const files = e.target.files;
    if (files.length > 0) {
        processFile(files[0]);
    }
}

function processFile(file) {
    // Validate file type
    const validTypes = ['image/jpeg', 'image/png', 'image/webp'];
    if (!validTypes.includes(file.type)) {
        showNotification('Invalid file type. Please upload JPEG, PNG, or WebP images.', 'error');
        return;
    }

    // Validate file size (max 10MB)
    const maxSize = 10 * 1024 * 1024;
    if (file.size > maxSize) {
        showNotification('File too large. Maximum size is 10MB.', 'error');
        return;
    }

    // Store original image for download
    const reader = new FileReader();
    reader.onload = (e) => {
        currentOriginalImage = e.target.result;
        originalImage.src = currentOriginalImage;
    };
    reader.readAsDataURL(file);

    // Send to API
    analyzeImage(file);
}

// ===== API COMMUNICATION =====
async function analyzeImage(file) {
    showLoading(true);

    try {
        const formData = new FormData();
        formData.append('file', file);

        const response = await fetch(`${API_BASE_URL}/api/segment`, {
            method: 'POST',
            body: formData
        });

        const data = await response.json();

        if (data.success) {
            currentResults = data;
            displayResults(data);
        } else {
            throw new Error(data.message || 'Analysis failed');
        }
    } catch (error) {
        console.error('Analysis error:', error);
        showNotification(`Analysis failed: ${error.message}`, 'error');
    } finally {
        showLoading(false);
    }
}

// ===== RESULTS DISPLAY =====
function displayResults(data) {
    // Set images
    maskImage.src = `data:image/png;base64,${data.mask_base64}`;
    overlayImage.src = `data:image/png;base64,${data.overlay_base64}`;

    // Set metrics
    confidenceValue.textContent = `${data.confidence}%`;
    areaValue.textContent = `${data.lesion_area_percent}%`;

    // Apply color coding to confidence
    if (data.confidence >= 80) {
        confidenceValue.style.color = 'var(--success-400)';
    } else if (data.confidence >= 50) {
        confidenceValue.style.color = '#fbbf24';
    } else {
        confidenceValue.style.color = '#f87171';
    }

    // Show results section
    resultsSection.classList.remove('hidden');

    // Scroll to results
    resultsSection.scrollIntoView({ behavior: 'smooth', block: 'start' });
}

// ===== RESET ANALYSIS =====
function resetAnalysis() {
    // Hide results
    resultsSection.classList.add('hidden');

    // Clear images
    originalImage.src = '';
    maskImage.src = '';
    overlayImage.src = '';

    // Reset metrics
    confidenceValue.textContent = '--';
    areaValue.textContent = '--';
    confidenceValue.style.color = '';

    // Clear state
    currentResults = null;
    currentOriginalImage = null;

    // Reset file input
    fileInput.value = '';

    // Scroll to upload
    uploadArea.scrollIntoView({ behavior: 'smooth', block: 'center' });
}

// ===== DOWNLOAD RESULTS =====
function downloadResults() {
    if (!currentResults || !currentOriginalImage) {
        showNotification('No results to download', 'error');
        return;
    }

    // Create canvas to composite results
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');

    // Load all images
    const images = {
        original: new Image(),
        mask: new Image(),
        overlay: new Image()
    };

    images.original.src = currentOriginalImage;
    images.mask.src = `data:image/png;base64,${currentResults.mask_base64}`;
    images.overlay.src = `data:image/png;base64,${currentResults.overlay_base64}`;

    // Wait for all images to load
    Promise.all([
        new Promise(resolve => images.original.onload = resolve),
        new Promise(resolve => images.mask.onload = resolve),
        new Promise(resolve => images.overlay.onload = resolve)
    ]).then(() => {
        // Calculate dimensions
        const imgWidth = images.original.width;
        const imgHeight = images.original.height;
        const padding = 20;
        const headerHeight = 60;
        const footerHeight = 80;

        canvas.width = imgWidth * 3 + padding * 4;
        canvas.height = imgHeight + headerHeight + footerHeight + padding * 2;

        // Background
        ctx.fillStyle = '#0a0a0f';
        ctx.fillRect(0, 0, canvas.width, canvas.height);

        // Header
        ctx.fillStyle = '#fafafa';
        ctx.font = 'bold 24px Inter, sans-serif';
        ctx.textAlign = 'center';
        ctx.fillText('DermAI - Skin Lesion Analysis Results', canvas.width / 2, 40);

        // Images
        const y = headerHeight + padding;

        // Original
        ctx.drawImage(images.original, padding, y, imgWidth, imgHeight);
        ctx.fillStyle = '#a1a1aa';
        ctx.font = '14px Inter, sans-serif';
        ctx.fillText('Original', padding + imgWidth / 2, y - 10);

        // Mask
        ctx.drawImage(images.mask, imgWidth + padding * 2, y, imgWidth, imgHeight);
        ctx.fillText('Segmentation Mask', imgWidth + padding * 2 + imgWidth / 2, y - 10);

        // Overlay
        ctx.drawImage(images.overlay, imgWidth * 2 + padding * 3, y, imgWidth, imgHeight);
        ctx.fillText('Lesion Overlay', imgWidth * 2 + padding * 3 + imgWidth / 2, y - 10);

        // Footer with metrics
        const footerY = y + imgHeight + padding + 20;
        ctx.fillStyle = '#fafafa';
        ctx.font = 'bold 16px Inter, sans-serif';
        ctx.textAlign = 'center';
        ctx.fillText(
            `Confidence: ${currentResults.confidence}% | Lesion Area: ${currentResults.lesion_area_percent}% | Model: Attention U-Net`,
            canvas.width / 2,
            footerY
        );

        ctx.fillStyle = '#71717a';
        ctx.font = '12px Inter, sans-serif';
        ctx.fillText(
            `Generated: ${new Date().toLocaleString()} | DermAI - For research purposes only`,
            canvas.width / 2,
            footerY + 25
        );

        // Download
        const link = document.createElement('a');
        link.download = `dermai-analysis-${Date.now()}.png`;
        link.href = canvas.toDataURL('image/png');
        link.click();

        showNotification('Results downloaded successfully!', 'success');
    });
}

// ===== UTILITY FUNCTIONS =====
function showLoading(show) {
    if (show) {
        loadingOverlay.classList.remove('hidden');
        document.body.style.overflow = 'hidden';
    } else {
        loadingOverlay.classList.add('hidden');
        document.body.style.overflow = '';
    }
}

function showNotification(message, type = 'info') {
    // Create notification element
    const notification = document.createElement('div');
    notification.className = `notification notification-${type}`;
    notification.innerHTML = `
        <span class="notification-icon">${type === 'success' ? '✓' : type === 'error' ? '✗' : 'ℹ'}</span>
        <span class="notification-message">${message}</span>
    `;

    // Add styles
    Object.assign(notification.style, {
        position: 'fixed',
        bottom: '20px',
        right: '20px',
        padding: '1rem 1.5rem',
        background: type === 'success' ? 'rgba(34, 197, 94, 0.9)' :
                    type === 'error' ? 'rgba(239, 68, 68, 0.9)' :
                    'rgba(99, 102, 241, 0.9)',
        color: 'white',
        borderRadius: '12px',
        boxShadow: '0 10px 40px rgba(0, 0, 0, 0.3)',
        display: 'flex',
        alignItems: 'center',
        gap: '0.75rem',
        fontSize: '0.9rem',
        fontWeight: '500',
        zIndex: '9999',
        animation: 'slideIn 0.3s ease',
        backdropFilter: 'blur(10px)'
    });

    // Add animation keyframes
    const style = document.createElement('style');
    style.textContent = `
        @keyframes slideIn {
            from {
                opacity: 0;
                transform: translateX(100px);
            }
            to {
                opacity: 1;
                transform: translateX(0);
            }
        }
        @keyframes slideOut {
            from {
                opacity: 1;
                transform: translateX(0);
            }
            to {
                opacity: 0;
                transform: translateX(100px);
            }
        }
    `;
    document.head.appendChild(style);

    document.body.appendChild(notification);

    // Auto remove after 4 seconds
    setTimeout(() => {
        notification.style.animation = 'slideOut 0.3s ease';
        setTimeout(() => {
            notification.remove();
        }, 300);
    }, 4000);
}

// ===== KEYBOARD SHORTCUTS =====
document.addEventListener('keydown', (e) => {
    // Ctrl/Cmd + O to open file
    if ((e.ctrlKey || e.metaKey) && e.key === 'o') {
        e.preventDefault();
        fileInput.click();
    }

    // Ctrl/Cmd + S to download results
    if ((e.ctrlKey || e.metaKey) && e.key === 's' && currentResults) {
        e.preventDefault();
        downloadResults();
    }

    // Escape to reset
    if (e.key === 'Escape' && !resultsSection.classList.contains('hidden')) {
        resetAnalysis();
    }
});

// ===== WINDOW EVENTS =====
// Prevent accidental page leave when results are present
window.addEventListener('beforeunload', (e) => {
    if (currentResults) {
        e.preventDefault();
        e.returnValue = '';
    }
});

// Re-check API health periodically
setInterval(checkAPIHealth, 30000);
client/index.html
ADDED
|
@@ -0,0 +1,322 @@
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta name="description"
        content="DermAI - Advanced skin lesion segmentation using deep learning. Analyze dermoscopic images for potential skin cancer lesions.">
    <meta name="keywords" content="skin cancer, dermoscopy, AI, deep learning, medical imaging, lesion segmentation">
    <title>DermAI | AI-Powered Skin Lesion Analysis</title>

    <!-- Google Fonts -->
    <link rel="preconnect" href="https://fonts.googleapis.com">
    <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
    <link
        href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800&family=JetBrains+Mono:wght@400;500&display=swap"
        rel="stylesheet">

    <!-- Styles -->
    <link rel="stylesheet" href="styles.css">
</head>

<body>
    <!-- Gradient Background -->
    <div class="gradient-bg"></div>
    <div class="gradient-orbs">
        <div class="orb orb-1"></div>
        <div class="orb orb-2"></div>
        <div class="orb orb-3"></div>
    </div>

    <!-- Main Container -->
    <div class="app-container">
        <!-- Header -->
        <header class="header">
            <div class="logo">
                <div class="logo-icon">
                    <svg viewBox="0 0 40 40" fill="none" xmlns="http://www.w3.org/2000/svg">
                        <circle cx="20" cy="20" r="18" stroke="url(#gradient1)" stroke-width="2" />
                        <circle cx="20" cy="20" r="8" fill="url(#gradient1)" />
                        <path d="M20 2C20 2 25 10 25 20C25 30 20 38 20 38" stroke="url(#gradient1)" stroke-width="2"
                            stroke-linecap="round" />
                        <path d="M20 2C20 2 15 10 15 20C15 30 20 38 20 38" stroke="url(#gradient1)" stroke-width="2"
                            stroke-linecap="round" />
                        <defs>
                            <linearGradient id="gradient1" x1="0" y1="0" x2="40" y2="40">
                                <stop offset="0%" stop-color="#6366f1" />
                                <stop offset="100%" stop-color="#ec4899" />
                            </linearGradient>
                        </defs>
                    </svg>
                </div>
                <span class="logo-text">Derm<span class="highlight">AI</span></span>
            </div>
            <nav class="nav">
                <a href="#" class="nav-link active">Analysis</a>
                <a href="#about" class="nav-link">About</a>
                <a href="#technology" class="nav-link">Technology</a>
            </nav>
            <div class="status-badge" id="status-badge">
                <span class="status-dot"></span>
                <span class="status-text">Checking...</span>
            </div>
        </header>

        <!-- Main Content -->
        <main class="main-content">
            <!-- Hero Section -->
            <section class="hero">
                <div class="hero-content">
                    <h1 class="hero-title">
                        AI-Powered <span class="gradient-text">Skin Lesion</span> Analysis
                    </h1>
                    <p class="hero-subtitle">
                        Upload dermoscopic images for instant lesion segmentation using our
                        state-of-the-art Attention U-Net deep learning model.
                    </p>
                </div>
            </section>

            <!-- Upload Section -->
            <section class="upload-section">
                <div class="upload-card" id="upload-area">
                    <div class="upload-icon">
                        <svg viewBox="0 0 64 64" fill="none" xmlns="http://www.w3.org/2000/svg">
                            <rect x="8" y="16" width="48" height="36" rx="4" stroke="currentColor" stroke-width="2" />
                            <circle cx="24" cy="28" r="4" stroke="currentColor" stroke-width="2" />
                            <path d="M8 44L24 32L36 40L56 28" stroke="currentColor" stroke-width="2"
                                stroke-linecap="round" stroke-linejoin="round" />
                            <path d="M32 8V16M28 12L32 8L36 12" stroke="currentColor" stroke-width="2"
                                stroke-linecap="round" stroke-linejoin="round" />
                        </svg>
                    </div>
                    <h3 class="upload-title">Drop your dermoscopic image here</h3>
                    <p class="upload-subtitle">or click to browse • JPEG, PNG, WebP supported</p>
                    <input type="file" id="file-input" accept="image/jpeg,image/png,image/webp" hidden>
                    <button class="upload-btn" id="upload-btn">
                        <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                            <path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4" />
                            <polyline points="17,8 12,3 7,8" />
                            <line x1="12" y1="3" x2="12" y2="15" />
                        </svg>
                        Select Image
                    </button>
                </div>
            </section>

            <!-- Results Section (Hidden by default) -->
            <section class="results-section hidden" id="results-section">
                <div class="results-grid">
                    <!-- Original Image -->
                    <div class="result-card">
                        <div class="card-header">
                            <span class="card-icon">📷</span>
                            <h3 class="card-title">Original Image</h3>
                        </div>
                        <div class="image-container">
                            <img id="original-image" src="" alt="Original dermoscopic image">
                        </div>
                    </div>

                    <!-- Segmentation Mask -->
                    <div class="result-card">
                        <div class="card-header">
                            <span class="card-icon">🎯</span>
                            <h3 class="card-title">Segmentation Mask</h3>
                        </div>
                        <div class="image-container">
                            <img id="mask-image" src="" alt="Segmentation mask">
                        </div>
                    </div>

                    <!-- Overlay -->
                    <div class="result-card overlay-card">
                        <div class="card-header">
                            <span class="card-icon">🔬</span>
                            <h3 class="card-title">Lesion Overlay</h3>
                        </div>
                        <div class="image-container">
                            <img id="overlay-image" src="" alt="Overlay visualization">
                        </div>
                    </div>
                </div>

                <!-- Metrics Panel -->
                <div class="metrics-panel">
                    <div class="metric-card">
                        <div class="metric-icon confidence-icon">
                            <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                                <path d="M22 11.08V12a10 10 0 1 1-5.93-9.14" />
                                <polyline points="22,4 12,14.01 9,11.01" />
                            </svg>
                        </div>
                        <div class="metric-content">
                            <span class="metric-value" id="confidence-value">--</span>
                            <span class="metric-label">Confidence Score</span>
                        </div>
                    </div>
                    <div class="metric-card">
                        <div class="metric-icon area-icon">
                            <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                                <rect x="3" y="3" width="18" height="18" rx="2" />
                                <circle cx="12" cy="12" r="4" />
                            </svg>
                        </div>
                        <div class="metric-content">
                            <span class="metric-value" id="area-value">--</span>
                            <span class="metric-label">Lesion Area</span>
                        </div>
                    </div>
                    <div class="metric-card">
                        <div class="metric-icon model-icon">
                            <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                                <polygon points="12,2 2,7 12,12 22,7" />
                                <polyline points="2,17 12,22 22,17" />
                                <polyline points="2,12 12,17 22,12" />
                            </svg>
                        </div>
                        <div class="metric-content">
                            <span class="metric-value model-name">Attention U-Net</span>
                            <span class="metric-label">Model Architecture</span>
                        </div>
                    </div>
                </div>

                <!-- Action Buttons -->
                <div class="action-buttons">
                    <button class="btn btn-secondary" id="new-analysis-btn">
                        <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                            <path d="M23 4v6h-6" />
                            <path d="M20.49 15a9 9 0 1 1-2.12-9.36L23 10" />
                        </svg>
                        New Analysis
                    </button>
                    <button class="btn btn-primary" id="download-btn">
                        <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                            <path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4" />
                            <polyline points="7,10 12,15 17,10" />
                            <line x1="12" y1="15" x2="12" y2="3" />
                        </svg>
                        Download Results
                    </button>
                </div>
            </section>

            <!-- Loading State -->
            <div class="loading-overlay hidden" id="loading-overlay">
                <div class="loading-content">
                    <div class="loading-spinner">
                        <div class="spinner-ring"></div>
                        <div class="spinner-ring"></div>
                        <div class="spinner-ring"></div>
                    </div>
                    <p class="loading-text">Analyzing image...</p>
                    <p class="loading-subtext">Running AI segmentation model</p>
                </div>
            </div>
        </main>

        <!-- About Section -->
        <section class="info-section" id="about">
            <div class="section-header">
                <h2 class="section-title">About <span class="gradient-text">DermAI</span></h2>
                <p class="section-subtitle">Empowering healthcare professionals with AI-assisted skin cancer detection</p>
            </div>
            <div class="features-grid">
                <div class="feature-card">
                    <div class="feature-icon">
                        <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                            <circle cx="12" cy="12" r="3" />
                            <path
                                d="M19.4 15a1.65 1.65 0 0 0 .33 1.82l.06.06a2 2 0 0 1 0 2.83 2 2 0 0 1-2.83 0l-.06-.06a1.65 1.65 0 0 0-1.82-.33 1.65 1.65 0 0 0-1 1.51V21a2 2 0 0 1-2 2 2 2 0 0 1-2-2v-.09A1.65 1.65 0 0 0 9 19.4a1.65 1.65 0 0 0-1.82.33l-.06.06a2 2 0 0 1-2.83 0 2 2 0 0 1 0-2.83l.06-.06a1.65 1.65 0 0 0 .33-1.82 1.65 1.65 0 0 0-1.51-1H3a2 2 0 0 1-2-2 2 2 0 0 1 2-2h.09A1.65 1.65 0 0 0 4.6 9a1.65 1.65 0 0 0-.33-1.82l-.06-.06a2 2 0 0 1 0-2.83 2 2 0 0 1 2.83 0l.06.06a1.65 1.65 0 0 0 1.82.33H9a1.65 1.65 0 0 0 1-1.51V3a2 2 0 0 1 2-2 2 2 0 0 1 2 2v.09a1.65 1.65 0 0 0 1 1.51 1.65 1.65 0 0 0 1.82-.33l.06-.06a2 2 0 0 1 2.83 0 2 2 0 0 1 0 2.83l-.06.06a1.65 1.65 0 0 0-.33 1.82V9a1.65 1.65 0 0 0 1.51 1H21a2 2 0 0 1 2 2 2 2 0 0 1-2 2h-.09a1.65 1.65 0 0 0-1.51 1z" />
                        </svg>
                    </div>
                    <h3>Advanced AI</h3>
                    <p>Powered by Attention U-Net with MobileNetV2 encoder, trained on the ISIC 2018 dataset.</p>
                </div>
                <div class="feature-card">
                    <div class="feature-icon">
                        <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                            <path d="M13 2L3 14h9l-1 8 10-12h-9l1-8z" />
                        </svg>
                    </div>
                    <h3>Real-time Analysis</h3>
                    <p>Get instant segmentation results with confidence scores and lesion area metrics.</p>
                </div>
                <div class="feature-card">
                    <div class="feature-icon">
                        <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
                            <path d="M12 22s8-4 8-10V5l-8-3-8 3v7c0 6 8 10 8 10z" />
                        </svg>
                    </div>
                    <h3>Privacy First</h3>
                    <p>All processing happens locally. Your medical images never leave your environment.</p>
                </div>
            </div>
        </section>

        <!-- Technology Section -->
        <section class="info-section" id="technology">
            <div class="section-header">
                <h2 class="section-title">The <span class="gradient-text">Technology</span></h2>
                <p class="section-subtitle">State-of-the-art deep learning architecture for medical image segmentation</p>
            </div>
            <div class="tech-grid">
                <div class="tech-card">
                    <div class="tech-header">
                        <span class="tech-badge">Encoder</span>
                        <h3>MobileNetV2</h3>
                    </div>
                    <p>Pre-trained on ImageNet, providing efficient feature extraction with inverted residuals and
                        linear bottlenecks.</p>
                </div>
                <div class="tech-card">
                    <div class="tech-header">
                        <span class="tech-badge">Architecture</span>
                        <h3>Attention U-Net</h3>
                    </div>
                    <p>Skip connections enhanced with attention gates to focus on lesion regions while suppressing
                        irrelevant background features.</p>
                </div>
                <div class="tech-card">
                    <div class="tech-header">
                        <span class="tech-badge">Loss Function</span>
                        <h3>Combined Loss</h3>
                    </div>
                    <p>Optimized with BCE, Dice, and Tversky losses to handle class imbalance and improve boundary
                        precision.</p>
                </div>
                <div class="tech-card">
                    <div class="tech-header">
                        <span class="tech-badge">Training</span>
                        <h3>Deep Supervision</h3>
                    </div>
                    <p>Multi-scale loss computation at different decoder levels for better gradient flow and feature
                        learning.</p>
                </div>
            </div>
        </section>

        <!-- Disclaimer -->
        <div class="disclaimer">
            <div class="disclaimer-icon">⚠️</div>
            <p><strong>Medical Disclaimer:</strong> This tool is for research and educational purposes only.
                It should not be used as a substitute for professional medical advice, diagnosis, or treatment.
                Always consult a qualified healthcare provider for medical concerns.</p>
        </div>

        <!-- Footer -->
        <footer class="footer">
            <p>© 2026 raid • Powered by PyTorch & FastAPI</p>
            <p class="footer-tech">Built with Attention U-Net • Trained on ISIC 2018 Dataset</p>
        </footer>
    </div>

    <!-- Scripts -->
    <script src="app.js"></script>
</body>

</html>
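The "Combined Loss" card above states that training optimizes BCE, Dice, and Tversky losses to handle class imbalance. A minimal PyTorch sketch of what such a combined objective can look like follows; the equal term weighting and the Tversky alpha/beta values are illustrative assumptions, since the project's actual hyperparameters live in the training script, not in this page.

import torch
import torch.nn.functional as F

def combined_loss(logits, targets, alpha=0.3, beta=0.7, eps=1e-6):
    """Sketch of a BCE + Dice + Tversky objective on raw logits.
    Term weights and alpha/beta are illustrative, not the project's values."""
    probs = torch.sigmoid(logits)
    bce = F.binary_cross_entropy_with_logits(logits, targets)

    # Dice term: overlap-based, robust to foreground/background imbalance
    inter = (probs * targets).sum()
    dice = 1 - (2 * inter + eps) / (probs.sum() + targets.sum() + eps)

    # Tversky term: beta > alpha penalizes false negatives (missed lesion
    # pixels) more heavily than false positives
    tp = (probs * targets).sum()
    fp = (probs * (1 - targets)).sum()
    fn = ((1 - probs) * targets).sum()
    tversky = 1 - (tp + eps) / (tp + alpha * fp + beta * fn + eps)

    return bce + dice + tversky
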
client/styles.css
ADDED
@@ -0,0 +1,975 @@
/*
 * DermAI - Skin Lesion Segmentation Application
 * Modern Medical Theme Stylesheet
 * ============================================
 */

/* ===== CSS VARIABLES ===== */
:root {
    /* Primary Colors - Medical Purple/Blue */
    --primary-50: #f5f3ff;
    --primary-100: #ede9fe;
    --primary-200: #ddd6fe;
    --primary-300: #c4b5fd;
    --primary-400: #a78bfa;
    --primary-500: #8b5cf6;
    --primary-600: #7c3aed;
    --primary-700: #6d28d9;
    --primary-800: #5b21b6;
    --primary-900: #4c1d95;

    /* Accent Colors - Medical Pink/Magenta */
    --accent-400: #f472b6;
    --accent-500: #ec4899;
    --accent-600: #db2777;

    /* Success/Health Green */
    --success-400: #4ade80;
    --success-500: #22c55e;
    --success-600: #16a34a;

    /* Neutral Colors */
    --neutral-50: #fafafa;
    --neutral-100: #f4f4f5;
    --neutral-200: #e4e4e7;
    --neutral-300: #d4d4d8;
    --neutral-400: #a1a1aa;
    --neutral-500: #71717a;
    --neutral-600: #52525b;
    --neutral-700: #3f3f46;
    --neutral-800: #27272a;
    --neutral-900: #18181b;
    --neutral-950: #09090b;

    /* Semantic Colors */
    --bg-primary: #0a0a0f;
    --bg-secondary: #111118;
    --bg-tertiary: #18181f;
    --bg-card: rgba(24, 24, 31, 0.7);
    --bg-card-hover: rgba(30, 30, 40, 0.8);

    --text-primary: #fafafa;
    --text-secondary: #a1a1aa;
    --text-muted: #71717a;

    /* Gradients */
    --gradient-primary: linear-gradient(135deg, #6366f1 0%, #8b5cf6 50%, #ec4899 100%);
    --gradient-accent: linear-gradient(135deg, #ec4899 0%, #f472b6 100%);
    --gradient-success: linear-gradient(135deg, #22c55e 0%, #4ade80 100%);
    --gradient-glass: linear-gradient(135deg, rgba(255,255,255,0.1) 0%, rgba(255,255,255,0.05) 100%);

    /* Shadows */
    --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.3);
    --shadow-md: 0 4px 6px -1px rgba(0, 0, 0, 0.4), 0 2px 4px -2px rgba(0, 0, 0, 0.3);
    --shadow-lg: 0 10px 15px -3px rgba(0, 0, 0, 0.4), 0 4px 6px -4px rgba(0, 0, 0, 0.3);
    --shadow-xl: 0 20px 25px -5px rgba(0, 0, 0, 0.4), 0 8px 10px -6px rgba(0, 0, 0, 0.3);
    --shadow-glow: 0 0 40px rgba(139, 92, 246, 0.3);
    --shadow-glow-accent: 0 0 40px rgba(236, 72, 153, 0.3);

    /* Borders */
    --border-primary: 1px solid rgba(255, 255, 255, 0.1);
    --border-accent: 1px solid rgba(139, 92, 246, 0.3);

    /* Border Radius */
    --radius-sm: 0.375rem;
    --radius-md: 0.5rem;
    --radius-lg: 0.75rem;
    --radius-xl: 1rem;
    --radius-2xl: 1.5rem;
    --radius-full: 9999px;

    /* Transitions */
    --transition-fast: 150ms ease;
    --transition-base: 250ms ease;
    --transition-slow: 350ms ease;
    --transition-spring: 300ms cubic-bezier(0.34, 1.56, 0.64, 1);

    /* Typography */
    --font-sans: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
    --font-mono: 'JetBrains Mono', 'Fira Code', monospace;
}

/* ===== RESET & BASE ===== */
*, *::before, *::after {
    box-sizing: border-box;
    margin: 0;
    padding: 0;
}

html {
    scroll-behavior: smooth;
}

body {
    font-family: var(--font-sans);
    background: var(--bg-primary);
    color: var(--text-primary);
    line-height: 1.6;
    min-height: 100vh;
    overflow-x: hidden;
    -webkit-font-smoothing: antialiased;
    -moz-osx-font-smoothing: grayscale;
}

a {
    color: inherit;
    text-decoration: none;
}

button {
    font-family: inherit;
    cursor: pointer;
    border: none;
    background: none;
}

img {
    max-width: 100%;
    height: auto;
    display: block;
}

/* ===== ANIMATED BACKGROUND ===== */
.gradient-bg {
    position: fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    background:
        radial-gradient(ellipse at 20% 20%, rgba(99, 102, 241, 0.15) 0%, transparent 50%),
        radial-gradient(ellipse at 80% 80%, rgba(236, 72, 153, 0.1) 0%, transparent 50%),
        radial-gradient(ellipse at 50% 50%, rgba(139, 92, 246, 0.05) 0%, transparent 70%);
    pointer-events: none;
    z-index: -2;
}

.gradient-orbs {
    position: fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    pointer-events: none;
    z-index: -1;
    overflow: hidden;
}

.orb {
    position: absolute;
    border-radius: 50%;
    filter: blur(80px);
    opacity: 0.4;
    animation: float 20s ease-in-out infinite;
}

.orb-1 {
    width: 500px;
    height: 500px;
    background: var(--primary-600);
    top: -200px;
    right: -100px;
    animation-delay: 0s;
}

.orb-2 {
    width: 400px;
    height: 400px;
    background: var(--accent-500);
    bottom: -150px;
    left: -100px;
    animation-delay: -7s;
}

.orb-3 {
    width: 300px;
    height: 300px;
    background: var(--primary-500);
    top: 50%;
    left: 50%;
    transform: translate(-50%, -50%);
    animation-delay: -14s;
}

@keyframes float {
    0%, 100% { transform: translate(0, 0) scale(1); }
    25% { transform: translate(30px, -30px) scale(1.05); }
    50% { transform: translate(-20px, 20px) scale(0.95); }
    75% { transform: translate(-30px, -20px) scale(1.02); }
}

/* ===== APP CONTAINER ===== */
.app-container {
    max-width: 1400px;
    margin: 0 auto;
    padding: 0 2rem;
    min-height: 100vh;
    display: flex;
    flex-direction: column;
}

/* ===== HEADER ===== */
.header {
    display: flex;
    align-items: center;
    justify-content: space-between;
    padding: 1.5rem 0;
    border-bottom: var(--border-primary);
}

.logo {
    display: flex;
    align-items: center;
    gap: 0.75rem;
}

.logo-icon {
    width: 40px;
    height: 40px;
}

.logo-text {
    font-size: 1.5rem;
    font-weight: 700;
    letter-spacing: -0.02em;
}

.logo-text .highlight {
    background: var(--gradient-primary);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
}

.nav {
    display: flex;
    gap: 2rem;
}

.nav-link {
    font-size: 0.9rem;
    font-weight: 500;
    color: var(--text-secondary);
    transition: color var(--transition-fast);
    position: relative;
}

.nav-link:hover,
.nav-link.active {
    color: var(--text-primary);
}

.nav-link.active::after {
    content: '';
    position: absolute;
    bottom: -4px;
    left: 0;
    right: 0;
    height: 2px;
    background: var(--gradient-primary);
    border-radius: var(--radius-full);
}

.status-badge {
    display: flex;
    align-items: center;
    gap: 0.5rem;
    padding: 0.5rem 1rem;
    background: var(--bg-card);
    border: var(--border-primary);
    border-radius: var(--radius-full);
    backdrop-filter: blur(10px);
}

.status-dot {
    width: 8px;
    height: 8px;
    border-radius: 50%;
    background: var(--neutral-500);
    animation: pulse 2s ease-in-out infinite;
}

.status-badge.online .status-dot {
    background: var(--success-500);
}

.status-badge.offline .status-dot {
    background: #ef4444;
}

.status-text {
    font-size: 0.8rem;
    font-weight: 500;
    color: var(--text-secondary);
}

@keyframes pulse {
    0%, 100% { opacity: 1; }
    50% { opacity: 0.5; }
}

/* ===== MAIN CONTENT ===== */
.main-content {
    flex: 1;
    padding: 3rem 0;
}

/* ===== HERO SECTION ===== */
.hero {
    text-align: center;
    margin-bottom: 3rem;
}

.hero-title {
    font-size: clamp(2rem, 5vw, 3.5rem);
    font-weight: 800;
    letter-spacing: -0.03em;
    line-height: 1.1;
    margin-bottom: 1rem;
}

.gradient-text {
    background: var(--gradient-primary);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
}

.hero-subtitle {
    font-size: 1.125rem;
    color: var(--text-secondary);
    max-width: 600px;
    margin: 0 auto;
}

/* ===== UPLOAD SECTION ===== */
.upload-section {
    margin-bottom: 3rem;
}

.upload-card {
    background: var(--bg-card);
    border: 2px dashed rgba(139, 92, 246, 0.3);
    border-radius: var(--radius-2xl);
    padding: 4rem 2rem;
    text-align: center;
    transition: all var(--transition-base);
    cursor: pointer;
    backdrop-filter: blur(10px);
}

.upload-card:hover,
.upload-card.dragover {
    border-color: var(--primary-500);
    background: var(--bg-card-hover);
    box-shadow: var(--shadow-glow);
    transform: translateY(-2px);
}

.upload-icon {
    width: 80px;
    height: 80px;
    margin: 0 auto 1.5rem;
    color: var(--primary-400);
    opacity: 0.8;
}

.upload-title {
    font-size: 1.25rem;
    font-weight: 600;
    margin-bottom: 0.5rem;
}

.upload-subtitle {
    font-size: 0.9rem;
    color: var(--text-muted);
    margin-bottom: 1.5rem;
}

.upload-btn {
    display: inline-flex;
    align-items: center;
    gap: 0.5rem;
    padding: 0.875rem 1.5rem;
    background: var(--gradient-primary);
    color: white;
    font-weight: 600;
    font-size: 0.9rem;
    border-radius: var(--radius-lg);
    transition: all var(--transition-base);
    box-shadow: var(--shadow-lg), 0 0 20px rgba(139, 92, 246, 0.2);
}

.upload-btn:hover {
    transform: translateY(-2px);
    box-shadow: var(--shadow-xl), 0 0 30px rgba(139, 92, 246, 0.3);
}

.upload-btn svg {
    width: 18px;
    height: 18px;
}

/* ===== RESULTS SECTION ===== */
.results-section {
    animation: fadeIn 0.5s ease-out;
}

.results-section.hidden {
    display: none;
}

@keyframes fadeIn {
    from {
        opacity: 0;
        transform: translateY(20px);
    }
    to {
        opacity: 1;
        transform: translateY(0);
    }
}

.results-grid {
    display: grid;
    grid-template-columns: repeat(3, 1fr);
    gap: 1.5rem;
    margin-bottom: 2rem;
}

.result-card {
    background: var(--bg-card);
    border: var(--border-primary);
    border-radius: var(--radius-xl);
    overflow: hidden;
    backdrop-filter: blur(10px);
    transition: all var(--transition-base);
}

.result-card:hover {
    border-color: rgba(139, 92, 246, 0.3);
    box-shadow: var(--shadow-lg);
    transform: translateY(-4px);
}

.card-header {
    display: flex;
    align-items: center;
    gap: 0.75rem;
    padding: 1rem 1.25rem;
    border-bottom: var(--border-primary);
    background: rgba(255, 255, 255, 0.02);
}

.card-icon {
    font-size: 1.25rem;
}

.card-title {
    font-size: 0.9rem;
    font-weight: 600;
}

.image-container {
    aspect-ratio: 1;
    display: flex;
    align-items: center;
    justify-content: center;
    background: var(--neutral-950);
    padding: 1rem;
}

.image-container img {
    max-width: 100%;
    max-height: 100%;
    object-fit: contain;
    border-radius: var(--radius-md);
}

/* ===== METRICS PANEL ===== */
.metrics-panel {
    display: grid;
    grid-template-columns: repeat(3, 1fr);
    gap: 1.5rem;
    margin-bottom: 2rem;
}

.metric-card {
    display: flex;
    align-items: center;
    gap: 1rem;
    padding: 1.5rem;
    background: var(--bg-card);
    border: var(--border-primary);
    border-radius: var(--radius-xl);
    backdrop-filter: blur(10px);
    transition: all var(--transition-base);
}

.metric-card:hover {
    border-color: rgba(139, 92, 246, 0.3);
    transform: translateY(-2px);
}

.metric-icon {
    width: 48px;
    height: 48px;
    display: flex;
    align-items: center;
    justify-content: center;
    border-radius: var(--radius-lg);
    flex-shrink: 0;
}

.metric-icon svg {
    width: 24px;
    height: 24px;
}

.confidence-icon {
    background: rgba(34, 197, 94, 0.1);
    color: var(--success-400);
}

.area-icon {
    background: rgba(139, 92, 246, 0.1);
    color: var(--primary-400);
}

.model-icon {
    background: rgba(236, 72, 153, 0.1);
    color: var(--accent-400);
}

.metric-content {
    display: flex;
    flex-direction: column;
}

.metric-value {
    font-size: 1.5rem;
    font-weight: 700;
    line-height: 1.2;
}

.metric-value.model-name {
    font-size: 1rem;
    font-family: var(--font-mono);
}

.metric-label {
    font-size: 0.8rem;
    color: var(--text-muted);
    font-weight: 500;
}

/* ===== ACTION BUTTONS ===== */
.action-buttons {
    display: flex;
    justify-content: center;
    gap: 1rem;
}

.btn {
    display: inline-flex;
    align-items: center;
    gap: 0.5rem;
    padding: 0.875rem 1.5rem;
    font-weight: 600;
    font-size: 0.9rem;
    border-radius: var(--radius-lg);
    transition: all var(--transition-base);
}

.btn svg {
    width: 18px;
    height: 18px;
}

.btn-primary {
    background: var(--gradient-primary);
    color: white;
    box-shadow: var(--shadow-md), 0 0 20px rgba(139, 92, 246, 0.2);
}

.btn-primary:hover {
    transform: translateY(-2px);
    box-shadow: var(--shadow-lg), 0 0 30px rgba(139, 92, 246, 0.3);
}

.btn-secondary {
    background: var(--bg-tertiary);
    color: var(--text-primary);
    border: var(--border-primary);
}

.btn-secondary:hover {
    background: var(--bg-card-hover);
    border-color: rgba(139, 92, 246, 0.3);
}

/* ===== LOADING OVERLAY ===== */
.loading-overlay {
    position: fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    background: rgba(10, 10, 15, 0.9);
    backdrop-filter: blur(10px);
    display: flex;
    align-items: center;
    justify-content: center;
    z-index: 1000;
}

.loading-overlay.hidden {
    display: none;
}

.loading-content {
    text-align: center;
}

.loading-spinner {
    width: 80px;
    height: 80px;
    position: relative;
    margin: 0 auto 1.5rem;
}

.spinner-ring {
    position: absolute;
    width: 100%;
    height: 100%;
    border-radius: 50%;
    border: 3px solid transparent;
    animation: spin 1.5s linear infinite;
}

.spinner-ring:nth-child(1) {
    border-top-color: var(--primary-500);
    animation-delay: 0s;
}

.spinner-ring:nth-child(2) {
    width: 60%;
    height: 60%;
    top: 20%;
    left: 20%;
    border-right-color: var(--accent-500);
    animation-delay: 0.2s;
    animation-direction: reverse;
}

.spinner-ring:nth-child(3) {
    width: 30%;
    height: 30%;
    top: 35%;
    left: 35%;
    border-bottom-color: var(--success-500);
    animation-delay: 0.4s;
}

@keyframes spin {
    from { transform: rotate(0deg); }
    to { transform: rotate(360deg); }
}

.loading-text {
    font-size: 1.25rem;
    font-weight: 600;
    margin-bottom: 0.5rem;
}

.loading-subtext {
    font-size: 0.9rem;
    color: var(--text-muted);
}

/* ===== INFO SECTIONS ===== */
.info-section {
    padding: 4rem 0;
    border-top: var(--border-primary);
}

.section-header {
    text-align: center;
    margin-bottom: 3rem;
}

.section-title {
    font-size: 2rem;
    font-weight: 700;
    margin-bottom: 0.75rem;
}

.section-subtitle {
    font-size: 1rem;
    color: var(--text-secondary);
    max-width: 500px;
    margin: 0 auto;
}

/* ===== FEATURES GRID ===== */
.features-grid {
    display: grid;
    grid-template-columns: repeat(3, 1fr);
    gap: 1.5rem;
}

.feature-card {
    padding: 2rem;
    background: var(--bg-card);
    border: var(--border-primary);
    border-radius: var(--radius-xl);
    text-align: center;
    transition: all var(--transition-base);
    backdrop-filter: blur(10px);
}

.feature-card:hover {
    border-color: rgba(139, 92, 246, 0.3);
    transform: translateY(-4px);
    box-shadow: var(--shadow-lg);
}

.feature-icon {
    width: 60px;
    height: 60px;
    margin: 0 auto 1.5rem;
    display: flex;
    align-items: center;
    justify-content: center;
    background: var(--gradient-primary);
    border-radius: var(--radius-lg);
}

.feature-icon svg {
    width: 28px;
    height: 28px;
    color: white;
}

.feature-card h3 {
    font-size: 1.125rem;
    font-weight: 600;
    margin-bottom: 0.75rem;
}

.feature-card p {
    font-size: 0.9rem;
    color: var(--text-secondary);
    line-height: 1.6;
}

/* ===== TECH GRID ===== */
.tech-grid {
    display: grid;
    grid-template-columns: repeat(2, 1fr);
    gap: 1.5rem;
}

.tech-card {
    padding: 1.5rem;
    background: var(--bg-card);
    border: var(--border-primary);
    border-radius: var(--radius-xl);
    transition: all var(--transition-base);
    backdrop-filter: blur(10px);
}

.tech-card:hover {
    border-color: rgba(139, 92, 246, 0.3);
    transform: translateY(-2px);
}

.tech-header {
    display: flex;
    align-items: center;
    gap: 1rem;
    margin-bottom: 0.75rem;
}

.tech-badge {
    font-size: 0.7rem;
    font-weight: 600;
    text-transform: uppercase;
    letter-spacing: 0.05em;
    padding: 0.25rem 0.5rem;
    background: var(--gradient-primary);
    color: white;
    border-radius: var(--radius-sm);
}

.tech-header h3 {
    font-size: 1rem;
    font-weight: 600;
    font-family: var(--font-mono);
}

.tech-card p {
    font-size: 0.875rem;
    color: var(--text-secondary);
    line-height: 1.6;
}

/* ===== DISCLAIMER ===== */
.disclaimer {
    display: flex;
    align-items: flex-start;
    gap: 1rem;
    padding: 1.5rem;
    background: rgba(234, 179, 8, 0.1);
    border: 1px solid rgba(234, 179, 8, 0.3);
    border-radius: var(--radius-xl);
    margin: 2rem 0;
}

.disclaimer-icon {
    font-size: 1.5rem;
    flex-shrink: 0;
}

.disclaimer p {
    font-size: 0.875rem;
    color: var(--text-secondary);
    line-height: 1.6;
}

.disclaimer strong {
    color: #fbbf24;
}

/* ===== FOOTER ===== */
.footer {
    padding: 2rem 0;
    text-align: center;
    border-top: var(--border-primary);
}

.footer p {
    font-size: 0.875rem;
    color: var(--text-muted);
}

.footer-tech {
    font-size: 0.75rem;
    margin-top: 0.5rem;
    font-family: var(--font-mono);
}

/* ===== RESPONSIVE DESIGN ===== */
@media (max-width: 1024px) {
    .results-grid {
        grid-template-columns: repeat(2, 1fr);
    }

    .results-grid .overlay-card {
        grid-column: span 2;
    }

    .metrics-panel {
        grid-template-columns: repeat(2, 1fr);
    }

    .metrics-panel .metric-card:last-child {
        grid-column: span 2;
    }

    .features-grid {
        grid-template-columns: repeat(2, 1fr);
    }

    .features-grid .feature-card:last-child {
        grid-column: span 2;
    }
}

@media (max-width: 768px) {
    .app-container {
        padding: 0 1rem;
    }

    .header {
        flex-wrap: wrap;
        gap: 1rem;
    }

    .nav {
        order: 3;
        width: 100%;
        justify-content: center;
        gap: 1.5rem;
    }

    .results-grid {
        grid-template-columns: 1fr;
    }

    .results-grid .overlay-card {
        grid-column: span 1;
    }

    .metrics-panel {
        grid-template-columns: 1fr;
    }

    .metrics-panel .metric-card:last-child {
        grid-column: span 1;
    }

    .features-grid,
    .tech-grid {
        grid-template-columns: 1fr;
    }

    .features-grid .feature-card:last-child {
        grid-column: span 1;
    }

    .action-buttons {
        flex-direction: column;
    }

    .action-buttons .btn {
        width: 100%;
        justify-content: center;
    }

    .upload-card {
        padding: 2.5rem 1.5rem;
    }
}

@media (max-width: 480px) {
    .hero-title {
        font-size: 1.75rem;
    }

    .hero-subtitle {
        font-size: 1rem;
    }

    .metric-card {
        flex-direction: column;
        text-align: center;
    }
}

/* ===== UTILITY CLASSES ===== */
.hidden {
    display: none !important;
}

.sr-only {
    position: absolute;
    width: 1px;
    height: 1px;
    padding: 0;
    margin: -1px;
    overflow: hidden;
    clip: rect(0, 0, 0, 0);
    white-space: nowrap;
    border: 0;
}
data/1.jpg
ADDED
data/2.jpeg
ADDED
data/3.jpeg
ADDED
notebooks/mobileNetUnetAttention.py
ADDED
@@ -0,0 +1,844 @@
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torchvision.models import mobilenet_v2
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import cv2
from glob import glob
import albumentations as A
from albumentations.pytorch import ToTensorV2

# Set random seeds
np.random.seed(42)
torch.manual_seed(42)
if torch.cuda.is_available():
    torch.cuda.manual_seed(42)

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")

# Paths
TRAIN_IMG_PATH = '/kaggle/input/isic-2018/data/images/train'
TRAIN_MASK_PATH = '/kaggle/input/isic-2018/data/annotations/train'
VAL_IMG_PATH = '/kaggle/input/isic-2018/data/images/val'
VAL_MASK_PATH = '/kaggle/input/isic-2018/data/annotations/val'
TEST_IMG_PATH = '/kaggle/input/isic-2018/data/images/test'
TEST_MASK_PATH = '/kaggle/input/isic-2018/data/annotations/test'

IMG_SIZE = 256
BATCH_SIZE = 32
EPOCHS = 50
LEARNING_RATE = 1e-4
ENCODER_FREEZE_EPOCHS = 5  # Number of epochs to freeze encoder


# =============================================
# 1. ADVANCED DATA AUGMENTATION (Albumentations)
# =============================================

def get_train_transforms(img_size=IMG_SIZE):
    """
    Advanced augmentation pipeline for training:
    - Horizontal/Vertical Flips: Lesions are orientation-independent
    - Random Rotation: Skin images can be taken at any angle
    - Color Jitter: To handle different camera lighting and skin tones
    - CoarseDropout (Cutout): Forces model to look at edges if center is obscured
    """
    return A.Compose([
        A.Resize(img_size, img_size),
        # Geometric augmentations
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.RandomRotate90(p=0.5),
        # Using Affine instead of ShiftScaleRotate (recommended in Albumentations v2.x)
        A.Affine(
            translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)},
            scale=(0.8, 1.2),
            rotate=(-45, 45),
            border_mode=cv2.BORDER_REFLECT,  # Use border_mode instead of mode
            p=0.5
        ),
        # Elastic deformation for shape variation
        A.ElasticTransform(alpha=120, sigma=120 * 0.05, p=0.3),
        # Color augmentations
        A.ColorJitter(
            brightness=0.2,
            contrast=0.2,
            saturation=0.2,
            hue=0.1,
            p=0.5
        ),
        A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
        A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.3),
        # Blur and noise augmentations
        A.GaussianBlur(blur_limit=(3, 5), p=0.2),
        A.GaussNoise(std_range=(0.02, 0.1), p=0.2),  # Updated for Albumentations v2.x
        # CoarseDropout (Cutout): Forces the model to look at edges (Albumentations v2.x API)
        A.CoarseDropout(
            num_holes_range=(1, 8),
            hole_height_range=(img_size // 16, img_size // 8),
            hole_width_range=(img_size // 16, img_size // 8),
            fill=0.0,  # Fill with black (numeric value, not string)
            fill_mask=0.0,  # Fill mask with 0 (background)
            p=0.5
        ),
        # Normalize
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ])
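
# --- Editorial sketch, not part of the original notebook ---
# Quick sanity check of the augmentation pipeline before training: feed a
# synthetic image/mask pair through it and confirm the output shapes/dtypes.
# Nothing here is assumed beyond the imports and the function defined above.
_demo_img = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
_demo_mask = (np.random.rand(512, 512) > 0.5).astype(np.float32)
_demo_out = get_train_transforms()(image=_demo_img, mask=_demo_mask)
print(_demo_out['image'].shape, _demo_out['mask'].shape)
# Expected: torch.Size([3, 256, 256]) and torch.Size([256, 256])
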
def get_val_transforms(img_size=IMG_SIZE):
    """Minimal transforms for validation/test (only resize and normalize)"""
    return A.Compose([
        A.Resize(img_size, img_size),
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ])


# Custom Dataset with Albumentations
class ISICDataset(Dataset):
    def __init__(self, img_paths, mask_paths, transform=None, img_size=IMG_SIZE):
        self.img_paths = img_paths
        self.mask_paths = mask_paths
        self.transform = transform
        self.img_size = img_size

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        # Load image
        img = cv2.imread(self.img_paths[idx])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Load mask
        mask = cv2.imread(self.mask_paths[idx], cv2.IMREAD_GRAYSCALE)
        mask = mask / 255.0  # Normalize mask to [0, 1]

        # Apply augmentations
        if self.transform:
            transformed = self.transform(image=img, mask=mask)
            img = transformed['image']
            mask = transformed['mask']
        else:
            # Fallback if no transform (shouldn't happen)
            img = cv2.resize(img, (self.img_size, self.img_size))
            img = img / 255.0
            img = torch.FloatTensor(np.transpose(img, (2, 0, 1)))
            mask = cv2.resize(mask, (self.img_size, self.img_size))
            mask = torch.FloatTensor(mask)

        # Ensure mask has channel dimension
        if mask.ndim == 2:
            mask = mask.unsqueeze(0)

        return img.float(), mask.float()


def get_matching_files(img_dir, mask_dir):
    """Find matching image and mask pairs by filename"""
    img_files = {}
    for ext in ['*.jpg', '*.jpeg', '*.png', '*.bmp']:
        for path in glob(os.path.join(img_dir, ext)):
            basename = os.path.splitext(os.path.basename(path))[0]
            img_files[basename] = path

    mask_files = {}
    for ext in ['*.jpg', '*.jpeg', '*.png', '*.bmp']:
        for path in glob(os.path.join(mask_dir, ext)):
            basename = os.path.splitext(os.path.basename(path))[0]
            basename = basename.replace('_segmentation', '').replace('_mask', '')
            mask_files[basename] = path

    common = set(img_files.keys()) & set(mask_files.keys())

    img_paths = [img_files[k] for k in sorted(common)]
    mask_paths = [mask_files[k] for k in sorted(common)]

    return img_paths, mask_paths


# =============================================
# 3. ARCHITECTURE REFINEMENTS - Attention Gate
# =============================================

class AttentionGate(nn.Module):
    """
    Attention U-Net gate for skip connections.
    Helps the decoder focus on the lesion and ignore background noise/hairs.
    """
    def __init__(self, F_g, F_l, F_int):
        """
        Args:
            F_g: Number of channels in gating signal (from decoder)
            F_l: Number of channels in skip connection (from encoder)
            F_int: Number of intermediate channels
        """
        super(AttentionGate, self).__init__()

        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )

        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )

        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid()
        )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        """
        Args:
            g: Gating signal from decoder (coarse features)
            x: Skip connection from encoder (fine features)
        """
        g1 = self.W_g(g)
        x1 = self.W_x(x)

        # Upsample g1 to match x1's spatial size if needed
        if g1.shape[2:] != x1.shape[2:]:
            g1 = nn.functional.interpolate(g1, size=x1.shape[2:], mode='bilinear', align_corners=True)

        psi = self.relu(g1 + x1)
        psi = self.psi(psi)

        return x * psi
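
# --- Editorial sketch, not part of the original notebook ---
# The attention gate takes a decoder feature map g and an encoder skip x and
# returns x re-weighted by a learned [0, 1] attention map, so the output has
# the same shape as x. A quick shape check with the channel sizes used for
# the deepest skip connection in the model below:
_gate = AttentionGate(F_g=256, F_l=96, F_int=64)
_g = torch.randn(1, 256, 16, 16)   # decoder (gating) features
_x = torch.randn(1, 96, 16, 16)    # encoder skip features
print(_gate(_g, _x).shape)         # torch.Size([1, 96, 16, 16]), same shape as x
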
# Boundary Refinement Head
class BoundaryRefinementHead(nn.Module):
    def __init__(self, in_channels, filters=64):
        super(BoundaryRefinementHead, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, filters, kernel_size=3, padding=1)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(filters, filters, kernel_size=3, padding=1)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(filters, 1, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.relu1(self.conv1(x))
        x = self.relu2(self.conv2(x))
        x = self.sigmoid(self.conv3(x))
        return x


# =============================================
# 3. ARCHITECTURE - Attention U-Net with Deep Supervision
# =============================================

class AttentionUNetMobileNet(nn.Module):
    """
    Enhanced U-Net with:
    - MobileNetV2 encoder (pre-trained)
    - Attention Gates on skip connections
    - Deep Supervision (multi-scale loss)
    - Encoder freezing support for first N epochs
    """
    def __init__(self, deep_supervision=True):
        super(AttentionUNetMobileNet, self).__init__()
        self.deep_supervision = deep_supervision

        # Encoder (MobileNetV2 - pretrained)
        mobilenet = mobilenet_v2(weights='DEFAULT')
        self.encoder_features = mobilenet.features

        # Attention Gates for skip connections
        self.att1 = AttentionGate(F_g=256, F_l=96, F_int=64)
        self.att2 = AttentionGate(F_g=128, F_l=32, F_int=32)
        self.att3 = AttentionGate(F_g=64, F_l=24, F_int=16)
        self.att4 = AttentionGate(F_g=32, F_l=16, F_int=8)

        # Decoder with skip connections
        self.up1 = nn.ConvTranspose2d(1280, 256, kernel_size=2, stride=2)
        self.dec1_conv1 = nn.Conv2d(256 + 96, 256, kernel_size=3, padding=1)
        self.dec1_bn1 = nn.BatchNorm2d(256)
        self.dec1_relu1 = nn.ReLU(inplace=True)
        self.dec1_conv2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.dec1_bn2 = nn.BatchNorm2d(256)
        self.dec1_relu2 = nn.ReLU(inplace=True)

        self.up2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.dec2_conv1 = nn.Conv2d(128 + 32, 128, kernel_size=3, padding=1)
        self.dec2_bn1 = nn.BatchNorm2d(128)
        self.dec2_relu1 = nn.ReLU(inplace=True)
        self.dec2_conv2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.dec2_bn2 = nn.BatchNorm2d(128)
        self.dec2_relu2 = nn.ReLU(inplace=True)

        self.up3 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.dec3_conv1 = nn.Conv2d(64 + 24, 64, kernel_size=3, padding=1)
        self.dec3_bn1 = nn.BatchNorm2d(64)
        self.dec3_relu1 = nn.ReLU(inplace=True)
        self.dec3_conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.dec3_bn2 = nn.BatchNorm2d(64)
        self.dec3_relu2 = nn.ReLU(inplace=True)

        self.up4 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)
        self.dec4_conv1 = nn.Conv2d(32 + 16, 32, kernel_size=3, padding=1)
        self.dec4_bn1 = nn.BatchNorm2d(32)
        self.dec4_relu1 = nn.ReLU(inplace=True)
        self.dec4_conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.dec4_bn2 = nn.BatchNorm2d(32)
        self.dec4_relu2 = nn.ReLU(inplace=True)

        # Additional upsampling to reach 256x256
        self.up5 = nn.ConvTranspose2d(32, 16, kernel_size=2, stride=2)
        self.dec5_conv1 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
        self.dec5_bn1 = nn.BatchNorm2d(16)
        self.dec5_relu1 = nn.ReLU(inplace=True)
        self.dec5_conv2 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
        self.dec5_bn2 = nn.BatchNorm2d(16)
        self.dec5_relu2 = nn.ReLU(inplace=True)

        # Main segmentation output
        self.seg_output = nn.Sequential(
            nn.Conv2d(16, 1, kernel_size=1),
            nn.Sigmoid()
        )

        # Boundary refinement head
        self.boundary_head = BoundaryRefinementHead(16, filters=64)

        # Deep supervision outputs (at different scales)
        if self.deep_supervision:
            self.ds_out1 = nn.Sequential(
                nn.Conv2d(256, 1, kernel_size=1),
                nn.Sigmoid()
            )
            self.ds_out2 = nn.Sequential(
                nn.Conv2d(128, 1, kernel_size=1),
                nn.Sigmoid()
            )
            self.ds_out3 = nn.Sequential(
                nn.Conv2d(64, 1, kernel_size=1),
                nn.Sigmoid()
            )
            self.ds_out4 = nn.Sequential(
                nn.Conv2d(32, 1, kernel_size=1),
                nn.Sigmoid()
            )

    def freeze_encoder(self, num_layers=7):
        """Freeze the first N layers of the encoder to preserve pretrained features"""
        for idx, layer in enumerate(self.encoder_features):
            if idx < num_layers:
                for param in layer.parameters():
                    param.requires_grad = False
        print(f"Froze first {num_layers} encoder layers")

    def unfreeze_encoder(self):
        """Unfreeze all encoder layers for fine-tuning"""
        for layer in self.encoder_features:
            for param in layer.parameters():
                param.requires_grad = True
        print("Unfroze all encoder layers")

    def forward(self, x):
        # Encoder with skip connections
        skips = []
        for idx, layer in enumerate(self.encoder_features):
            x = layer(x)
            if idx in [1, 3, 6, 13, 18]:  # Save skip connections
                skips.append(x)

        ds_outputs = []  # Deep supervision outputs

        # Decoder Stage 1
        d1 = self.up1(x)
        skip1_att = self.att1(d1, skips[3])  # Apply attention
        d1 = torch.cat([d1, skip1_att], dim=1)
        d1 = self.dec1_relu1(self.dec1_bn1(self.dec1_conv1(d1)))
        d1 = self.dec1_relu2(self.dec1_bn2(self.dec1_conv2(d1)))
        if self.deep_supervision and self.training:
            ds_outputs.append(self.ds_out1(d1))

        # Decoder Stage 2
        d2 = self.up2(d1)
        skip2_att = self.att2(d2, skips[2])
        d2 = torch.cat([d2, skip2_att], dim=1)
        d2 = self.dec2_relu1(self.dec2_bn1(self.dec2_conv1(d2)))
        d2 = self.dec2_relu2(self.dec2_bn2(self.dec2_conv2(d2)))
        if self.deep_supervision and self.training:
            ds_outputs.append(self.ds_out2(d2))

        # Decoder Stage 3
        d3 = self.up3(d2)
        skip3_att = self.att3(d3, skips[1])
        d3 = torch.cat([d3, skip3_att], dim=1)
        d3 = self.dec3_relu1(self.dec3_bn1(self.dec3_conv1(d3)))
        d3 = self.dec3_relu2(self.dec3_bn2(self.dec3_conv2(d3)))
        if self.deep_supervision and self.training:
            ds_outputs.append(self.ds_out3(d3))

        # Decoder Stage 4
        d4 = self.up4(d3)
        skip4_att = self.att4(d4, skips[0])
        d4 = torch.cat([d4, skip4_att], dim=1)
        d4 = self.dec4_relu1(self.dec4_bn1(self.dec4_conv1(d4)))
        d4 = self.dec4_relu2(self.dec4_bn2(self.dec4_conv2(d4)))
        if self.deep_supervision and self.training:
            ds_outputs.append(self.ds_out4(d4))

        # Final upsampling to 256x256
        d5 = self.up5(d4)
        d5 = self.dec5_relu1(self.dec5_bn1(self.dec5_conv1(d5)))
        d5 = self.dec5_relu2(self.dec5_bn2(self.dec5_conv2(d5)))

        # Main output
        seg_output = self.seg_output(d5)

        # Boundary refinement
        boundary_output = self.boundary_head(d5)

        # Average outputs
        final_output = (seg_output + boundary_output) / 2

        if self.deep_supervision and self.training:
            return final_output, ds_outputs
        return final_output
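
# --- Editorial sketch, not part of the original notebook ---
# End-to-end shape check: a 256x256 RGB batch should come back as a single
# 1-channel probability map of the same resolution in eval mode (the four
# extra multi-scale maps are only returned in training mode when deep
# supervision is enabled).
_m = AttentionUNetMobileNet(deep_supervision=True).eval()
with torch.no_grad():
    _out = _m(torch.randn(1, 3, 256, 256))
print(_out.shape)  # torch.Size([1, 1, 256, 256])
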
# =============================================
# 4. LOSS FUNCTION TUNING - Tversky Loss + BCE/Dice weighting
# =============================================

class TverskyLoss(nn.Module):
    """
    Tversky Loss: A generalization of Dice loss that allows you to
    penalize False Negatives (missing part of a lesion) more than False Positives.

    Useful for class imbalance where lesion is much smaller than background.
    """
    def __init__(self, alpha=0.7, beta=0.3, smooth=1e-7):
        """
        Args:
            alpha: Weight for False Negatives (higher = penalize FN more)
            beta: Weight for False Positives
            smooth: Smoothing factor to avoid division by zero
        """
        super(TverskyLoss, self).__init__()
        self.alpha = alpha
        self.beta = beta
        self.smooth = smooth

    def forward(self, pred, target):
        # Flatten predictions and targets
        pred = pred.view(-1)
        target = target.view(-1)

        # True Positives, False Negatives, False Positives
        TP = (pred * target).sum()
        FN = (target * (1 - pred)).sum()
        FP = ((1 - target) * pred).sum()

        tversky = (TP + self.smooth) / (TP + self.alpha * FN + self.beta * FP + self.smooth)

        return 1 - tversky
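
# --- Editorial sketch, not part of the original notebook ---
# Worked example of the asymmetry: with pred = [1, 0] and target = [1, 1] we
# get TP = 1, FN = 1, FP = 0, so the Tversky index is 1 / (1 + 0.7) ~ 0.588
# and the loss is ~ 0.412. Swapping the error to a false positive
# (pred = [1, 1], target = [1, 0]) gives 1 / (1 + 0.3) ~ 0.769, i.e. a
# smaller loss of ~ 0.231: missing lesion pixels costs more than
# over-segmenting, which is the intended behavior here.
print(TverskyLoss()(torch.tensor([1., 0.]), torch.tensor([1., 1.])))  # ~0.4118
print(TverskyLoss()(torch.tensor([1., 1.]), torch.tensor([1., 0.])))  # ~0.2308
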
# Dice Coefficient metric
def dice_coefficient(pred, target, threshold=0.5, smooth=1e-7):
    pred = (pred > threshold).float()
    target = (target > threshold).float()
    intersection = (pred * target).sum()
    dice = (2. * intersection + smooth) / (pred.sum() + target.sum() + smooth)
    return dice.item()


# Dice Loss
class DiceLoss(nn.Module):
    def __init__(self, smooth=1e-7):
        super(DiceLoss, self).__init__()
        self.smooth = smooth

    def forward(self, pred, target):
        intersection = (pred * target).sum(dim=(2, 3))
        dice = (2. * intersection + self.smooth) / (pred.sum(dim=(2, 3)) + target.sum(dim=(2, 3)) + self.smooth)
        return 1 - dice.mean()


# Combined Loss with Tversky + BCE + Dice
class CombinedLoss(nn.Module):
    """
    Enhanced loss function combining:
    - BCE Loss (pixel-wise accuracy)
    - Dice Loss (overlap quality)
    - Tversky Loss (class imbalance handling)

    Weights: 0.3 BCE / 0.4 Dice / 0.3 Tversky (prioritizing Dice for overlap quality)
    """
    def __init__(self, bce_weight=0.3, dice_weight=0.4, tversky_weight=0.3,
                 tversky_alpha=0.7, tversky_beta=0.3):
        super(CombinedLoss, self).__init__()
        self.bce = nn.BCELoss()
        self.dice = DiceLoss()
        self.tversky = TverskyLoss(alpha=tversky_alpha, beta=tversky_beta)
        self.bce_weight = bce_weight
        self.dice_weight = dice_weight
        self.tversky_weight = tversky_weight

    def forward(self, pred, target):
        bce_loss = self.bce(pred, target)
        dice_loss = self.dice(pred, target)
        tversky_loss = self.tversky(pred, target)
        return (self.bce_weight * bce_loss +
                self.dice_weight * dice_loss +
                self.tversky_weight * tversky_loss)


class DeepSupervisionLoss(nn.Module):
    """
    Loss function for deep supervision with multi-scale outputs.
    Calculates loss at different decoder scales and averages them.
    """
    def __init__(self, base_criterion, weights=None):
        super(DeepSupervisionLoss, self).__init__()
        self.base_criterion = base_criterion
        # Weights for each deep supervision output (shallower = less weight)
        self.weights = weights or [0.5, 0.3, 0.2, 0.1]

    def forward(self, main_output, ds_outputs, target):
        # Main output loss (full weight)
        loss = self.base_criterion(main_output, target)

        # Deep supervision losses at different scales
        for i, ds_out in enumerate(ds_outputs):
            # Upsample deep supervision output to target size
            ds_upsampled = nn.functional.interpolate(
                ds_out, size=target.shape[2:], mode='bilinear', align_corners=True
            )
            loss += self.weights[i] * self.base_criterion(ds_upsampled, target)

        return loss


# =============================================
# TRAINING AND VALIDATION FUNCTIONS
# =============================================

def train_epoch(model, dataloader, criterion, optimizer, device, deep_supervision=True):
    """Training function with deep supervision support"""
    model.train()
    running_loss = 0.0
    running_acc = 0.0
    running_dice = 0.0

    pbar = tqdm(dataloader, desc='Training')
    for images, masks in pbar:
        images = images.to(device)
        masks = masks.to(device)

        optimizer.zero_grad()

        if deep_supervision:
            outputs, ds_outputs = model(images)
            loss = criterion(outputs, ds_outputs, masks)
        else:
            outputs = model(images)
            loss = criterion.base_criterion(outputs, masks)

        loss.backward()
        optimizer.step()

        # Metrics (use main output)
        running_loss += loss.item()
        acc = ((outputs > 0.5).float() == masks).float().mean()
        running_acc += acc.item()
        running_dice += dice_coefficient(outputs, masks)

        pbar.set_postfix({'loss': loss.item(), 'acc': acc.item()})

    epoch_loss = running_loss / len(dataloader)
    epoch_acc = running_acc / len(dataloader)
    epoch_dice = running_dice / len(dataloader)

    return epoch_loss, epoch_acc, epoch_dice


def validate_epoch(model, dataloader, criterion, device):
    """Validation function (no deep supervision during eval)"""
    model.eval()
    running_loss = 0.0
    running_acc = 0.0
    running_dice = 0.0

    # Get base criterion for validation (no deep supervision)
    if hasattr(criterion, 'base_criterion'):
        val_criterion = criterion.base_criterion
    else:
        val_criterion = criterion

    with torch.no_grad():
        pbar = tqdm(dataloader, desc='Validation')
        for images, masks in pbar:
            images = images.to(device)
            masks = masks.to(device)

            outputs = model(images)  # No deep supervision during eval
            loss = val_criterion(outputs, masks)

            running_loss += loss.item()
            acc = ((outputs > 0.5).float() == masks).float().mean()
            running_acc += acc.item()
            running_dice += dice_coefficient(outputs, masks)

            pbar.set_postfix({'loss': loss.item(), 'acc': acc.item()})

    epoch_loss = running_loss / len(dataloader)
    epoch_acc = running_acc / len(dataloader)
    epoch_dice = running_dice / len(dataloader)

    return epoch_loss, epoch_acc, epoch_dice


# =============================================
# MAIN EXECUTION
# =============================================

# Load datasets
print("Loading training data...")
train_img_paths, train_mask_paths = get_matching_files(TRAIN_IMG_PATH, TRAIN_MASK_PATH)
print(f"Found {len(train_img_paths)} training pairs")

print("\nLoading validation data...")
val_img_paths, val_mask_paths = get_matching_files(VAL_IMG_PATH, VAL_MASK_PATH)
print(f"Found {len(val_img_paths)} validation pairs")

print("\nLoading test data...")
test_img_paths, test_mask_paths = get_matching_files(TEST_IMG_PATH, TEST_MASK_PATH)
print(f"Found {len(test_img_paths)} test pairs")

# Create datasets with augmentation transforms
train_dataset = ISICDataset(train_img_paths, train_mask_paths, transform=get_train_transforms())
val_dataset = ISICDataset(val_img_paths, val_mask_paths, transform=get_val_transforms())
test_dataset = ISICDataset(test_img_paths, test_mask_paths, transform=get_val_transforms())

train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)

# Build model with Attention U-Net and Deep Supervision
print("\nBuilding Attention U-Net model with Deep Supervision...")
model = AttentionUNetMobileNet(deep_supervision=True).to(device)

# Freeze encoder for first N epochs
model.freeze_encoder(num_layers=7)

# Loss function with improved weighting (0.3 BCE / 0.4 Dice / 0.3 Tversky)
base_criterion = CombinedLoss(
    bce_weight=0.3,
    dice_weight=0.4,
    tversky_weight=0.3,
    tversky_alpha=0.7,  # Higher penalty for False Negatives
    tversky_beta=0.3    # Lower penalty for False Positives
)
criterion = DeepSupervisionLoss(base_criterion)

# Optimizer
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

# Learning Rate Scheduler (ReduceLROnPlateau)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,
    mode='min',   # Reduce LR when val_loss stops decreasing
    patience=3,   # Wait 3 epochs before reducing
    factor=0.5,   # Reduce LR by half
    min_lr=1e-7   # Minimum LR
)

total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Model built. Total parameters: {total_params:,}")
print(f"Trainable parameters: {trainable_params:,}")

# Training loop with early stopping, LR scheduling, and encoder unfreezing
print("\nStarting training...")
print(f"Encoder will be unfrozen after epoch {ENCODER_FREEZE_EPOCHS}")
history = {
    'train_loss': [], 'train_acc': [], 'train_dice': [],
    'val_loss': [], 'val_acc': [], 'val_dice': [],
    'learning_rate': []
}

best_val_loss = float('inf')
patience = 7  # Increased patience due to LR scheduling
patience_counter = 0

for epoch in range(EPOCHS):
    print(f"\nEpoch {epoch+1}/{EPOCHS}")

    # Unfreeze encoder after N epochs (for fine-tuning)
    if epoch == ENCODER_FREEZE_EPOCHS:
        model.unfreeze_encoder()
        print("Starting fine-tuning of encoder layers")

    # Log current learning rate
    current_lr = optimizer.param_groups[0]['lr']
    history['learning_rate'].append(current_lr)
    print(f"Current LR: {current_lr:.2e}")

    # Train
    train_loss, train_acc, train_dice = train_epoch(
        model, train_loader, criterion, optimizer, device, deep_supervision=True
    )
    history['train_loss'].append(train_loss)
    history['train_acc'].append(train_acc)
    history['train_dice'].append(train_dice)

    # Validate
    val_loss, val_acc, val_dice = validate_epoch(model, val_loader, criterion, device)
    history['val_loss'].append(val_loss)
    history['val_acc'].append(val_acc)
    history['val_dice'].append(val_dice)

    print(f"Train Loss: {train_loss:.4f}, Acc: {train_acc:.4f}, Dice: {train_dice:.4f}")
    print(f"Val Loss: {val_loss:.4f}, Acc: {val_acc:.4f}, Dice: {val_dice:.4f}")

    # Step the learning rate scheduler based on validation loss
    scheduler.step(val_loss)

    # Early stopping
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        patience_counter = 0
        torch.save(model.state_dict(), 'best_model.pth')
        print("✓ Model saved!")
    else:
        patience_counter += 1
        if patience_counter >= patience:
            print(f"\nEarly stopping triggered after {epoch+1} epochs")
            break

# Load best model
model.load_state_dict(torch.load('best_model.pth'))

# Plot training history
fig, axes = plt.subplots(2, 2, figsize=(14, 10))

# Loss
axes[0, 0].plot(history['train_loss'], label='Train Loss')
axes[0, 0].plot(history['val_loss'], label='Val Loss')
axes[0, 0].set_title('Model Loss (BCE + Dice + Tversky)', fontsize=14, fontweight='bold')
axes[0, 0].set_xlabel('Epoch')
axes[0, 0].set_ylabel('Loss')
axes[0, 0].legend()
axes[0, 0].grid(True, alpha=0.3)

# Accuracy
axes[0, 1].plot(history['train_acc'], label='Train Accuracy')
axes[0, 1].plot(history['val_acc'], label='Val Accuracy')
axes[0, 1].set_title('Model Accuracy', fontsize=14, fontweight='bold')
axes[0, 1].set_xlabel('Epoch')
axes[0, 1].set_ylabel('Accuracy')
axes[0, 1].legend()
axes[0, 1].grid(True, alpha=0.3)

# Dice Coefficient
axes[1, 0].plot(history['train_dice'], label='Train Dice')
axes[1, 0].plot(history['val_dice'], label='Val Dice')
axes[1, 0].set_title('Dice Coefficient', fontsize=14, fontweight='bold')
axes[1, 0].set_xlabel('Epoch')
axes[1, 0].set_ylabel('Dice')
axes[1, 0].legend()
axes[1, 0].grid(True, alpha=0.3)

# Learning Rate
axes[1, 1].plot(history['learning_rate'], label='Learning Rate', color='orange')
axes[1, 1].set_title('Learning Rate Schedule', fontsize=14, fontweight='bold')
axes[1, 1].set_xlabel('Epoch')
axes[1, 1].set_ylabel('Learning Rate')
axes[1, 1].set_yscale('log')
axes[1, 1].legend()
axes[1, 1].grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('training_history.png', dpi=150, bbox_inches='tight')
plt.show()

# Evaluate on test set
print("\nEvaluating on test set...")
test_loss, test_acc, test_dice = validate_epoch(model, test_loader, criterion, device)
print(f"Test Loss: {test_loss:.4f}")
print(f"Test Accuracy: {test_acc:.4f}")
print(f"Test Dice Coefficient: {test_dice:.4f}")

# Visualize 3 test predictions
print("\nGenerating predictions...")
model.eval()
test_samples = []
for i in range(3):
    img, mask = test_dataset[i]
    test_samples.append((img, mask))

fig, axes = plt.subplots(3, 3, figsize=(12, 12))

# Denormalization for visualization
mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)

with torch.no_grad():
    for i, (img, mask) in enumerate(test_samples):
        img_input = img.unsqueeze(0).to(device)
        pred = model(img_input).cpu().squeeze()

        # Denormalize image for display
        img_denorm = img * std + mean
        img_denorm = torch.clamp(img_denorm, 0, 1)
        img_display = img_denorm.permute(1, 2, 0).numpy()
        mask_display = mask.squeeze().numpy()
        pred_display = pred.numpy()

        # Original image
        axes[i, 0].imshow(img_display)
        axes[i, 0].set_title('Original Image', fontweight='bold')
        axes[i, 0].axis('off')

        # Ground truth mask
        axes[i, 1].imshow(mask_display, cmap='gray')
        axes[i, 1].set_title('Ground Truth', fontweight='bold')
        axes[i, 1].axis('off')

        # Predicted mask
        axes[i, 2].imshow(pred_display, cmap='gray')
        axes[i, 2].set_title('Prediction', fontweight='bold')
        axes[i, 2].axis('off')

plt.tight_layout()
plt.savefig('test_predictions.png', dpi=150, bbox_inches='tight')
plt.show()

print("\n✓ Training completed successfully!")
print(f"Best validation loss: {best_val_loss:.4f}")
print("\n=== ENHANCEMENTS APPLIED ===")
print("1. ✓ Advanced Data Augmentation (Albumentations)")
print(" - HorizontalFlip, VerticalFlip, RandomRotate90")
print(" - Affine (shift/scale/rotate), ElasticTransform")
print(" - ColorJitter, RandomBrightnessContrast, HueSaturationValue")
print(" - GaussianBlur, GaussNoise")
print(" - CoarseDropout (Cutout)")
print("2. ✓ Learning Rate Scheduler (ReduceLROnPlateau)")
print(" - Patience: 3, Factor: 0.5, Min LR: 1e-7")
print("3. ✓ Architecture Refinements")
print(" - Attention Gates on skip connections")
print(" - Deep Supervision at multiple scales")
print(f" - Encoder freezing for first {ENCODER_FREEZE_EPOCHS} epochs")
print("4. ✓ Loss Function Tuning")
print(" - Combined: 0.3 BCE + 0.4 Dice + 0.3 Tversky")
print(" - Tversky: alpha=0.7 (penalize FN), beta=0.3 (FP)")
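
A minimal inference sketch (an editorial addition, not part of the committed notebook): once training has produced best_model.pth, a single image can be segmented with the classes and transforms defined above. The input file name lesion.jpg is a placeholder.

infer_model = AttentionUNetMobileNet(deep_supervision=True).to(device)
infer_model.load_state_dict(torch.load('best_model.pth', map_location=device))
infer_model.eval()

rgb = cv2.cvtColor(cv2.imread('lesion.jpg'), cv2.COLOR_BGR2RGB)  # placeholder path
x = get_val_transforms()(image=rgb)['image'].unsqueeze(0).to(device)
with torch.no_grad():
    pred = infer_model(x).squeeze().cpu().numpy()
cv2.imwrite('lesion_mask.png', (pred > 0.5).astype(np.uint8) * 255)
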
server/main.py
ADDED
@@ -0,0 +1,440 @@
"""
Skin Cancer Lesion Segmentation - FastAPI Backend
================================================
This API provides endpoints for skin lesion segmentation using an
Attention U-Net model with MobileNetV2 encoder.

Features:
- Real-time segmentation of dermoscopic images
- Confidence score calculation
- Lesion area analysis
- Support for multiple image formats (JPEG, PNG, WebP)
"""

import os
import io
import base64
import numpy as np
import torch
import torch.nn as nn
from torchvision.models import mobilenet_v2
import cv2
from PIL import Image
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse, JSONResponse
from pydantic import BaseModel
from typing import Optional
import albumentations as A
from albumentations.pytorch import ToTensorV2

# =============================================
# CONFIGURATION
# =============================================
MODEL_PATH = os.path.join(os.path.dirname(__file__), "..", "models", "model.pth")
IMG_SIZE = 256
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# =============================================
# MODEL ARCHITECTURE (must match training)
# =============================================

class AttentionGate(nn.Module):
    """Attention U-Net gate for skip connections."""
    def __init__(self, F_g, F_l, F_int):
        super(AttentionGate, self).__init__()
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid()
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        g1 = self.W_g(g)
        x1 = self.W_x(x)
        if g1.shape[2:] != x1.shape[2:]:
            g1 = nn.functional.interpolate(g1, size=x1.shape[2:], mode='bilinear', align_corners=True)
        psi = self.relu(g1 + x1)
        psi = self.psi(psi)
        return x * psi


class BoundaryRefinementHead(nn.Module):
    """Refines segmentation boundaries."""
    def __init__(self, in_channels, filters=64):
        super(BoundaryRefinementHead, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, filters, kernel_size=3, padding=1)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(filters, filters, kernel_size=3, padding=1)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(filters, 1, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.relu1(self.conv1(x))
        x = self.relu2(self.conv2(x))
        x = self.sigmoid(self.conv3(x))
        return x


class AttentionUNetMobileNet(nn.Module):
    """
    Attention U-Net with MobileNetV2 encoder for skin lesion segmentation.
    """
    def __init__(self, deep_supervision=False):
        super(AttentionUNetMobileNet, self).__init__()
        self.deep_supervision = deep_supervision

        # Encoder (MobileNetV2 - pretrained)
        mobilenet = mobilenet_v2(weights='DEFAULT')
        self.encoder_features = mobilenet.features

        # Attention Gates
        self.att1 = AttentionGate(F_g=256, F_l=96, F_int=64)
        self.att2 = AttentionGate(F_g=128, F_l=32, F_int=32)
        self.att3 = AttentionGate(F_g=64, F_l=24, F_int=16)
        self.att4 = AttentionGate(F_g=32, F_l=16, F_int=8)

        # Decoder
        self.up1 = nn.ConvTranspose2d(1280, 256, kernel_size=2, stride=2)
        self.dec1_conv1 = nn.Conv2d(256 + 96, 256, kernel_size=3, padding=1)
        self.dec1_bn1 = nn.BatchNorm2d(256)
        self.dec1_relu1 = nn.ReLU(inplace=True)
        self.dec1_conv2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.dec1_bn2 = nn.BatchNorm2d(256)
        self.dec1_relu2 = nn.ReLU(inplace=True)

        self.up2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.dec2_conv1 = nn.Conv2d(128 + 32, 128, kernel_size=3, padding=1)
        self.dec2_bn1 = nn.BatchNorm2d(128)
        self.dec2_relu1 = nn.ReLU(inplace=True)
        self.dec2_conv2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.dec2_bn2 = nn.BatchNorm2d(128)
        self.dec2_relu2 = nn.ReLU(inplace=True)

        self.up3 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.dec3_conv1 = nn.Conv2d(64 + 24, 64, kernel_size=3, padding=1)
        self.dec3_bn1 = nn.BatchNorm2d(64)
        self.dec3_relu1 = nn.ReLU(inplace=True)
        self.dec3_conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.dec3_bn2 = nn.BatchNorm2d(64)
        self.dec3_relu2 = nn.ReLU(inplace=True)

        self.up4 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)
        self.dec4_conv1 = nn.Conv2d(32 + 16, 32, kernel_size=3, padding=1)
        self.dec4_bn1 = nn.BatchNorm2d(32)
        self.dec4_relu1 = nn.ReLU(inplace=True)
        self.dec4_conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.dec4_bn2 = nn.BatchNorm2d(32)
        self.dec4_relu2 = nn.ReLU(inplace=True)

        self.up5 = nn.ConvTranspose2d(32, 16, kernel_size=2, stride=2)
        self.dec5_conv1 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
        self.dec5_bn1 = nn.BatchNorm2d(16)
        self.dec5_relu1 = nn.ReLU(inplace=True)
        self.dec5_conv2 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
        self.dec5_bn2 = nn.BatchNorm2d(16)
        self.dec5_relu2 = nn.ReLU(inplace=True)

        # Main output
        self.seg_output = nn.Sequential(
            nn.Conv2d(16, 1, kernel_size=1),
            nn.Sigmoid()
        )

        # Boundary head
        self.boundary_head = BoundaryRefinementHead(16, filters=64)

        # Deep supervision outputs (only used during training)
        if self.deep_supervision:
            self.ds_out1 = nn.Sequential(nn.Conv2d(256, 1, kernel_size=1), nn.Sigmoid())
            self.ds_out2 = nn.Sequential(nn.Conv2d(128, 1, kernel_size=1), nn.Sigmoid())
            self.ds_out3 = nn.Sequential(nn.Conv2d(64, 1, kernel_size=1), nn.Sigmoid())
            self.ds_out4 = nn.Sequential(nn.Conv2d(32, 1, kernel_size=1), nn.Sigmoid())

    def forward(self, x):
        # Encoder
        skips = []
        for idx, layer in enumerate(self.encoder_features):
            x = layer(x)
            if idx in [1, 3, 6, 13, 18]:
                skips.append(x)

        # Decoder Stage 1
        d1 = self.up1(x)
        skip1_att = self.att1(d1, skips[3])
        d1 = torch.cat([d1, skip1_att], dim=1)
        d1 = self.dec1_relu1(self.dec1_bn1(self.dec1_conv1(d1)))
        d1 = self.dec1_relu2(self.dec1_bn2(self.dec1_conv2(d1)))

        # Decoder Stage 2
        d2 = self.up2(d1)
        skip2_att = self.att2(d2, skips[2])
        d2 = torch.cat([d2, skip2_att], dim=1)
        d2 = self.dec2_relu1(self.dec2_bn1(self.dec2_conv1(d2)))
        d2 = self.dec2_relu2(self.dec2_bn2(self.dec2_conv2(d2)))

        # Decoder Stage 3
        d3 = self.up3(d2)
        skip3_att = self.att3(d3, skips[1])
        d3 = torch.cat([d3, skip3_att], dim=1)
        d3 = self.dec3_relu1(self.dec3_bn1(self.dec3_conv1(d3)))
        d3 = self.dec3_relu2(self.dec3_bn2(self.dec3_conv2(d3)))

        # Decoder Stage 4
        d4 = self.up4(d3)
        skip4_att = self.att4(d4, skips[0])
        d4 = torch.cat([d4, skip4_att], dim=1)
        d4 = self.dec4_relu1(self.dec4_bn1(self.dec4_conv1(d4)))
        d4 = self.dec4_relu2(self.dec4_bn2(self.dec4_conv2(d4)))

        # Final upsampling
        d5 = self.up5(d4)
        d5 = self.dec5_relu1(self.dec5_bn1(self.dec5_conv1(d5)))
        d5 = self.dec5_relu2(self.dec5_bn2(self.dec5_conv2(d5)))

        # Main output
        seg_output = self.seg_output(d5)
        boundary_output = self.boundary_head(d5)
        final_output = (seg_output + boundary_output) / 2

        return final_output


# =============================================
# PREPROCESSING
# =============================================

def get_inference_transforms(img_size=IMG_SIZE):
    """Transform pipeline for inference."""
    return A.Compose([
        A.Resize(img_size, img_size),
        A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ])


def preprocess_image(image: np.ndarray) -> torch.Tensor:
    """Preprocess an RGB image for model inference."""
    # Convert to 3-channel RGB if needed. A 3-channel input is assumed to be
    # RGB already (segment_image converts BGR -> RGB before calling this),
    # so converting it again here would silently swap the channels back.
    if len(image.shape) == 2:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    elif image.shape[2] == 4:
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)

    # Apply transforms
    transform = get_inference_transforms()
    transformed = transform(image=image)
    img_tensor = transformed['image'].unsqueeze(0)

    return img_tensor


def postprocess_mask(mask: np.ndarray, original_size: tuple) -> np.ndarray:
    """Postprocess model output to original image size."""
    # Resize to original size
    mask_resized = cv2.resize(mask, (original_size[1], original_size[0]),
                              interpolation=cv2.INTER_LINEAR)
    return mask_resized


def mask_to_base64(mask: np.ndarray) -> str:
    """Convert numpy mask to base64 string."""
    # Normalize to 0-255
    mask_uint8 = (mask * 255).astype(np.uint8)
    # Encode as PNG
    _, buffer = cv2.imencode('.png', mask_uint8)
    return base64.b64encode(buffer).decode('utf-8')


def create_overlay(image: np.ndarray, mask: np.ndarray, alpha: float = 0.5) -> np.ndarray:
    """Create overlay of mask on original image."""
    # Create colored mask (magenta for lesion)
    colored_mask = np.zeros_like(image)
    colored_mask[:, :, 0] = int(255 * 0.94)  # R
    colored_mask[:, :, 1] = int(255 * 0.38)  # G
    colored_mask[:, :, 2] = int(255 * 0.57)  # B

    # Apply mask
    mask_3ch = np.stack([mask] * 3, axis=-1)
    overlay = image * (1 - mask_3ch * alpha) + colored_mask * (mask_3ch * alpha)

    return overlay.astype(np.uint8)


def image_to_base64(image: np.ndarray) -> str:
    """Convert numpy image to base64 string."""
    _, buffer = cv2.imencode('.png', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
    return base64.b64encode(buffer).decode('utf-8')


# =============================================
# FASTAPI APP
# =============================================

app = FastAPI(
    title="Skin Lesion Segmentation API",
    description="AI-powered skin cancer lesion segmentation using Attention U-Net",
    version="1.0.0"
)

# CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global model variable
model = None


@app.on_event("startup")
async def load_model():
    """Load the model on startup."""
    global model
    print(f"Loading model from {MODEL_PATH}...")
    print(f"Using device: {DEVICE}")

    try:
        model = AttentionUNetMobileNet(deep_supervision=False).to(DEVICE)

        if os.path.exists(MODEL_PATH):
            state_dict = torch.load(MODEL_PATH, map_location=DEVICE)
            model.load_state_dict(state_dict, strict=False)
            print("✓ Model loaded successfully!")
        else:
            print(f"⚠ Model file not found at {MODEL_PATH}")
            print(" The API will work but predictions will use untrained weights.")

        model.eval()
    except Exception as e:
        print(f"✗ Error loading model: {e}")
        model = AttentionUNetMobileNet(deep_supervision=False).to(DEVICE)
        model.eval()


class SegmentationResponse(BaseModel):
    """Response model for segmentation endpoint."""
    success: bool
    mask_base64: Optional[str] = None
    overlay_base64: Optional[str] = None
    confidence: Optional[float] = None
    lesion_area_percent: Optional[float] = None
    message: Optional[str] = None


@app.get("/", response_class=HTMLResponse)
async def read_root():
    """Redirect to frontend."""
    return """
    <html>
        <head>
            <meta http-equiv="refresh" content="0; url=/static/index.html" />
        </head>
        <body>
            <p>Redirecting to the application...</p>
        </body>
    </html>
    """


@app.get("/api/health")
async def health_check():
    """Health check endpoint."""
    return {
        "status": "healthy",
        "model_loaded": model is not None,
        "device": str(DEVICE)
    }


@app.post("/api/segment", response_model=SegmentationResponse)
async def segment_image(file: UploadFile = File(...)):
    """
    Segment a skin lesion from an uploaded dermoscopic image.

    Args:
        file: Image file (JPEG, PNG, WebP)

    Returns:
        SegmentationResponse with mask and overlay in base64
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")

    # Validate file type
    allowed_types = ["image/jpeg", "image/png", "image/webp", "image/jpg"]
    if file.content_type not in allowed_types:
        return SegmentationResponse(
            success=False,
            message=f"Invalid file type. Allowed: {', '.join(allowed_types)}"
        )

    try:
        # Read image
        contents = await file.read()
        nparr = np.frombuffer(contents, np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        original_size = image_rgb.shape[:2]

        # Preprocess
        img_tensor = preprocess_image(image_rgb).to(DEVICE)

        # Inference
        with torch.no_grad():
            output = model(img_tensor)
            mask = output.cpu().numpy().squeeze()

        # Calculate metrics
        confidence = float(np.mean(mask[mask > 0.5])) if np.any(mask > 0.5) else 0.0
        lesion_area = float(np.mean(mask > 0.5) * 100)

        # Threshold mask
        mask_binary = (mask > 0.5).astype(np.float32)

        # Resize to original size
        mask_resized = postprocess_mask(mask_binary, original_size)

        # Create overlay
        overlay = create_overlay(image_rgb, mask_resized, alpha=0.4)

        return SegmentationResponse(
            success=True,
            mask_base64=mask_to_base64(mask_resized),
            overlay_base64=image_to_base64(overlay),
            confidence=round(confidence * 100, 2),
            lesion_area_percent=round(lesion_area, 2)
        )

    except Exception as e:
        return SegmentationResponse(
            success=False,
            message=f"Error processing image: {str(e)}"
        )


# Mount static files (frontend)
client_path = os.path.join(os.path.dirname(__file__), "..", "client")
if os.path.exists(client_path):
    app.mount("/static", StaticFiles(directory=client_path), name="static")


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
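
A quick client-side sketch (an editorial addition, not part of the commit) for exercising the endpoint above: it posts an image to /api/segment with the requests library (assumed installed separately; it is not pinned in server/requirements.txt, and example.jpg is a placeholder file name) and decodes the returned overlay.

import base64
import requests

with open('example.jpg', 'rb') as f:  # placeholder input image
    resp = requests.post('http://localhost:8000/api/segment',
                         files={'file': ('example.jpg', f, 'image/jpeg')})
result = resp.json()
print(result['success'], result['confidence'], result['lesion_area_percent'])
if result['success']:
    with open('overlay.png', 'wb') as out:
        out.write(base64.b64decode(result['overlay_base64']))
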
server/requirements.txt
ADDED
@@ -0,0 +1,22 @@
# Skin Cancer Lesion Segmentation - Python Dependencies
# =====================================================

# Web Framework
fastapi>=0.104.0
uvicorn>=0.24.0
python-multipart>=0.0.6

# Deep Learning
torch>=2.0.0
torchvision>=0.15.0

# Image Processing
opencv-python>=4.8.0
Pillow>=10.0.0
albumentations>=1.3.0

# Data Processing
numpy>=1.24.0

# Type hints
pydantic>=2.0.0
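
These are minimum-version pins rather than an exact lockfile. For a local, non-Docker setup, pip install -r server/requirements.txt followed by running server/main.py directly (its __main__ block starts uvicorn on port 8000) should be enough to serve both the API and the static frontend.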