all
Browse files- Acnes_model.pth +3 -0
- README.md +68 -7
- app.py +92 -0
- requirements.txt +9 -0
Acnes_model.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aa18e19b1c76c33a2edc8c5a820497fe9ff4e023be88cabae83d1ce9e289da47
|
| 3 |
+
size 97947414
|
README.md
CHANGED
|
@@ -1,14 +1,75 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 5.25.2
|
| 8 |
app_file: app.py
|
| 9 |
-
pinned:
|
| 10 |
license: apache-2.0
|
| 11 |
-
short_description:
|
| 12 |
---
|
| 13 |
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Acne Detection AI 🔥
|
| 3 |
+
emoji: 🔥
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: yellow
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 5.25.2
|
| 8 |
app_file: app.py
|
| 9 |
+
pinned: true
|
| 10 |
license: apache-2.0
|
| 11 |
+
short_description: Advanced AI for Acne Segmentation & Severity Classification
|
| 12 |
---
|
| 13 |
|
| 14 |
+
# 🧠 Acne Detection & Classification with Deep Learning
|
| 15 |
+
|
| 16 |
+
 <!-- You can upload a real image here -->
|
| 17 |
+
|
| 18 |
+
This application demonstrates a powerful **AI-driven pipeline** for **acne detection, segmentation**, and **severity classification**, combining:
|
| 19 |
+
|
| 20 |
+
- A **UNet-based CNN** for precise acne lesion segmentation (pixel accuracy ≈ **98%**, IoU ≈ **91%** — see Model Architecture below)
|
| 21 |
+
- A **transformer-based classifier** (fine-tuned Vision Transformer) for grading severity based on dermatological standards
|
| 22 |
+
|
| 23 |
+
Try it by uploading a face image.
|
| 24 |
+
It will return:
|
| 25 |
+
✅ a **visual overlay** showing detected acne regions
|
| 26 |
+
✅ a **severity label** (from clear skin to very severe acne)
|
| 27 |
+
|
| 28 |
+
---
|
| 29 |
+
|
| 30 |
+
## 🧪 Model Architecture
|
| 31 |
+
|
| 32 |
+
### 🔹 Segmentation Model
|
| 33 |
+
- Architecture: `UNet` with `ResNet34` backbone (from [smp](https://github.com/qubvel/segmentation_models.pytorch))
|
| 34 |
+
- Optimized for: Binary mask prediction of acne regions
|
| 35 |
+
- Trained on: Annotated dermatological datasets
|
| 36 |
+
- Accuracy: **Pixel Accuracy ≈ 98%**, **IoU ≈ 91%**
|
| 37 |
+
|
| 38 |
+
### 🔹 Classification Model
|
| 39 |
+
- Architecture: Vision Transformer (ViT)
|
| 40 |
+
- Source: [`imfarzanansari/skintelligent-acne`](https://huggingface.co/imfarzanansari/skintelligent-acne)
|
| 41 |
+
- Labels: From `Level -1 (Clear)` to `Level 4 (Very Severe Acne)`
|
| 42 |
+
- Input: Facial image
|
| 43 |
+
- Output: Severity level + confidence score
|
| 44 |
+
|
| 45 |
+
---
|
| 46 |
+
|
| 47 |
+
## 📈 Example Output
|
| 48 |
+
|
| 49 |
+
| Input Image | Segmentation Overlay | Acne Level |
|
| 50 |
+
|-------------|----------------------|------------|
|
| 51 |
+
|  |  | Level 2: Moderate Acne |
|
| 52 |
+
|
| 53 |
+
---
|
| 54 |
+
|
| 55 |
+
## 💡 Use Cases
|
| 56 |
+
- Dermatology research and screening
|
| 57 |
+
- Skincare and cosmetic product testing
|
| 58 |
+
- Automated health monitoring platforms
|
| 59 |
+
|
| 60 |
+
---
|
| 61 |
+
|
| 62 |
+
## 🧩 Tech Stack
|
| 63 |
+
- `PyTorch`, `Segmentation Models PyTorch`
|
| 64 |
+
- `Transformers` by Hugging Face
|
| 65 |
+
- `Albumentations` for fast preprocessing
|
| 66 |
+
- `OpenCV`, `Gradio` for live interface
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
## 🤖 Try it now!
|
| 72 |
+
|
| 73 |
+
Click below and upload a photo to get real-time predictions:
|
| 74 |
+
|
| 75 |
+
Check out the configuration reference at [https://huggingface.co/docs/hub/spaces-config-reference](https://huggingface.co/docs/hub/spaces-config-reference)
|
app.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
import torch
import cv2
import numpy as np
# NOTE: segmentation_models_pytorch must stay imported even though it is not
# referenced by name below — the checkpoint is a fully pickled smp.Unet, and
# unpickling needs the class importable.
import segmentation_models_pytorch as smp
from albumentations import Compose, Normalize, Resize
from albumentations.pytorch import ToTensorV2
from transformers import pipeline

# Constants
MODEL_PATH = "Acnes_model.pth"  # fully pickled UNet checkpoint (stored via git-lfs)
MASK_OPACITY = 0.9              # blending weight of the red acne highlight
DEVICE = torch.device("cpu")    # Spaces CPU hardware; no CUDA assumed

# ----------------- LOAD MODEL -----------------
# The checkpoint stores a whole nn.Module, not a state_dict. torch >= 2.6
# defaults to weights_only=True, which refuses to unpickle arbitrary objects,
# so be explicit or startup fails with an UnpicklingError. Unpickling executes
# code — only acceptable here because the checkpoint ships with this repo.
model = torch.load(MODEL_PATH, map_location=DEVICE, weights_only=False)
model.to(DEVICE)
model.eval()

# ----------------- Classification Model -----------------
# Pretrained ViT severity classifier fetched from the Hugging Face Hub.
classification_pipe = pipeline("image-classification", model="imfarzanansari/skintelligent-acne")
# ----------------- Preprocessing -----------------
def preprocess_image(image, img_size=(256, 256)):
    """Resize and ImageNet-normalize an RGB image for the segmentation model.

    Returns a pair: a (1, 3, H, W) float tensor ready for inference, and the
    untouched input array (kept for drawing the overlay at full resolution).
    """
    target_h, target_w = img_size
    aug = Compose([
        Resize(target_h, target_w),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensorV2(),
    ])
    batched = aug(image=image)["image"].unsqueeze(0)
    return batched, image
# ----------------- Inference -----------------
def predict_mask(model, image_tensor):
    """Run the segmentation model and return the probability mask.

    Applies sigmoid to the raw logits; the result is a numpy array with the
    batch/channel dimensions squeezed away.
    """
    with torch.no_grad():
        logits = model(image_tensor.to(DEVICE))
        probabilities = torch.sigmoid(logits)
    return probabilities.squeeze().cpu().numpy()
# ----------------- Overlay -----------------
def overlay_mask(image, mask, color=(255, 0, 0), alpha=MASK_OPACITY, threshold=0.2):
    """Blend a segmentation mask onto the photo as a colored highlight.

    Args:
        image: HxWx3 uint8 array (presumably RGB from Gradio — TODO confirm).
        mask: float probability mask; nearest-neighbor resized to match image.
        color: highlight color for pixels above threshold.
        alpha: opacity of the highlight layer.
        threshold: probability cutoff for a pixel to count as acne
            (previously a hard-coded 0.2; now tunable, same default).

    Returns:
        uint8 array with the highlight blended in.
    """
    mask_resized = cv2.resize(mask, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST)
    mask_colored = np.zeros_like(image, dtype=np.uint8)
    mask_colored[mask_resized > threshold] = color
    # addWeighted saturates at 255 and does not mutate its inputs, so no copy
    # of `image` is needed; the highlight brightens rather than wraps around.
    return cv2.addWeighted(image, 1, mask_colored, alpha, 0)
| 51 |
+
# ----------------- Severity Mapping -----------------
|
| 52 |
+
def map_classification_label_to_level(label):
|
| 53 |
+
levels = {
|
| 54 |
+
'level -1': "Level -1: Clear Skin",
|
| 55 |
+
'level 0': "Level 0: Occasional Spots",
|
| 56 |
+
'level 1': "Level 1: Mild Acne",
|
| 57 |
+
'level 2': "Level 2: Moderate Acne",
|
| 58 |
+
'level 3': "Level 3: Severe Acne",
|
| 59 |
+
'level 4': "Level 4: Very Severe Acne"
|
| 60 |
+
}
|
| 61 |
+
return levels.get(label, "Unknown")
|
| 62 |
+
|
# ----------------- Combined Prediction -----------------
def predict(image):
    """Full pipeline: segment acne, overlay the mask, and grade severity.

    Args:
        image: HxWx3 uint8 numpy array from the Gradio image input.

    Returns:
        (overlay image, "severity\\nConfidence: x.xx" string).
    """
    import os
    import tempfile

    input_tensor, original_image = preprocess_image(image)
    predicted_mask = predict_mask(model, input_tensor)
    overlayed_image = overlay_mask(original_image, predicted_mask, color=(255, 0, 0), alpha=MASK_OPACITY)

    # The transformers pipeline accepts file paths. Write to the platform's
    # temp directory (was hard-coded "/tmp", which does not exist on Windows).
    temp_path = os.path.join(tempfile.gettempdir(), "temp_image.jpg")
    cv2.imwrite(temp_path, cv2.cvtColor(original_image, cv2.COLOR_RGB2BGR))

    classification_result = classification_pipe(temp_path)
    # Pick the top-scoring entry once instead of scanning the list twice
    # (the original computed max() separately for label and score).
    best = max(classification_result, key=lambda entry: entry['score'])
    severity = map_classification_label_to_level(best['label'])

    return overlayed_image, f"{severity}\nConfidence: {best['score']:.2f}"
# ----------------- Gradio UI -----------------
# One image in, two outputs out: the segmentation overlay and the severity text.
face_input = gr.Image(type="numpy", label="Upload Face Image")
result_outputs = [
    gr.Image(label="Segmentation Overlay"),
    gr.Text(label="Acne Severity Prediction"),
]

demo = gr.Interface(
    predict,
    face_input,
    result_outputs,
    title="Acne Segmentation & Severity Classification",
    description="Upload a facial image to detect acne regions and predict severity level using UNet and a pretrained classifier.",
)

demo.launch()
requirements.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
torch
|
| 3 |
+
torchvision
|
| 4 |
+
segmentation-models-pytorch
|
| 5 |
+
opencv-python-headless
|
| 6 |
+
albumentations
|
| 7 |
+
transformers
|
| 8 |
+
numpy
|
| 9 |
+
Pillow
|