# DermaGPT - app.py
# (Hugging Face Space upload header removed: ali-kanbar, "Upload 3 files", commit b4696a0 verified)
import gradio as gr
import numpy as np
from ultralytics import YOLO
from PIL import Image, ImageDraw
import torch
# Load both detection models at import time; the .pt weight files must sit
# next to app.py (they are part of the same Space upload).
# regions_model: segments a face photo into 9 anatomical zones.
regions_model = YOLO("9_Regions_Model_Weights.pt")
# acne_model: localizes individual acne lesions within one zone crop.
acne_model = YOLO("Acne_Detection_Model_Weights.pt")
# Canonical facial-region order; both the output grid and the report
# iterate regions in exactly this sequence.
anatomical_order = [
    "forehead",
    "left-eye",
    "right-eye",
    "left-cheek",
    "nose",
    "right-cheek",
    "between-eyes",
    "mouth",
    "chin",
]
def process_region(region_img):
    """Detect acne in one facial-region crop and draw bounding boxes.

    Args:
        region_img: RGB numpy array crop of a single facial region.

    Returns:
        Tuple of (annotated PIL image, number of acne detections).
    """
    results = acne_model(region_img)
    pil_img = Image.fromarray(region_img)
    boxes = results[0].boxes
    # Original code mixed an `is not None` guard with a truthiness check on
    # return; compute the count once and branch on it consistently.
    count = 0 if boxes is None else len(boxes)
    if count:
        draw = ImageDraw.Draw(pil_img)
        for box in boxes:
            x1, y1, x2, y2 = map(int, box.xyxy[0].cpu().numpy())
            draw.rectangle([x1, y1, x2, y2], outline="blue", width=2)
    return pil_img, count
def analyze_acne(region_counts):
    """Generate a textual acne assessment from per-region lesion counts.

    Args:
        region_counts: mapping of region name -> detected acne count.

    Returns:
        Multi-line report string: total count, severity class, the most
        affected region, and a per-region breakdown.
    """
    total_acne = sum(region_counts.values())
    # Severity thresholds: 0 healthy, <10 mild, <20 moderate, else severe.
    if total_acne == 0:
        severity = "Healthy"
    elif total_acne < 10:
        severity = "Mild"
    elif total_acne < 20:
        severity = "Moderate"
    else:
        severity = "Severe"
    report = "Acne Assessment\n"
    report += f"• Total Acne: {total_acne}\n"
    report += f"• Severity: {severity}\n"
    # max() on an empty mapping raises ValueError — guard the edge case
    # instead of crashing the whole prediction pipeline.
    if region_counts:
        most_affected = max(region_counts, key=region_counts.get)
        report += (
            f"• Most affected area: {most_affected.capitalize()} "
            f"({region_counts[most_affected]} Acne)\n\n"
        )
    else:
        report += "\n"
    report += "Region Breakdown:\n"
    for region, count in region_counts.items():
        report += f" • {region.capitalize()}: {count} Acne\n"
    return report
def perform_prediction(pil_image):
    """Full pipeline: segment the face, detect acne per region, report.

    Args:
        pil_image: uploaded face image (PIL.Image).

    Returns:
        Tuple of 9 annotated region images (in ``anatomical_order``)
        followed by the analysis report string.
    """
    # Force RGB: Gradio uploads can arrive as RGBA or grayscale, which
    # would break downstream Image.fromarray / model assumptions.
    image = np.array(pil_image.convert("RGB"))
    height, width = image.shape[:2]
    # Detect facial regions; keep only the first instance of each class.
    regions = {}
    results = regions_model(image)
    for box in results[0].boxes:
        class_name = regions_model.names[int(box.cls)]
        if class_name not in regions:
            x1, y1, x2, y2 = map(int, box.xyxy[0].cpu().numpy())
            # Clamp to image bounds: predicted boxes can poke slightly
            # outside the frame, and negative indices would slice wrongly.
            x1, y1 = max(0, x1), max(0, y1)
            x2, y2 = min(width, x2), min(height, y2)
            regions[class_name] = image[y1:y2, x1:x2]
    # Process each region in the fixed anatomical order.
    output_images = []
    region_counts = {}
    for region_name in anatomical_order:
        region_img = regions.get(region_name)
        if region_img is None or region_img.size == 0:
            # Placeholder tile for regions the detector failed to find.
            blank = Image.new('RGB', (300, 300), color='white')
            draw = ImageDraw.Draw(blank)
            draw.text((10, 10), f"Missing: {region_name}", fill="black")
            output_images.append(blank)
            region_counts[region_name] = 0
        else:
            pil_img, count = process_region(region_img)
            output_images.append(pil_img)
            region_counts[region_name] = count
    analysis_report = analyze_acne(region_counts)
    # One output slot per region image plus the textual report.
    return (*output_images, analysis_report)
# User-facing strings rendered as Markdown at the top of the interface.
title = "DermaGPT - Acne Detection & Analysis"
description = """
**Comprehensive Facial Acne Analysis System**
This advanced dermatological tool provides a detailed assessment of acne conditions through a two-stage analysis process:
1. **Precision Acne Detection**
Utilizes state-of-the-art computer vision models to:
- Segment the face into 9 anatomical regions
- Detect and localize acne lesions with bounding boxes
- Provide visual mapping of affected areas
2. **Clinical Assessment Report**
Generates an analysis featuring:
- Quantitative lesion count and distribution mapping
- Severity classification (Healthy/Mild/Moderate/Severe)
- Identification of most affected facial zones
**Medical-Grade Insights**
Designed for both clinical professionals and personal skincare assessment, this tool provides objective, consistent analysis to support treatment planning and skin health monitoring. All outputs adhere to dermatological best practices and classification standards.
*Note: For optimal results, use well-lit front-facing images without obstructions.*
*Note: Due to free resources limitation on Hugging Face, we were unable to use our LLM model to generate the Clinical Assessment Report*
"""
# Build the Gradio interface: input row, 3x3 region-image grid, report box.
with gr.Blocks() as demo:
    gr.Markdown(f"## {title}")
    gr.Markdown(description)
    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload Face Image")
        submit_btn = gr.Button("Analyze Acne", variant="primary")
    with gr.Row():
        gr.Markdown("### Facial Region Analysis")
    # Create 3x3 grid for region images.
    # NOTE(review): the grid is built column-major (outer loop `i` creates
    # Columns), so on screen the anatomical order runs DOWN each column,
    # not across rows. Labels still match perform_prediction's output
    # order, since region_outputs is appended in the same i*3+j order.
    region_outputs = []
    with gr.Row():
        for i in range(3):
            with gr.Column():
                for j in range(3):
                    idx = i*3 + j
                    if idx < len(anatomical_order):
                        region_output = gr.Image(
                            type="pil",
                            label=anatomical_order[idx].capitalize(),
                            elem_id=f"region-{idx}"
                        )
                        region_outputs.append(region_output)
    with gr.Row():
        gr.Markdown("### Acne Analysis Report")
    with gr.Row():
        analysis_output = gr.Textbox(
            label="Detailed Acne Assessment",
            interactive=False,
            lines=10,
            max_lines=20
        )
    # Wire the button: perform_prediction returns 9 images plus the report
    # string, matching region_outputs + [analysis_output] positionally.
    submit_btn.click(
        fn=perform_prediction,
        inputs=image_input,
        outputs=region_outputs + [analysis_output]
    )
if __name__ == "__main__":
    demo.launch()