# app.py — Face Shape & Skin Tone Analyzer (Gradio app)
# Uploaded by RICHERGIRL, commit 326ae0e. (Hugging Face Spaces file-viewer
# chrome from the original page converted to comments so the file parses.)
import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
from sklearn.cluster import KMeans
import os
# Train the model once at startup, only if the serialized model is missing.
if not os.path.exists("mask_model.pkl"):
    import runpy  # local import: only needed on the very first run

    # runpy.run_path executes the script in a fresh namespace and closes the
    # file itself — unlike exec(open(...).read()), which leaked the handle
    # and polluted this module's globals.
    runpy.run_path("train_model.py")
def recommend_mask_style(face_shape, skin_tone):
    """Suggest a decorative mask style for a face-shape / skin-tone pair.

    Any combination not covered by the lookup table falls back to
    "Mystery Style".
    """
    styles_by_shape = {
        "oval": {
            "fair": "Floral Pastel",
            "medium": "Elegant Pearl",
            "dark": "Tribal Geometric",
        },
        "round": {
            "fair": "Soft Petals",
            "medium": "Bold Striped",
            "dark": "Neon Carnival",
        },
        "square": {
            "fair": "Royal Blue Lace",
            "medium": "Copper Edge",
            "dark": "Metallic Mask",
        },
    }
    return styles_by_shape.get(face_shape, {}).get(skin_tone, "Mystery Style")
def overlay_mask(image, face_shape, skin_tone, x, y, w, h):
    """Blend a translucent colored shape over the face box and label it
    with the recommended mask style.

    Args:
        image: BGR uint8 numpy image. Not modified; a blended copy is returned.
        face_shape: "oval", "round" or "square"; anything else gets a rectangle.
        skin_tone: "fair", "medium" or "dark"; anything else gets white.
        x, y, w, h: face bounding box in pixel coordinates.

    Returns:
        A new image of the same shape with the mask blended in and a
        "shape, tone, style" text label above the box.
    """
    mask = np.zeros_like(image, dtype=np.uint8)
    # Color per skin tone; note these tuples are applied directly to the
    # BGR image, so on screen they render with B and R swapped vs their
    # RGB color names.
    color_dict = {
        "fair": (255, 182, 193),
        "medium": (0, 191, 255),
        "dark": (138, 43, 226),
    }
    color = color_dict.get(skin_tone, (255, 255, 255))

    if face_shape == "oval":
        center = (x + w // 2, y + h // 2)
        axes = (w // 2, h // 2)
        cv2.ellipse(mask, center, axes, 0, 0, 360, color, -1)
    elif face_shape == "round":
        radius = min(w, h) // 2
        center = (x + w // 2, y + h // 2)
        cv2.circle(mask, center, radius, color, -1)
    else:
        # "square" and any unrecognized shape both get a filled rectangle
        # (the original had two identical branches for these cases).
        cv2.rectangle(mask, (x, y), (x + w, y + h), color, -1)

    style = recommend_mask_style(face_shape, skin_tone)

    # Blend the mask over the image at fixed opacity.
    alpha = 0.4
    blended = cv2.addWeighted(mask, alpha, image, 1 - alpha, 0)

    # Label just above the bounding box.
    label_text = f"{face_shape}, {skin_tone}, {style}"
    cv2.putText(blended, label_text, (x, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    return blended
# Initialize MediaPipe modules (module-level singletons reused by process_image).
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh
# Per MediaPipe docs, model_selection=0 is the short-range detection model;
# detections below 0.6 confidence are discarded.
face_detector = mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.6)
# Static-image mode (no inter-frame tracking), at most one face, with
# refined landmarks enabled.
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, refine_landmarks=True)
def detect_face_shape(landmarks, image_width, image_height):
    """Classify a face as "round", "square", or "oval".

    Uses four MediaPipe FaceMesh landmarks — jaw corners (indices 234 and
    454), chin (152) and forehead (10) — and decides by the width/height
    ratio of the face: > 1.05 round, (0.95, 1.05] square, otherwise oval.
    A zero height (degenerate landmarks) yields ratio 0 and thus "oval".
    """
    left_x = int(landmarks[234].x * image_width)
    right_x = int(landmarks[454].x * image_width)
    chin_y = int(landmarks[152].y * image_height)
    forehead_y = int(landmarks[10].y * image_height)

    width = abs(right_x - left_x)
    height = abs(chin_y - forehead_y)
    ratio = width / height if height else 0

    if ratio > 1.05:
        return "round"
    if ratio > 0.95:
        return "square"
    return "oval"
def detect_skin_tone(image, x, y, w, h):
    """Classify the skin tone inside a face bounding box.

    Clusters the ROI pixels into three KMeans clusters and thresholds the
    mean brightness of the *dominant* cluster center.

    Args:
        image: BGR uint8 numpy image.
        x, y, w, h: face bounding box in pixel coordinates.

    Returns:
        "fair" (brightness > 200), "medium" (> 100) or "dark"; "medium"
        when the ROI is degenerate.
    """
    roi = image[y:y + h, x:x + w]
    if roi.size < 9:
        # Fewer than 3 pixels (e.g. a box clamped to the image edge):
        # KMeans(n_clusters=3) would raise, so return a neutral answer.
        return "medium"
    roi_rgb = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
    pixels = roi_rgb.reshape((-1, 3))
    # random_state fixed so the same image always yields the same tone.
    kmeans = KMeans(n_clusters=3, n_init=10, random_state=0)
    kmeans.fit(pixels)
    # Bug fix: cluster_centers_[0] is an arbitrary cluster, not the largest.
    # Pick the cluster covering the most pixels so the brightness reflects
    # the dominant color of the face region.
    dominant = int(np.argmax(np.bincount(kmeans.labels_)))
    brightness = float(np.mean(kmeans.cluster_centers_[dominant]))
    if brightness > 200:
        return "fair"
    elif brightness > 100:
        return "medium"
    else:
        return "dark"
def overlay_mask(image, face_shape, skin_tone, x, y, w, h):
    """Blend a translucent colored shape over the face box and label it
    with the recommended mask style.

    NOTE(review): this is a duplicate definition that shadows the earlier
    overlay_mask in this file; the earlier version included the
    recommend_mask_style label, which this shadowing copy had silently
    dropped — restored here. The two definitions should be consolidated.

    Args:
        image: BGR uint8 numpy image. Not modified; a blended copy is returned.
        face_shape: "oval", "round" or "square"; anything else gets a rectangle.
        skin_tone: "fair", "medium" or "dark"; anything else gets white.
        x, y, w, h: face bounding box in pixel coordinates.

    Returns:
        A new image with the mask blended in and a "shape, tone, style" label.
    """
    mask = np.zeros_like(image, dtype=np.uint8)
    color_dict = {
        "fair": (255, 182, 193),    # light pink (RGB name; applied to BGR image)
        "medium": (0, 191, 255),    # deep sky blue (RGB name)
        "dark": (138, 43, 226),     # blue violet (RGB name)
    }
    color = color_dict.get(skin_tone, (255, 255, 255))

    if face_shape == "oval":
        center = (x + w // 2, y + h // 2)
        axes = (w // 2, h // 2)
        cv2.ellipse(mask, center, axes, 0, 0, 360, color, -1)
    elif face_shape == "round":
        radius = min(w, h) // 2
        center = (x + w // 2, y + h // 2)
        cv2.circle(mask, center, radius, color, -1)
    else:
        # "square" and any unrecognized shape both get a filled rectangle.
        cv2.rectangle(mask, (x, y), (x + w, y + h), color, -1)

    style = recommend_mask_style(face_shape, skin_tone)
    alpha = 0.4  # mask opacity
    blended = cv2.addWeighted(mask, alpha, image, 1 - alpha, 0)
    cv2.putText(blended, f"{face_shape}, {skin_tone}, {style}", (x, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    return blended
def process_image(image):
    """Gradio callback: detect a face, classify shape and tone, draw overlay.

    Args:
        image: RGB uint8 numpy array from the Gradio Image component.

    Returns:
        RGB uint8 numpy array with the mask overlay, or the input image
        unchanged when no face is detected.
    """
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    ih, iw, _ = image.shape

    # Both MediaPipe models expect RGB; convert once and reuse (the
    # original converted three times, and re-ran the face mesh inside the
    # detection loop on an image already mutated by previous overlays).
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = face_detector.process(rgb)
    if not results.detections:
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Face mesh is loop-invariant: run it once on the pristine frame.
    results_mesh = face_mesh.process(rgb)
    if results_mesh.multi_face_landmarks:
        landmarks = results_mesh.multi_face_landmarks[0].landmark
        face_shape = detect_face_shape(landmarks, iw, ih)
    else:
        face_shape = "oval"  # fallback when the mesh finds no landmarks

    for detection in results.detections:
        bboxC = detection.location_data.relative_bounding_box
        x = int(bboxC.xmin * iw)
        y = int(bboxC.ymin * ih)
        w = int(bboxC.width * iw)
        h = int(bboxC.height * ih)
        # Clamp the box to the image so slicing and drawing stay in bounds
        # (the original clamped only the top-left corner).
        x, y = max(x, 0), max(y, 0)
        w = min(w, iw - x)
        h = min(h, ih - y)
        skin_tone = detect_skin_tone(image, x, y, w, h)
        image = overlay_mask(image, face_shape, skin_tone, x, y, w, h)

    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Gradio UI: single image in, single annotated image out.
demo = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="numpy", label="Upload or Snap Image"),
    outputs=gr.Image(label="Face Shape + Skin Tone + Mask Overlay"),
    live=True,  # re-run automatically whenever the input image changes
    title="Face Shape & Skin Tone Analyzer",
    description="This app detects face shape & skin tone and overlays a dynamic mask using OpenCV."
)
# Launch only when executed as a script (not when imported by a host runtime).
if __name__ == "__main__":
    demo.launch()