# Face Shape & Skin Tone Analyzer — Gradio app
import gradio as gr
import cv2
import numpy as np
import mediapipe as mp
from sklearn.cluster import KMeans
import os
# Train the model once at startup: if the pickled model is missing,
# run the training script to (re)generate mask_model.pkl.
if not os.path.exists("mask_model.pkl"):
    # runpy keeps real filenames in tracebacks and closes the script file,
    # unlike exec(open(...).read()), which leaked the open file handle.
    import runpy
    runpy.run_path("train_model.py")
def recommend_mask_style(face_shape, skin_tone):
    """Return the suggested decorative mask style for a face-shape /
    skin-tone pairing, or "Mystery Style" for unknown combinations.
    """
    styles_by_shape = {
        "oval": {
            "fair": "Floral Pastel",
            "medium": "Elegant Pearl",
            "dark": "Tribal Geometric",
        },
        "round": {
            "fair": "Soft Petals",
            "medium": "Bold Striped",
            "dark": "Neon Carnival",
        },
        "square": {
            "fair": "Royal Blue Lace",
            "medium": "Copper Edge",
            "dark": "Metallic Mask",
        },
    }
    return styles_by_shape.get(face_shape, {}).get(skin_tone, "Mystery Style")
def overlay_mask(image, face_shape, skin_tone, x, y, w, h):
    """Blend a translucent, shape-matched colored mask onto `image` and
    annotate it with face shape, skin tone, and the recommended style.

    NOTE(review): a second `overlay_mask` defined later in this file
    shadows this one, so this version is currently never called.
    """
    tint_by_tone = {
        "fair": (255, 182, 193),
        "medium": (0, 191, 255),
        "dark": (138, 43, 226),
    }
    tint = tint_by_tone.get(skin_tone, (255, 255, 255))
    canvas = np.zeros_like(image, dtype=np.uint8)
    cx, cy = x + w // 2, y + h // 2
    if face_shape == "oval":
        cv2.ellipse(canvas, (cx, cy), (w // 2, h // 2), 0, 0, 360, tint, -1)
    elif face_shape == "round":
        cv2.circle(canvas, (cx, cy), min(w, h) // 2, tint, -1)
    else:
        # "square" and any unrecognized shape both fall back to a rectangle.
        cv2.rectangle(canvas, (x, y), (x + w, y + h), tint, -1)
    style = recommend_mask_style(face_shape, skin_tone)
    alpha = 0.4
    blended = cv2.addWeighted(canvas, alpha, image, 1 - alpha, 0)
    cv2.putText(blended, f"{face_shape}, {skin_tone}, {style}", (x, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    return blended
# Initialize MediaPipe modules
# Module-level MediaPipe handles shared by process_image below; the names
# face_detector and face_mesh are part of this module's interface.
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh
# model_selection=0 selects the short-range detector per the MediaPipe docs;
# detections below 0.6 confidence are dropped.
face_detector = mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.6)
# static_image_mode=True treats every call as an independent still image;
# at most one face's landmarks are returned (max_num_faces=1).
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, refine_landmarks=True)
def detect_face_shape(landmarks, image_width, image_height):
    """Classify the face as "round", "square", or "oval" from the width /
    height ratio of four landmarks.

    `landmarks` is indexable with normalized `.x` / `.y` members; indices
    234 and 454 are the jaw edges, 152 the chin, 10 the forehead.
    """
    left_x = int(landmarks[234].x * image_width)
    right_x = int(landmarks[454].x * image_width)
    chin_y = int(landmarks[152].y * image_height)
    forehead_y = int(landmarks[10].y * image_height)

    face_width = abs(right_x - left_x)
    face_height = abs(chin_y - forehead_y)
    if face_height == 0:
        # Degenerate box: the original treated the ratio as 0 -> "oval".
        return "oval"

    ratio = face_width / face_height
    if ratio > 1.05:
        return "round"
    if ratio > 0.95:
        return "square"
    return "oval"
def detect_skin_tone(image, x, y, w, h):
    """Estimate a coarse skin tone for a face bounding box.

    Clusters the ROI pixels with K-Means and thresholds the brightness of
    the dominant (largest) cluster. The original used cluster_centers_[0],
    which is an arbitrary cluster, not the dominant one.

    Parameters: image is a BGR frame; (x, y, w, h) is the face box in pixels.
    Returns one of "fair", "medium", "dark".
    """
    roi = image[y:y + h, x:x + w]
    if roi.size == 0:
        # Degenerate / fully out-of-frame box: fall back instead of
        # crashing KMeans on an empty sample set.
        return "medium"
    roi_rgb = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
    pixels = roi_rgb.reshape((-1, 3))
    kmeans = KMeans(n_clusters=3, n_init=10)
    labels = kmeans.fit_predict(pixels)
    # Dominant cluster = the one covering the most pixels.
    dominant = np.bincount(labels).argmax()
    brightness = float(np.mean(kmeans.cluster_centers_[dominant]))
    if brightness > 200:
        return "fair"
    elif brightness > 100:
        return "medium"
    else:
        return "dark"
def overlay_mask(image, face_shape, skin_tone, x, y, w, h):
    """Blend a translucent colored mask over the face box and label it with
    face shape, skin tone, and the recommended mask style.

    NOTE: this definition shadows the earlier `overlay_mask` in this file.
    The earlier copy had been updated to show the recommended style, but
    this later duplicate overrode it and dropped the style; unified here
    so the label includes the style again.

    Parameters: image is a BGR frame; (x, y, w, h) the face box in pixels.
    Returns the blended, annotated BGR image.
    """
    mask = np.zeros_like(image, dtype=np.uint8)
    color_dict = {
        "fair": (255, 182, 193),   # light pink
        "medium": (0, 191, 255),   # deep sky blue
        "dark": (138, 43, 226),    # blue violet
    }
    color = color_dict.get(skin_tone, (255, 255, 255))
    if face_shape == "oval":
        center = (x + w // 2, y + h // 2)
        axes = (w // 2, h // 2)
        cv2.ellipse(mask, center, axes, 0, 0, 360, color, -1)
    elif face_shape == "round":
        radius = min(w, h) // 2
        center = (x + w // 2, y + h // 2)
        cv2.circle(mask, center, radius, color, -1)
    else:
        # "square" and unknown shapes both use the full bounding rectangle.
        cv2.rectangle(mask, (x, y), (x + w, y + h), color, -1)
    style = recommend_mask_style(face_shape, skin_tone)
    alpha = 0.4  # overlay opacity
    blended = cv2.addWeighted(mask, alpha, image, 1 - alpha, 0)
    cv2.putText(blended, f"{face_shape}, {skin_tone}, {style}", (x, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    return blended
def process_image(image):
    """Gradio callback: detect faces, classify shape and skin tone, and
    return the image with translucent mask overlays.

    Parameters: image — RGB ndarray from Gradio.
    Returns: RGB ndarray with overlays, or unchanged if no face is found.
    """
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    ih, iw, _ = image.shape
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = face_detector.process(rgb)
    if not results.detections:
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Run the face mesh ONCE on the clean frame. The original re-ran it on
    # every loop iteration, which was redundant and let earlier overlays
    # corrupt landmark detection for subsequent faces.
    results_mesh = face_mesh.process(rgb)
    if results_mesh.multi_face_landmarks:
        landmarks = results_mesh.multi_face_landmarks[0].landmark
        face_shape = detect_face_shape(landmarks, iw, ih)
    else:
        face_shape = "oval"  # safe default when the mesh finds nothing

    for detection in results.detections:
        bboxC = detection.location_data.relative_bounding_box
        # Convert the relative bbox to pixel coords, clamped at the origin.
        x = max(int(bboxC.xmin * iw), 0)
        y = max(int(bboxC.ymin * ih), 0)
        w = int(bboxC.width * iw)
        h = int(bboxC.height * ih)
        skin_tone = detect_skin_tone(image, x, y, w, h)
        image = overlay_mask(image, face_shape, skin_tone, x, y, w, h)

    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Gradio UI
# live=True re-runs process_image on every input change (webcam-friendly).
demo = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="numpy", label="Upload or Snap Image"),
    outputs=gr.Image(label="Face Shape + Skin Tone + Mask Overlay"),
    live=True,
    title="Face Shape & Skin Tone Analyzer",
    description="This app detects face shape & skin tone and overlays a dynamic mask using OpenCV."
)
if __name__ == "__main__":
    # Launch the local Gradio server only when run as a script.
    demo.launch()