Spaces:
Configuration error
Configuration error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,135 +1,127 @@
|
|
| 1 |
-
import cv2
|
| 2 |
import gradio as gr
|
|
|
|
| 3 |
import numpy as np
|
| 4 |
-
|
| 5 |
from sklearn.cluster import KMeans
|
| 6 |
|
| 7 |
-
#
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
mask_rgb = mask_resized[:, :, :3]
|
| 33 |
-
|
| 34 |
-
# Define the region of interest (ROI) on the face image
|
| 35 |
-
roi = face_image[face_y:face_y + face_h, face_x:face_x + face_w]
|
| 36 |
-
|
| 37 |
-
# Blend the mask onto the face using the alpha mask
|
| 38 |
-
for c in range(0, 3): # For each color channel
|
| 39 |
-
roi[:, :, c] = (1 - mask_alpha) * roi[:, :, c] + mask_alpha * mask_rgb[:, :, c]
|
| 40 |
-
|
| 41 |
-
face_image[face_y:face_y + face_h, face_x:face_x + face_w] = roi
|
| 42 |
-
return face_image
|
| 43 |
-
|
| 44 |
-
# Function to detect face shape using MediaPipe FaceMesh
|
| 45 |
-
def detect_face_shape(landmarks):
|
| 46 |
-
# Extract relevant facial landmarks (e.g., eyes, chin, etc.)
|
| 47 |
-
left_eye = landmarks[33]
|
| 48 |
-
right_eye = landmarks[133]
|
| 49 |
-
chin = landmarks[1]
|
| 50 |
-
jawline = landmarks[7]
|
| 51 |
-
|
| 52 |
-
# Calculate distances and ratios to infer face shape
|
| 53 |
-
eye_distance = np.linalg.norm(np.array(left_eye) - np.array(right_eye))
|
| 54 |
-
chin_distance = np.linalg.norm(np.array(chin) - np.array(jawline))
|
| 55 |
-
|
| 56 |
-
# Classify face shape based on proportions (this can be adjusted based on your observations)
|
| 57 |
-
if eye_distance > chin_distance:
|
| 58 |
-
return "oval"
|
| 59 |
-
else:
|
| 60 |
return "round"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
# Apply KMeans to classify the skin tone based on the dominant color
|
| 74 |
-
kmeans = KMeans(n_clusters=1, random_state=0).fit(face_region_flat)
|
| 75 |
-
|
| 76 |
-
# Get the cluster center (dominant color)
|
| 77 |
-
dominant_color = kmeans.cluster_centers_[0]
|
| 78 |
-
|
| 79 |
-
# Classify skin tone based on dominant color
|
| 80 |
-
if dominant_color[0] < 100 and dominant_color[1] < 120 and dominant_color[2] < 130: # darker shades
|
| 81 |
-
return "dark"
|
| 82 |
-
elif dominant_color[0] > 120 and dominant_color[1] > 130 and dominant_color[2] > 140: # fair shades
|
| 83 |
return "fair"
|
| 84 |
-
|
| 85 |
return "medium"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
|
| 87 |
-
# Function to process the image: detect face, determine shape and skin tone, and apply mask
|
| 88 |
def process_image(image):
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
|
|
|
| 111 |
skin_tone = detect_skin_tone(image, x, y, w, h)
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
return processed_image
|
| 125 |
-
|
| 126 |
-
# Create Gradio interface
|
| 127 |
-
iface = gr.Interface(
|
| 128 |
-
fn=gradio_interface,
|
| 129 |
-
inputs=gr.inputs.Image(type="numpy", label="Upload Image or Use Webcam"),
|
| 130 |
-
outputs=gr.outputs.Image(type="numpy", label="Image with Mask"),
|
| 131 |
-
live=True
|
| 132 |
)
|
| 133 |
|
| 134 |
-
|
| 135 |
-
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
import cv2
|
| 3 |
import numpy as np
|
| 4 |
+
import mediapipe as mp
|
| 5 |
from sklearn.cluster import KMeans
|
| 6 |
|
| 7 |
+
# Initialize MediaPipe modules
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh

# Module-level detector instances, shared by process_image() for every request.
# Detections below 0.6 confidence are discarded; model_selection=0 selects the
# short-range detection model per the MediaPipe docs — TODO confirm this suits
# the expected input distance.
face_detector = mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.6)
# static_image_mode=True treats each image independently (no inter-frame
# tracking); max_num_faces=1 returns at most one mesh even if several faces
# are present; refine_landmarks enables the refined (iris) landmark set.
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, refine_landmarks=True)
| 14 |
+
def detect_face_shape(landmarks, image_width, image_height):
    """Classify a face as "round", "square", or "oval".

    The decision is driven by the aspect ratio of the face box: jaw-to-jaw
    width versus chin-to-forehead height, both measured in pixels.

    Args:
        landmarks: indexable collection of MediaPipe FaceMesh landmarks,
            each exposing normalized ``.x`` / ``.y`` attributes in [0, 1].
        image_width: image width in pixels, used to de-normalize x.
        image_height: image height in pixels, used to de-normalize y.

    Returns:
        One of "round" (ratio > 1.05), "square" (0.95 < ratio <= 1.05),
        or "oval" (everything else, including a degenerate zero-height face).
    """
    # FaceMesh indices: 234 / 454 are the left / right jaw extremes,
    # 152 is the chin tip, 10 is the top of the forehead.
    to_px_x = lambda lm: int(lm.x * image_width)
    to_px_y = lambda lm: int(lm.y * image_height)

    width_px = abs(to_px_x(landmarks[454]) - to_px_x(landmarks[234]))
    height_px = abs(to_px_y(landmarks[152]) - to_px_y(landmarks[10]))

    # A zero-height face yields ratio 0, which falls through to "oval".
    aspect = width_px / height_px if height_px else 0

    if aspect > 1.05:
        return "round"
    if 0.95 < aspect <= 1.05:
        return "square"
    return "oval"
|
| 37 |
|
| 38 |
+
def detect_skin_tone(image, x, y, w, h):
    """Estimate the skin tone inside a face bounding box.

    Clusters the ROI pixels with KMeans and classifies the brightness of
    the *dominant* (most populated) cluster into three buckets.

    Args:
        image: BGR image (OpenCV convention) as an ndarray.
        x, y, w, h: face bounding box in pixels.

    Returns:
        "fair" (brightness > 200), "medium" (> 100), or "dark" otherwise.
        Returns "medium" for an empty ROI.
    """
    roi = image[y:y + h, x:x + w]
    # Guard: a zero-area box (or one clipped entirely off-image) yields an
    # empty crop, and KMeans raises on zero samples. Fall back to the
    # middle bucket instead of crashing.
    if roi.size == 0:
        return "medium"

    roi_rgb = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
    roi_flat = roi_rgb.reshape((-1, 3))

    # Fixed random_state makes the clustering (and thus the returned tone)
    # deterministic across calls on the same image.
    kmeans = KMeans(n_clusters=3, n_init=10, random_state=0)
    kmeans.fit(roi_flat)

    # Bug fix: cluster_centers_[0] was an *arbitrary* cluster, not the
    # dominant color. Select the center of the cluster owning the most
    # pixels instead.
    dominant = int(np.bincount(kmeans.labels_).argmax())
    dominant_color = kmeans.cluster_centers_[dominant]
    brightness = np.mean(dominant_color)

    if brightness > 200:
        return "fair"
    elif brightness > 100:
        return "medium"
    else:
        return "dark"
|
| 54 |
+
|
| 55 |
+
def overlay_mask(image, face_shape, skin_tone, x, y, w, h):
    """Blend a semi-transparent, shape- and tone-coded mask over the face.

    The mask's geometry follows ``face_shape`` and its color follows
    ``skin_tone``; a text label with both values is drawn just above the box.

    Args:
        image: BGR image to annotate (the input is not modified in place).
        face_shape: "oval", "round", or "square"; any other value falls
            back to a rectangle.
        skin_tone: "fair", "medium", or "dark"; any other value falls
            back to white.
        x, y, w, h: face bounding box in pixels.

    Returns:
        A new BGR image containing the blended mask and label.
    """
    overlay = image.copy()  # retained for parity with the original code (unused)
    canvas = np.zeros_like(image, dtype=np.uint8)

    tone_colors = {
        "fair": (255, 182, 193),   # light pink
        "medium": (0, 191, 255),   # deep sky blue
        "dark": (138, 43, 226),    # blue violet
    }
    fill = tone_colors.get(skin_tone, (255, 255, 255))

    center = (x + w // 2, y + h // 2)
    if face_shape == "oval":
        cv2.ellipse(canvas, center, (w // 2, h // 2), 0, 0, 360, fill, -1)
    elif face_shape == "round":
        cv2.circle(canvas, center, min(w, h) // 2, fill, -1)
    else:
        # "square" and any unrecognized shape both get a filled rectangle,
        # exactly as in the original's separate square/else branches.
        cv2.rectangle(canvas, (x, y), (x + w, y + h), fill, -1)

    alpha = 0.4
    blended = cv2.addWeighted(canvas, alpha, image, 1 - alpha, 0)

    cv2.putText(blended, f"{face_shape}, {skin_tone}", (x, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

    return blended
|
| 86 |
|
|
|
|
| 87 |
def process_image(image):
    """Detect faces, classify shape and skin tone, and draw the overlay.

    Gradio supplies (and expects back) an RGB numpy image; internally the
    pipeline works in OpenCV's BGR order, so the image is converted on the
    way in and back out.

    Returns the annotated RGB image, or the unmodified image when no face
    is detected.
    """
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    ih, iw, _ = image.shape

    # face_detector.process expects RGB, hence the round-trip conversion.
    results = face_detector.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if not results.detections:
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    for detection in results.detections:
        # Bounding box is relative [0, 1]; scale to pixel coordinates.
        bboxC = detection.location_data.relative_bounding_box
        x = int(bboxC.xmin * iw)
        y = int(bboxC.ymin * ih)
        w = int(bboxC.width * iw)
        h = int(bboxC.height * ih)
        # Clamp only the top-left corner; w/h may still run past the
        # right/bottom edge (numpy slicing silently clips that).
        x, y = max(x, 0), max(y, 0)

        # Detect mesh
        # NOTE(review): the mesh is recomputed inside the loop on `image`,
        # which by the second iteration already carries the previous face's
        # overlay — confirm whether the mesh should instead run once on the
        # pristine image. Also, face_mesh was built with max_num_faces=1 and
        # only landmarks[0] is used, so every detection in a multi-face frame
        # receives the same (first) mesh's shape.
        results_mesh = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        if results_mesh.multi_face_landmarks:
            landmarks = results_mesh.multi_face_landmarks[0].landmark
            face_shape = detect_face_shape(landmarks, iw, ih)
        else:
            # No mesh found for this frame: default shape.
            face_shape = "oval"

        skin_tone = detect_skin_tone(image, x, y, w, h)
        # overlay_mask returns a new image; rebind so later faces draw on
        # top of earlier overlays.
        image = overlay_mask(image, face_shape, skin_tone, x, y, w, h)

    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
| 115 |
+
|
| 116 |
+
# Gradio UI
# live=True re-runs process_image on every input change (useful with a
# webcam feed); type="numpy" hands the callback an RGB ndarray.
demo = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="numpy", label="Upload or Snap Image"),
    outputs=gr.Image(label="Face Shape + Skin Tone + Mask Overlay"),
    live=True,
    title="Face Shape & Skin Tone Analyzer",
    description="This app detects face shape & skin tone and overlays a dynamic mask using OpenCV."
)

# Launch only when run as a script (Spaces imports this module too).
if __name__ == "__main__":
    demo.launch()
|