RICHERGIRL committed on
Commit
525dc2e
·
verified ·
1 Parent(s): c4b1b71

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +114 -122
app.py CHANGED
@@ -1,135 +1,127 @@
1
- import cv2
2
  import gradio as gr
 
3
  import numpy as np
4
- from mediapipe import solutions
5
  from sklearn.cluster import KMeans
6
 
7
- # Load the mask images based on face shape and skin tone
8
- mask_dict = {
9
- "oval": {
10
- "fair": "masks/oval_fair_mask.png",
11
- "medium": "masks/oval_medium_mask.png",
12
- "dark": "masks/oval_dark_mask.png"
13
- },
14
- "round": {
15
- "fair": "masks/round_fair_mask.png",
16
- "medium": "masks/round_medium_mask.png",
17
- "dark": "masks/round_dark_mask.png"
18
- }
19
- # Add more shapes and tones as necessary
20
- }
21
-
22
- # Function to overlay the mask on the face using OpenCV
23
- def overlay_mask(face_image, mask_path, face_x, face_y, face_w, face_h):
24
- # Load the mask
25
- mask = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)
26
-
27
- # Resize the mask to fit the face dimensions
28
- mask_resized = cv2.resize(mask, (face_w, face_h))
29
-
30
- # Create a mask and inverse mask (for transparency handling)
31
- mask_alpha = mask_resized[:, :, 3] / 255.0 # Alpha channel
32
- mask_rgb = mask_resized[:, :, :3]
33
-
34
- # Define the region of interest (ROI) on the face image
35
- roi = face_image[face_y:face_y + face_h, face_x:face_x + face_w]
36
-
37
- # Blend the mask onto the face using the alpha mask
38
- for c in range(0, 3): # For each color channel
39
- roi[:, :, c] = (1 - mask_alpha) * roi[:, :, c] + mask_alpha * mask_rgb[:, :, c]
40
-
41
- face_image[face_y:face_y + face_h, face_x:face_x + face_w] = roi
42
- return face_image
43
-
44
- # Function to detect face shape using MediaPipe FaceMesh
45
- def detect_face_shape(landmarks):
46
- # Extract relevant facial landmarks (e.g., eyes, chin, etc.)
47
- left_eye = landmarks[33]
48
- right_eye = landmarks[133]
49
- chin = landmarks[1]
50
- jawline = landmarks[7]
51
-
52
- # Calculate distances and ratios to infer face shape
53
- eye_distance = np.linalg.norm(np.array(left_eye) - np.array(right_eye))
54
- chin_distance = np.linalg.norm(np.array(chin) - np.array(jawline))
55
-
56
- # Classify face shape based on proportions (this can be adjusted based on your observations)
57
- if eye_distance > chin_distance:
58
- return "oval"
59
- else:
60
  return "round"
 
 
 
 
61
 
62
- # Function to detect skin tone based on KMeans clustering
63
- def detect_skin_tone(face_image, face_x, face_y, face_w, face_h):
64
- # Extract the face region
65
- face_region = face_image[face_y:face_y + face_h, face_x:face_x + face_w]
66
-
67
- # Convert the face region to RGB (if it's BGR)
68
- face_region_rgb = cv2.cvtColor(face_region, cv2.COLOR_BGR2RGB)
69
-
70
- # Reshape for clustering
71
- face_region_flat = face_region_rgb.reshape((-1, 3))
72
-
73
- # Apply KMeans to classify the skin tone based on the dominant color
74
- kmeans = KMeans(n_clusters=1, random_state=0).fit(face_region_flat)
75
-
76
- # Get the cluster center (dominant color)
77
- dominant_color = kmeans.cluster_centers_[0]
78
-
79
- # Classify skin tone based on dominant color
80
- if dominant_color[0] < 100 and dominant_color[1] < 120 and dominant_color[2] < 130: # darker shades
81
- return "dark"
82
- elif dominant_color[0] > 120 and dominant_color[1] > 130 and dominant_color[2] > 140: # fair shades
83
  return "fair"
84
- else:
85
  return "medium"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
87
- # Function to process the image: detect face, determine shape and skin tone, and apply mask
88
  def process_image(image):
89
- # Convert to grayscale for face detection
90
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
91
-
92
- # Initialize face detection
93
- face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
94
- faces = face_cascade.detectMultiScale(gray, 1.1, 4)
95
-
96
- # Initialize MediaPipe FaceMesh for face shape detection
97
- mp_face_mesh = solutions.face_mesh.FaceMesh(static_image_mode=True)
98
- mp_results = mp_face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
99
-
100
- # Get face shape (assuming landmarks are available from MediaPipe FaceMesh)
101
- face_shape = "oval" # Default value; it should be determined by landmark analysis
102
- skin_tone = "fair" # Default value; it should be determined by skin tone detection
103
-
104
- if mp_results.multi_face_landmarks:
105
- for landmarks in mp_results.multi_face_landmarks:
106
- face_shape = detect_face_shape(landmarks)
107
-
108
- # Assume we are getting the first face (you can extend this if there are multiple faces)
109
- for (x, y, w, h) in faces:
110
- # Detect skin tone for the face region
 
111
  skin_tone = detect_skin_tone(image, x, y, w, h)
112
-
113
- # Select mask based on face shape and skin tone
114
- mask_path = mask_dict[face_shape][skin_tone]
115
-
116
- # Apply the mask
117
- image = overlay_mask(image, mask_path, x, y, w, h)
118
-
119
- return image
120
-
121
- # Gradio interface
122
- def gradio_interface(image):
123
- processed_image = process_image(image)
124
- return processed_image
125
-
126
- # Create Gradio interface
127
- iface = gr.Interface(
128
- fn=gradio_interface,
129
- inputs=gr.inputs.Image(type="numpy", label="Upload Image or Use Webcam"),
130
- outputs=gr.outputs.Image(type="numpy", label="Image with Mask"),
131
- live=True
132
  )
133
 
134
- # Launch the Gradio interface
135
- iface.launch()
 
 
1
  import gradio as gr
2
+ import cv2
3
  import numpy as np
4
+ import mediapipe as mp
5
  from sklearn.cluster import KMeans
6
 
7
# Initialize MediaPipe modules
# These module-level detectors are shared by process_image(); their names
# (face_detector, face_mesh) are part of this file's internal API.
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh

# NOTE(review): model_selection=0 is MediaPipe's short-range face model
# (close-up / webcam distances, per MediaPipe docs) — confirm this matches
# the intended deployment.
face_detector = mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.6)
# Static-image mode, single face; refine_landmarks enables the refined
# (iris/lip) landmark set.
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, refine_landmarks=True)
13
+
14
def detect_face_shape(landmarks, image_width, image_height):
    """Classify a face as "round", "square", or "oval" from FaceMesh landmarks.

    The width/height ratio of the face box — jaw corner to jaw corner versus
    forehead to chin, in pixels — decides the label.

    Args:
        landmarks: indexable collection of FaceMesh landmarks with
            normalized ``.x`` / ``.y`` attributes.
        image_width: image width in pixels.
        image_height: image height in pixels.

    Returns:
        One of "round", "square", "oval".
    """
    # FaceMesh indices: 234 = left jaw corner, 454 = right jaw corner,
    # 152 = chin tip, 10 = forehead top.
    left_x = int(landmarks[234].x * image_width)
    right_x = int(landmarks[454].x * image_width)
    chin_y = int(landmarks[152].y * image_height)
    forehead_y = int(landmarks[10].y * image_height)

    face_width = abs(right_x - left_x)
    face_height = abs(chin_y - forehead_y)

    # Degenerate (zero-height) boxes yield ratio 0, which classifies as oval.
    ratio = face_width / face_height if face_height != 0 else 0

    if ratio > 1.05:
        return "round"
    if 0.95 < ratio <= 1.05:
        return "square"
    return "oval"
37
 
38
def detect_skin_tone(image, x, y, w, h):
    """Classify the skin tone of a face region as "fair", "medium", or "dark".

    Clusters the region's pixel colors with KMeans and thresholds the mean
    brightness of the *dominant* (largest) cluster.

    Args:
        image: BGR image (H, W, 3) as produced by OpenCV.
        x, y, w, h: face bounding box in pixel coordinates (assumed to lie
            within the image — caller is expected to clamp).

    Returns:
        One of "fair", "medium", "dark".
    """
    roi = image[y:y + h, x:x + w]
    roi_rgb = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
    pixels = roi_rgb.reshape((-1, 3))

    # random_state pins the clustering: the original was unseeded, so the
    # same image could nondeterministically flip between tone labels.
    kmeans = KMeans(n_clusters=3, n_init=10, random_state=0)
    labels = kmeans.fit_predict(pixels)

    # Take the center of the LARGEST cluster as the dominant color.  The
    # original read cluster_centers_[0], which is an arbitrary cluster —
    # not the dominant one — defeating the purpose of clustering.
    dominant = kmeans.cluster_centers_[np.bincount(labels).argmax()]
    brightness = float(np.mean(dominant))

    if brightness > 200:
        return "fair"
    if brightness > 100:
        return "medium"
    return "dark"
54
+
55
def overlay_mask(image, face_shape, skin_tone, x, y, w, h):
    """Blend a colored shape over the face box and label it.

    The drawn shape mirrors the detected face shape (ellipse / circle /
    rectangle) and its color encodes the detected skin tone.

    Args:
        image: BGR image (H, W, 3).
        face_shape: "oval", "round", "square", or any other string
            (falls back to a rectangle).
        skin_tone: "fair", "medium", or "dark" (unknown tones draw white).
        x, y, w, h: face bounding box in pixel coordinates.

    Returns:
        A new blended BGR image with a text label; the input is not modified.
    """
    # BGR colors keyed by tone.
    color_dict = {
        "fair": (255, 182, 193),   # light pink
        "medium": (0, 191, 255),   # deep sky blue
        "dark": (138, 43, 226)     # blue violet
    }
    color = color_dict.get(skin_tone, (255, 255, 255))

    # Draw the shape on a black canvas, then alpha-blend it over the image.
    # (The original also made an `overlay = image.copy()` that was never
    # used — removed as dead code.)
    mask = np.zeros_like(image, dtype=np.uint8)

    if face_shape == "oval":
        center = (x + w // 2, y + h // 2)
        axes = (w // 2, h // 2)
        cv2.ellipse(mask, center, axes, 0, 0, 360, color, -1)
    elif face_shape == "round":
        radius = min(w, h) // 2
        center = (x + w // 2, y + h // 2)
        cv2.circle(mask, center, radius, color, -1)
    else:
        # "square" and any unknown shape both get a filled rectangle; the
        # original had two byte-identical branches for these cases.
        cv2.rectangle(mask, (x, y), (x + w, y + h), color, -1)

    alpha = 0.4  # overlay opacity
    blended = cv2.addWeighted(mask, alpha, image, 1 - alpha, 0)

    cv2.putText(blended, f"{face_shape}, {skin_tone}", (x, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)

    return blended
86
 
 
87
def process_image(image):
    """Detect faces, classify shape and skin tone, and draw the overlay.

    Args:
        image: RGB image (numpy array) as supplied by Gradio.

    Returns:
        RGB image with the mask overlay drawn on each detected face, or the
        input unchanged when no face is detected.
    """
    # Work in BGR internally (OpenCV convention); convert back on return.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    ih, iw, _ = image.shape

    results = face_detector.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if not results.detections:
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    for detection in results.detections:
        bboxC = detection.location_data.relative_bounding_box
        x = int(bboxC.xmin * iw)
        y = int(bboxC.ymin * ih)
        w = int(bboxC.width * iw)
        h = int(bboxC.height * ih)
        # MediaPipe's relative box can extend beyond the frame.  The
        # original clamped only x/y, so an edge face could yield an empty
        # ROI and crash KMeans in detect_skin_tone; clamp w/h too and skip
        # degenerate boxes.
        x, y = max(x, 0), max(y, 0)
        w, h = min(w, iw - x), min(h, ih - y)
        if w <= 0 or h <= 0:
            continue

        # FaceMesh landmarks drive the face-shape classification; fall back
        # to "oval" when the mesh finds no face.
        results_mesh = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        if results_mesh.multi_face_landmarks:
            landmarks = results_mesh.multi_face_landmarks[0].landmark
            face_shape = detect_face_shape(landmarks, iw, ih)
        else:
            face_shape = "oval"

        skin_tone = detect_skin_tone(image, x, y, w, h)
        image = overlay_mask(image, face_shape, skin_tone, x, y, w, h)

    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
115
+
116
# Gradio UI
# Wires the end-to-end pipeline (face detection -> shape/tone classification
# -> mask overlay) to a simple image-in / image-out web interface.
demo = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="numpy", label="Upload or Snap Image"),   # file upload or webcam snapshot
    outputs=gr.Image(label="Face Shape + Skin Tone + Mask Overlay"),
    live=True,  # re-run the pipeline on every input change
    title="Face Shape & Skin Tone Analyzer",
    description="This app detects face shape & skin tone and overlays a dynamic mask using OpenCV."
)

if __name__ == "__main__":
    demo.launch()