LittleMonkeyLab committed on
Commit
1f80bce
·
verified ·
1 Parent(s): a408f31

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +134 -33
app.py CHANGED
@@ -18,6 +18,69 @@ face_mesh = mp_face_mesh.FaceMesh(
18
  min_detection_confidence=0.5
19
  )
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  def analyze_expression(image):
22
  if image is None:
23
  return None, "No image provided"
@@ -38,53 +101,86 @@ def analyze_expression(image):
38
  landmarks = results.multi_face_landmarks[0]
39
  points = np.array([[lm.x, lm.y, lm.z] for lm in landmarks.landmark])
40
 
41
- # Calculate basic measurements
42
- # Smile detection (AU12)
43
- smile_width = abs(points[61][0] - points[291][0])
44
- smile_height = (points[61][1] + points[291][1])/2 - points[0][1]
45
-
46
- # Brow movement (AU1, AU4)
47
- brow_height = abs(points[52][1] - points[65][1])
48
- brow_furrow = abs(points[9][0] - points[336][0])
49
-
50
- # Mouth opening (AU25, AU26)
51
- mouth_open = abs(points[13][1] - points[14][1])
 
 
52
 
53
- # Simple emotion classification
54
  emotions = {
55
- "Happy": (smile_width > 0.3 and smile_height < 0),
56
- "Sad": (smile_height > 0 and brow_height > 0.1),
57
- "Surprised": (mouth_open > 0.1 and brow_height > 0.15),
58
- "Neutral": (smile_width < 0.3 and mouth_open < 0.1 and brow_height < 0.1)
59
  }
60
 
61
  # Create visualization
62
  viz_image = image.copy()
63
  h, w = viz_image.shape[:2]
64
 
65
- # Draw landmarks
66
- for landmark in landmarks.landmark:
67
- pos = (int(landmark.x * w), int(landmark.y * h))
68
- cv2.circle(viz_image, pos, 1, (0, 255, 0), -1)
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  # Add emotion text
71
  detected_emotions = [emotion for emotion, is_present in emotions.items() if is_present]
72
  emotion_text = " + ".join(detected_emotions) if detected_emotions else "Neutral"
73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  # Add text with black background
75
- text = f"Expression: {emotion_text}"
76
  font = cv2.FONT_HERSHEY_SIMPLEX
77
- font_scale = 0.8
78
  thickness = 2
79
- (text_w, text_h), _ = cv2.getTextSize(text, font, font_scale, thickness)
80
-
81
- # Draw black background
82
- cv2.rectangle(viz_image, (10, 10), (text_w + 20, text_h + 20), (0, 0, 0), -1)
83
 
84
- # Draw text
85
- cv2.putText(viz_image, text, (15, text_h + 15), font, font_scale, (255, 255, 255), thickness)
 
 
 
86
 
87
- return viz_image, emotion_text
88
 
89
  # Create Gradio interface
90
  with gr.Blocks(css="app.css") as demo:
@@ -102,13 +198,18 @@ with gr.Blocks(css="app.css") as demo:
102
  gr.Markdown("""
103
  ### Instructions:
104
  1. Upload a clear facial image
105
- 2. View the detected expression
106
- 3. Green dots show facial landmarks
 
 
 
 
 
107
  """)
108
 
109
  with gr.Column():
110
  output_image = gr.Image(label="Analysis")
111
- emotion_text = gr.Textbox(label="Detected Expression")
112
 
113
  # Footer
114
  with gr.Row(elem_classes="center-content"):
@@ -120,7 +221,7 @@ with gr.Blocks(css="app.css") as demo:
120
  input_image.change(
121
  fn=analyze_expression,
122
  inputs=input_image,
123
- outputs=[output_image, emotion_text]
124
  )
125
 
126
  if __name__ == "__main__":
 
18
  min_detection_confidence=0.5
19
  )
20
 
21
# Key facial landmark indices (MediaPipe Face Mesh topology) grouped by feature.
FACIAL_LANDMARKS = {
    'left_brow': [52, 65, 46],          # inner, middle, outer
    'right_brow': [285, 295, 276],      # inner, middle, outer
    'left_eye': [159, 145, 133],        # top, bottom, outer
    'right_eye': [386, 374, 362],       # top, bottom, outer
    'nose': [6, 197],                   # bridge, tip
    'mouth': [61, 291, 0, 17, 13, 14],  # left corner, right corner, top lip, bottom lip, upper inner, lower inner
    'jaw': [17, 84, 314]                # center, left, right
}

def calculate_distances(points, landmarks):
    """Calculate face-height-normalized distances between facial landmarks.

    The measurements approximate cues for FACS Action Units (AU1/2/4/5/12/25/26)
    and are normalized by the nose-bridge-to-jaw distance so they are
    independent of face size in the frame.

    Args:
        points: (N, 3) array of landmark coordinates; indices must cover the
            values used in FACIAL_LANDMARKS (MediaPipe Face Mesh yields 468).
        landmarks: unused; retained for backward compatibility with callers
            that pass the raw MediaPipe landmark list.

    Returns:
        Dict mapping measurement names to normalized scalar values, or an
        empty dict when the normalizing face height is zero (e.g. degenerate
        or missing landmark data).
    """
    def distance(p1_idx, p2_idx):
        # Euclidean distance between two landmark points. Out-of-range or
        # malformed indices degrade to 0.0 (best-effort, as before) — but we
        # now catch only the specific failures instead of a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.
        try:
            return np.linalg.norm(points[p1_idx] - points[p2_idx])
        except (IndexError, TypeError):
            return 0.0

    # Face height (nose bridge -> jaw center) is the normalization baseline.
    face_height = distance(FACIAL_LANDMARKS['nose'][0], FACIAL_LANDMARKS['jaw'][0])
    if face_height == 0:
        # Cannot normalize — signal "no usable measurements" to the caller.
        return {}

    measurements = {
        # Inner brow raising (AU1): mean distance of inner brow points from nose bridge.
        'inner_brow_raise': (
            distance(FACIAL_LANDMARKS['left_brow'][0], FACIAL_LANDMARKS['nose'][0]) +
            distance(FACIAL_LANDMARKS['right_brow'][0], FACIAL_LANDMARKS['nose'][0])
        ) / (2 * face_height),

        # Outer brow raising (AU2): mean distance of outer brow points from nose bridge.
        'outer_brow_raise': (
            distance(FACIAL_LANDMARKS['left_brow'][2], FACIAL_LANDMARKS['nose'][0]) +
            distance(FACIAL_LANDMARKS['right_brow'][2], FACIAL_LANDMARKS['nose'][0])
        ) / (2 * face_height),

        # Brow lowering (AU4): inner-brow separation shrinks when furrowing.
        'brow_furrow': distance(FACIAL_LANDMARKS['left_brow'][0], FACIAL_LANDMARKS['right_brow'][0]) / face_height,

        # Eye opening (AU5): mean top-to-bottom lid distance of both eyes.
        'eye_opening': (
            distance(FACIAL_LANDMARKS['left_eye'][0], FACIAL_LANDMARKS['left_eye'][1]) +
            distance(FACIAL_LANDMARKS['right_eye'][0], FACIAL_LANDMARKS['right_eye'][1])
        ) / (2 * face_height),

        # Smile width (AU12): mouth-corner to mouth-corner distance.
        'smile_width': distance(FACIAL_LANDMARKS['mouth'][0], FACIAL_LANDMARKS['mouth'][1]) / face_height,

        # Mouth opening (AU25/26): inner-lip vertical gap.
        'mouth_opening': distance(FACIAL_LANDMARKS['mouth'][4], FACIAL_LANDMARKS['mouth'][5]) / face_height,

        # Lip corner height relative to top lip (smile vs. frown): negative when
        # corners sit above the top lip (image y grows downward).
        'lip_corner_height': (
            (points[FACIAL_LANDMARKS['mouth'][0]][1] + points[FACIAL_LANDMARKS['mouth'][1]][1]) / 2 -
            points[FACIAL_LANDMARKS['mouth'][2]][1]
        ) / face_height
    }

    return measurements
83
+
84
  def analyze_expression(image):
85
  if image is None:
86
  return None, "No image provided"
 
101
  landmarks = results.multi_face_landmarks[0]
102
  points = np.array([[lm.x, lm.y, lm.z] for lm in landmarks.landmark])
103
 
104
+ # Calculate facial measurements
105
+ measurements = calculate_distances(points, landmarks)
106
+
107
+ # Analyze Action Units
108
+ aus = {
109
+ 'AU01': measurements['inner_brow_raise'] > 0.15, # Inner Brow Raiser
110
+ 'AU02': measurements['outer_brow_raise'] > 0.15, # Outer Brow Raiser
111
+ 'AU04': measurements['brow_furrow'] < 0.2, # Brow Lowerer
112
+ 'AU05': measurements['eye_opening'] > 0.12, # Upper Lid Raiser
113
+ 'AU12': measurements['smile_width'] > 0.45, # Lip Corner Puller
114
+ 'AU25': measurements['mouth_opening'] > 0.1, # Lips Part
115
+ 'AU26': measurements['mouth_opening'] > 0.2 # Jaw Drop
116
+ }
117
 
118
+ # Emotion classification based on AUs
119
  emotions = {
120
+ "Happy": (aus['AU12'] and measurements['lip_corner_height'] < 0),
121
+ "Sad": (not aus['AU12'] and measurements['lip_corner_height'] > 0 and (aus['AU01'] or aus['AU04'])),
122
+ "Surprised": (aus['AU01'] and aus['AU02'] and (aus['AU25'] or aus['AU26'])),
123
+ "Neutral": not any([aus['AU01'], aus['AU02'], aus['AU04'], aus['AU12'], aus['AU26']])
124
  }
125
 
126
  # Create visualization
127
  viz_image = image.copy()
128
  h, w = viz_image.shape[:2]
129
 
130
+ # Draw facial landmarks with different colors for key points
131
+ colors = {
132
+ 'brow': (0, 255, 0), # Green
133
+ 'eye': (255, 255, 0), # Yellow
134
+ 'nose': (0, 255, 255), # Cyan
135
+ 'mouth': (255, 0, 255), # Magenta
136
+ 'jaw': (255, 128, 0) # Orange
137
+ }
138
+
139
+ # Draw landmarks with feature-specific colors
140
+ for feature, points_list in FACIAL_LANDMARKS.items():
141
+ color = colors.get(feature.split('_')[0], (0, 255, 0))
142
+ for point_idx in points_list:
143
+ pos = (int(landmarks.landmark[point_idx].x * w),
144
+ int(landmarks.landmark[point_idx].y * h))
145
+ cv2.circle(viz_image, pos, 2, color, -1)
146
 
147
  # Add emotion text
148
  detected_emotions = [emotion for emotion, is_present in emotions.items() if is_present]
149
  emotion_text = " + ".join(detected_emotions) if detected_emotions else "Neutral"
150
 
151
+ # Create detailed analysis text
152
+ analysis = f"Expression: {emotion_text}\n\nActive Action Units:\n"
153
+ au_descriptions = {
154
+ 'AU01': 'Inner Brow Raiser',
155
+ 'AU02': 'Outer Brow Raiser',
156
+ 'AU04': 'Brow Lowerer',
157
+ 'AU05': 'Upper Lid Raiser',
158
+ 'AU12': 'Lip Corner Puller (Smile)',
159
+ 'AU25': 'Lips Part',
160
+ 'AU26': 'Jaw Drop'
161
+ }
162
+
163
+ active_aus = [f"{au} ({desc})" for au, active in aus.items() if active
164
+ for au_name, desc in au_descriptions.items() if au == au_name]
165
+
166
+ if active_aus:
167
+ analysis += "\n".join(f"• {au}" for au in active_aus)
168
+ else:
169
+ analysis += "No significant Action Units detected"
170
+
171
  # Add text with black background
 
172
  font = cv2.FONT_HERSHEY_SIMPLEX
173
+ font_scale = 0.7
174
  thickness = 2
175
+ y_pos = 30
 
 
 
176
 
177
+ for line in emotion_text.split('\n'):
178
+ (text_w, text_h), _ = cv2.getTextSize(line, font, font_scale, thickness)
179
+ cv2.rectangle(viz_image, (10, y_pos - text_h - 5), (text_w + 20, y_pos + 5), (0, 0, 0), -1)
180
+ cv2.putText(viz_image, line, (15, y_pos), font, font_scale, (255, 255, 255), thickness)
181
+ y_pos += text_h + 20
182
 
183
+ return viz_image, analysis
184
 
185
  # Create Gradio interface
186
  with gr.Blocks(css="app.css") as demo:
 
198
  gr.Markdown("""
199
  ### Instructions:
200
  1. Upload a clear facial image
201
+ 2. View the detected expression and Action Units (AUs)
202
+ 3. Colored dots show key facial features:
203
+ - Green: Eyebrows
204
+ - Yellow: Eyes
205
+ - Cyan: Nose
206
+ - Magenta: Mouth
207
+ - Orange: Jaw
208
  """)
209
 
210
  with gr.Column():
211
  output_image = gr.Image(label="Analysis")
212
+ analysis_text = gr.Textbox(label="Expression Analysis", lines=8)
213
 
214
  # Footer
215
  with gr.Row(elem_classes="center-content"):
 
221
  input_image.change(
222
  fn=analyze_expression,
223
  inputs=input_image,
224
+ outputs=[output_image, analysis_text]
225
  )
226
 
227
  if __name__ == "__main__":