ThatOrJohn commited on
Commit
7fa72fb
·
verified ·
1 Parent(s): 176fb3d

Initial app

Browse files
Files changed (3) hide show
  1. .gitignore +3 -0
  2. app.py +267 -0
  3. requirements.txt +67 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ .venv
2
+ .env
3
+ token.txt
app.py ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import gradio as gr
3
+ from roboflow import Roboflow
4
+ import numpy as np
5
+ from PIL import Image
6
+ import os
7
+ from dotenv import load_dotenv
8
+ import tempfile
9
+ import time
10
+ import random
11
+
12
# Load environment variables from a local .env file (ROBOFLOW_* settings).
load_dotenv()

# Initialize Roboflow
api_key = os.getenv("ROBOFLOW_API_KEY")
if not api_key:
    raise ValueError("ROBOFLOW_API_KEY not found in environment variables.")
roboflow = Roboflow(api_key=api_key)

workspace = roboflow.workspace(
    os.getenv("ROBOFLOW_WORKSPACE", "wayupuk-sommuang"))
project = workspace.project(
    os.getenv("ROBOFLOW_PROJECT", "face-detection-vswnd"))
# os.getenv returns a str when the variable is set, but the original default
# was the int 11 — normalize so version() always receives an int.
version = project.version(int(os.getenv("ROBOFLOW_VERSION", "11")))

# Hosted inference model used by detect_faces().
model = version.model
27
+
28
# Emoji overlay images keyed by a short name.  Each successfully loaded entry
# is a BGRA array read with IMREAD_UNCHANGED so the alpha channel survives.
emoji_images = {}
emoji_paths = {
    "cat": 'images/cat_face.png',
    "censored": 'images/censored.png',
    "expressionless": 'images/expressionless.png',
    "monkey": 'images/monky_see_no_evil.png'
}
for key, path in emoji_paths.items():
    # Guard-clause style: skip missing or unreadable files, keep the rest.
    if not os.path.exists(path):
        print(f"Emoji file not found: {path}")
        continue
    loaded = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if loaded is None:
        print(f"Failed to load emoji: {path}")
        continue
    emoji_images[key] = loaded

# Fallback emoji (yellow square with transparency) so the overlay step always
# has at least one image available.
if not emoji_images:
    print("No emojis loaded. Using fallback.")
    fallback_emoji = np.zeros((50, 50, 4), dtype=np.uint8)
    fallback_emoji[..., :3] = (255, 255, 0)  # Yellow
    fallback_emoji[..., 3] = 255  # Fully opaque
    emoji_images["fallback"] = fallback_emoji

# Cross-frame tracking state: face id -> (last_x, last_y, emoji_key).
face_to_emoji = {}
emoji_keys = list(emoji_images.keys())
57
+
58
+
59
def trim_transparency(image):
    """Crop a BGRA image to the bounding box of its non-transparent pixels.

    Images without a 4th (alpha) channel, or whose alpha channel is entirely
    zero, are returned unchanged.
    """
    if image.shape[2] != 4:
        return image

    # Bounding rectangle of every pixel with non-zero alpha.
    opaque_points = cv2.findNonZero(image[..., 3])
    if opaque_points is None:
        return image

    x, y, w, h = cv2.boundingRect(opaque_points)
    return image[y:y + h, x:x + w]
71
+
72
+
73
def resize_emoji_to_fit(emoji, target_width, min_height):
    """Resize an emoji image to span ``target_width``, enforcing a minimum height.

    The emoji is first trimmed to its opaque bounding box, then scaled to
    ``target_width`` preserving aspect ratio.  If the resulting height falls
    below ``min_height``, both dimensions are scaled up uniformly (so the
    width may then exceed ``target_width``).

    Returns the resized BGRA/BGR image.
    """
    emoji = trim_transparency(emoji)
    height, width = emoji.shape[:2]
    aspect_ratio = width / height

    new_width = int(target_width)
    new_height = int(new_width / aspect_ratio)

    # int() truncation can yield 0 for tiny target widths or extreme aspect
    # ratios, which would divide by zero below and hand cv2.resize a
    # zero-size output — clamp both dimensions to at least 1 pixel.
    new_width = max(new_width, 1)
    new_height = max(new_height, 1)

    if new_height < min_height:
        # Uniform upscale so the aspect ratio is preserved.
        scale_factor = min_height / new_height
        new_height = int(new_height * scale_factor)
        new_width = int(new_width * scale_factor)

    return cv2.resize(emoji, (new_width, new_height), interpolation=cv2.INTER_AREA)
87
+
88
+
89
def match_faces(current_faces, threshold=50):
    """Associate current detections with persistent face ids and emojis.

    Each face dict (with ``x``/``y`` center keys) is greedily matched to the
    first previously tracked face whose center lies within ``threshold``
    pixels; matched faces keep their emoji, new faces get a random one.
    Updates the module-level ``face_to_emoji`` map as a side effect and
    returns a list of ``(face, face_id, emoji_key)`` tuples.
    """
    global face_to_emoji
    new_face_to_emoji = {}
    matched_faces = []
    next_id_num = 0  # counter for minting fresh, collision-free ids

    for face in current_faces:
        center_x, center_y = face['x'], face['y']
        face_id = None

        # Greedy nearest-previous match against last frame's tracked faces.
        for old_id, (old_x, old_y, emoji_key) in face_to_emoji.items():
            distance = np.sqrt((center_x - old_x)**2 + (center_y - old_y)**2)
            if distance < threshold:
                face_id = old_id
                break

        if face_id is None:
            # New face: mint an id that cannot collide with an id carried
            # over from the previous frame.  (The original len()-based scheme
            # could reuse a matched old id and silently overwrite its entry.)
            while (f"face_{next_id_num}" in new_face_to_emoji
                   or f"face_{next_id_num}" in face_to_emoji):
                next_id_num += 1
            face_id = f"face_{next_id_num}"
            emoji_key = random.choice(emoji_keys)
        else:
            # Keep the emoji previously assigned to this face.
            _, _, emoji_key = face_to_emoji[face_id]
        new_face_to_emoji[face_id] = (center_x, center_y, emoji_key)

        matched_faces.append((face, face_id, emoji_key))

    face_to_emoji = new_face_to_emoji
    return matched_faces
116
+
117
+
118
def overlay_emoji_on_faces(frame, last_faces=None):
    """Draw each tracked face's emoji over the face region of *frame*.

    ``frame`` is a BGR image (modified in place and also returned);
    ``last_faces`` is the prediction list from detect_faces(), where each
    entry has center ``x``/``y`` and ``width``/``height`` keys.  Returns the
    frame unchanged when there are no detections or no emojis loaded.
    """
    frame_height, frame_width = frame.shape[:2]
    if last_faces is None:
        return frame

    if not emoji_keys:
        return frame

    # Re-associate detections with persistent ids/emojis across frames.
    matched_faces = match_faces(last_faces)

    for face, face_id, emoji_key in matched_faces:
        center_x, center_y = face['x'], face['y']
        width, height = face['width'], face['height']

        # Extend the bounding box height to include the mouth and chin
        height_scale = 1.8
        extended_height = height * height_scale
        # Shift the center down so the extension grows toward the chin only.
        extended_center_y = center_y + (height * (height_scale - 1) / 2)

        # Use the assigned emoji
        emoji = emoji_images[emoji_key]

        # Resize emoji to match the width and ensure minimum height
        emoji_resized = resize_emoji_to_fit(emoji, width, extended_height)

        # Get emoji dimensions
        emoji_height, emoji_width = emoji_resized.shape[:2]

        # Calculate top-left corner to center the emoji vertically on the extended bounding box
        face_top = int(extended_center_y - extended_height / 2)
        face_bottom = int(extended_center_y + extended_height / 2)
        emoji_y_center = face_top + (face_bottom - face_top) // 2

        # Adjust the y position with a reduced dynamic offset
        # (larger faces get a proportionally smaller downward nudge).
        offset_factor = max(0.1, 0.5 - height / 300)  # Reduced offset
        vertical_offset = int(height * offset_factor)
        x = int(center_x - emoji_width / 2)
        y = int(emoji_y_center - emoji_height / 2 + vertical_offset)

        # Ensure coordinates are within frame bounds
        x = max(0, x)
        y = max(0, y)
        if x + emoji_width > frame_width:
            emoji_width = frame_width - x
        if y + emoji_height > frame_height:
            emoji_height = frame_height - y

        # Face entirely clipped out of frame — nothing to draw.
        if emoji_width <= 0 or emoji_height <= 0:
            continue

        # Resize emoji again to fit the adjusted region
        emoji_resized = cv2.resize(emoji_resized, (int(emoji_width), int(
            emoji_height)), interpolation=cv2.INTER_AREA)

        # Check if the emoji has an alpha channel
        if emoji_resized.shape[2] == 4:
            emoji_rgb = emoji_resized[..., :3]
            alpha = emoji_resized[..., 3] / 255.0

            frame_region = frame[y:y+int(emoji_height), x:x+int(emoji_width)]
            if frame_region.shape[2] != 3:
                continue

            # Clipping above may have changed the region size; keep the
            # emoji and its alpha mask the same shape as the target region.
            if frame_region.shape[:2] != emoji_rgb.shape[:2]:
                emoji_rgb = cv2.resize(
                    emoji_rgb, (frame_region.shape[1], frame_region.shape[0]), interpolation=cv2.INTER_AREA)
                alpha = cv2.resize(
                    alpha, (frame_region.shape[1], frame_region.shape[0]), interpolation=cv2.INTER_AREA)

            # Per-pixel alpha blend: frame*(1-a) + emoji*a.
            alpha_3d = alpha[..., np.newaxis]
            blended = (frame_region * (1 - alpha_3d) +
                       emoji_rgb * alpha_3d).astype(np.uint8)
            frame[y:y+int(emoji_height), x:x+int(emoji_width)] = blended
        else:
            # No alpha channel: hard-paste the emoji's color channels.
            frame[y:y+int(emoji_height), x:x+int(emoji_width)
                  ] = emoji_resized[..., :3]

        # Debug output
        print(f"Face ID: {face_id}, Center: ({center_x}, {center_y}), Size: {width}x{height}, Extended height: {extended_height}, Emoji at ({x}, {y}), Size: {emoji_width}x{emoji_height}, Emoji used: {emoji_key}, Vertical offset: {vertical_offset}")

    return frame
199
+
200
+
201
def detect_faces(frame):
    """Run the Roboflow face model on a BGR frame.

    The frame is converted to RGB and written to a temporary JPEG because the
    hosted model API consumes a file path.  The temp file is always removed,
    even when prediction fails.  Returns the list of prediction dicts.
    """
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp:
        snapshot_path = tmp.name
        Image.fromarray(rgb).save(tmp, format='JPEG')

    try:
        response = model.predict(
            snapshot_path, confidence=40, overlap=30).json()
        return response['predictions']
    finally:
        if os.path.exists(snapshot_path):
            os.unlink(snapshot_path)
214
+
215
+
216
def webcam_interface():
    """Generator streaming webcam frames with emoji overlays to Gradio.

    Opens the default webcam at 640x480 and yields PIL RGB images forever.
    Face detection is throttled (see note below); between detections the
    most recent result (``last_faces``) is reused for the overlay so the
    stream stays smooth.  The capture device is released on exit.
    """
    capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        print("Error: Could not open webcam.")
        return

    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    frame_count = 0
    skip_frames = 10          # detect on every 10th frame...
    last_faces = None
    last_detection_time = 0
    min_detection_interval = 0.2  # ...or when this many seconds have passed

    try:
        while True:
            ret, frame = capture.read()
            if not ret:
                print("Error: Could not read frame.")
                break

            frame_count += 1
            current_time = time.time()

            # NOTE(review): with `or`, the elapsed-time clause alone triggers
            # a remote detection whenever 0.2s have passed, which at typical
            # frame rates is nearly every frame — if the intent was to
            # rate-limit API calls, `and` may have been meant. Confirm.
            if (frame_count % skip_frames == 0 or
                (current_time - last_detection_time) >= min_detection_interval):
                last_faces = detect_faces(frame)
                last_detection_time = current_time

            frame = overlay_emoji_on_faces(frame, last_faces)

            # Gradio expects RGB; OpenCV delivers BGR.
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            pil_img = Image.fromarray(frame_rgb)
            yield pil_img

    finally:
        capture.release()
254
+
255
+
256
# Wire the webcam generator into a live-streaming Gradio UI.
iface = gr.Interface(
    fn=webcam_interface,
    inputs=[],
    outputs=gr.Image(streaming=True),
    title="Face Detection with Emoji Overlay",
    description="This app detects faces in the camera feed and overlays emojis.",
    live=True,
)

# Launch only when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()
requirements.txt ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==24.1.0
2
+ annotated-types==0.7.0
3
+ anyio==4.9.0
4
+ certifi==2025.4.26
5
+ charset-normalizer==3.4.1
6
+ click==8.1.8
7
+ contourpy==1.3.2
8
+ cycler==0.12.1
9
+ exceptiongroup==1.2.2
10
+ fastapi==0.115.12
11
+ ffmpy==0.5.0
12
+ filelock==3.18.0
13
+ filetype==1.2.0
14
+ fonttools==4.57.0
15
+ fsspec==2025.3.2
16
+ gradio==5.27.0
17
+ gradio_client==1.9.0
18
+ groovy==0.1.2
19
+ h11==0.16.0
20
+ httpcore==1.0.9
21
+ httpx==0.28.1
22
+ huggingface-hub==0.30.2
23
+ idna==3.7
24
+ Jinja2==3.1.6
25
+ kiwisolver==1.4.8
26
+ markdown-it-py==3.0.0
27
+ MarkupSafe==3.0.2
28
+ matplotlib==3.10.1
29
+ mdurl==0.1.2
30
+ numpy==2.2.5
31
+ opencv-python==4.11.0.86
32
+ opencv-python-headless==4.10.0.84
33
+ orjson==3.10.16
34
+ packaging==25.0
35
+ pandas==2.2.3
36
+ pillow==11.2.1
37
+ pillow_heif==0.22.0
38
+ pydantic==2.11.3
39
+ pydantic_core==2.33.1
40
+ pydub==0.25.1
41
+ Pygments==2.19.1
42
+ pyparsing==3.2.3
43
+ python-dateutil==2.9.0.post0
44
+ python-dotenv==1.1.0
45
+ python-multipart==0.0.20
46
+ pytz==2025.2
47
+ PyYAML==6.0.2
48
+ requests==2.32.3
49
+ requests-toolbelt==1.0.0
50
+ rich==14.0.0
51
+ roboflow==1.1.61
52
+ ruff==0.11.7
53
+ safehttpx==0.1.6
54
+ semantic-version==2.10.0
55
+ shellingham==1.5.4
56
+ six==1.17.0
57
+ sniffio==1.3.1
58
+ starlette==0.46.2
59
+ tomlkit==0.13.2
60
+ tqdm==4.67.1
61
+ typer==0.15.2
62
+ typing-inspection==0.4.0
63
+ typing_extensions==4.13.2
64
+ tzdata==2025.2
65
+ urllib3==2.4.0
66
+ uvicorn==0.34.2
67
+ websockets==15.0.1