Update app.py
app.py CHANGED
@@ -44,6 +44,19 @@ def create_temp_file(suffix=".mp4") -> str:
     TEMP_FILES.append(path)
     return path
 
+# ====================================================
+# SENSITIVITY MAPPING
+# ====================================================
+SENSITIVITY_MAP = {
+    "Low (Catch More)": 0.3,
+    "Balanced (Default)": 0.5,
+    "High (Very Strict)": 0.7
+}
+
+def get_confidence_from_sensitivity(sensitivity: str) -> float:
+    """Converts user-friendly sensitivity text to numerical confidence threshold."""
+    return SENSITIVITY_MAP.get(sensitivity, 0.5)
+
 # ====================================================
 # CONFIGURATION DATA CLASSES
 # ====================================================
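A quick sanity check of the new mapping, useful when reviewing this hunk. It assumes `app.py` is importable as a module; `SENSITIVITY_MAP` and `get_confidence_from_sensitivity` are the names from the diff, everything else is illustrative.

```python
# Illustrative check of the sensitivity-to-confidence mapping (not part of app.py).
from app import SENSITIVITY_MAP, get_confidence_from_sensitivity

assert get_confidence_from_sensitivity("Low (Catch More)") == 0.3
assert get_confidence_from_sensitivity("Balanced (Default)") == 0.5
assert get_confidence_from_sensitivity("High (Very Strict)") == 0.7
# Unknown labels fall back to the balanced threshold via dict.get(..., 0.5).
assert get_confidence_from_sensitivity("anything else") == 0.5
assert list(SENSITIVITY_MAP) == ["Low (Catch More)", "Balanced (Default)", "High (Very Strict)"]
```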
@@ -91,11 +104,11 @@ class GaussianBlur(BlurEffect):
     def apply(self, image: np.ndarray, roi: Tuple[int, int, int, int]) -> np.ndarray:
         x, y, w, h = roi
         face_roi = image[y:y+h, x:x+w]
-        if face_roi.size == 0: return image
+        if face_roi.size == 0:
+            return image
 
         if self.config.adaptive_blur:
             min_dim = min(w, h)
-            # Intensity now directly maps to kernel size percentage
             kernel_val = int(min_dim * (self.config.intensity / 100.0))
             kernel_val = max(self.config.min_kernel, min(kernel_val, self.config.max_kernel))
         else:
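For context on the adaptive branch kept above, here is a standalone sketch of the same kernel sizing applied to a dummy crop. The `intensity`, `min_kernel`, and `max_kernel` values are hypothetical stand-ins for the config fields, and the final odd-size adjustment plus `cv2.GaussianBlur` call is an assumption about what the rest of the class does (GaussianBlur itself requires an odd kernel size).

```python
# Standalone sketch of adaptive Gaussian kernel sizing (illustrative values only).
import cv2
import numpy as np

face_roi = np.random.randint(0, 256, (160, 120, 3), dtype=np.uint8)  # stand-in face crop
intensity, min_kernel, max_kernel = 25, 5, 99                        # hypothetical config values

min_dim = min(face_roi.shape[1], face_roi.shape[0])
kernel_val = int(min_dim * (intensity / 100.0))
kernel_val = max(min_kernel, min(kernel_val, max_kernel))
kernel_val |= 1                                   # cv2.GaussianBlur needs an odd ksize
blurred = cv2.GaussianBlur(face_roi, (kernel_val, kernel_val), 0)
```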
@@ -111,11 +124,13 @@ class PixelateBlur(BlurEffect):
     def apply(self, image: np.ndarray, roi: Tuple[int, int, int, int]) -> np.ndarray:
         x, y, w, h = roi
         face_roi = image[y:y+h, x:x+w]
-        if face_roi.size == 0: return image
+        if face_roi.size == 0:
+            return image
 
         h_roi, w_roi = face_roi.shape[:2]
         pixel_size = self.config.pixel_size
-        if pixel_size <= 0: return image
+        if pixel_size <= 0:
+            return image
 
         small = cv2.resize(face_roi, (max(1, w_roi // pixel_size), max(1, h_roi // pixel_size)), interpolation=cv2.INTER_LINEAR)
         pixelated = cv2.resize(small, (w_roi, h_roi), interpolation=cv2.INTER_NEAREST)
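The two `cv2.resize` calls above are the whole pixelation trick: shrinking with `INTER_LINEAR` averages each block, growing back with `INTER_NEAREST` keeps the blocks hard-edged. A minimal sketch on a dummy array, reusing the same calls:

```python
# Pixelation round trip on a stand-in crop (same resize pattern as PixelateBlur.apply).
import cv2
import numpy as np

face_roi = np.random.randint(0, 256, (120, 90, 3), dtype=np.uint8)
pixel_size = 12

h_roi, w_roi = face_roi.shape[:2]
small = cv2.resize(face_roi, (max(1, w_roi // pixel_size), max(1, h_roi // pixel_size)),
                   interpolation=cv2.INTER_LINEAR)   # 90x120 -> 7x10, each pixel ~ block average
pixelated = cv2.resize(small, (w_roi, h_roi),
                       interpolation=cv2.INTER_NEAREST)  # back to 90x120, visibly blocky

assert pixelated.shape == face_roi.shape
```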
@@ -133,7 +148,8 @@ def get_blur_effect(config: BlurConfig) -> BlurEffect:
     """Factory function to create a blur effect instance."""
     blur_effects = {"gaussian": GaussianBlur, "pixelate": PixelateBlur, "solid": SolidColorBlur}
     blur_class = blur_effects.get(config.type)
-    if not blur_class: raise ValueError(f"Unknown blur type: {config.type}")
+    if not blur_class:
+        raise ValueError(f"Unknown blur type: {config.type}")
     return blur_class(config)
 
 # ====================================================
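Callers now get an explicit `ValueError` for an unregistered type instead of failing later on a `None`. A hedged usage sketch, assuming `BlurConfig` and `get_blur_effect` are importable from `app.py` and that the remaining `BlurConfig` fields have defaults:

```python
# Illustrative use of the factory's fail-fast behaviour (not part of app.py).
from app import BlurConfig, get_blur_effect

effect = get_blur_effect(BlurConfig(type="pixelate"))   # returns a PixelateBlur instance
try:
    get_blur_effect(BlurConfig(type="mosaic"))          # "mosaic" is not in the registry
except ValueError as err:
    print(err)                                          # -> Unknown blur type: mosaic
```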
@@ -158,20 +174,27 @@ class YOLOv8FaceDetector:
         annotated_image = image.copy() if return_annotated else None
 
         for r in results:
-            if r.boxes is None: continue
+            if r.boxes is None:
+                continue
             for box in r.boxes:
                 x1, y1, x2, y2 = map(int, box.xyxy[0])
                 confidence = float(box.conf[0])
                 faces.append({"x": x1, "y": y1, "width": x2 - x1, "height": y2 - y1, "confidence": confidence})
+
                 if return_annotated:
+                    # Draw bounding box
                     cv2.rectangle(annotated_image, (x1, y1), (x2, y2), (0, 255, 0), 3)
+
+                    # Simplified label - just "Face" without percentage
+                    label = "Face"
                     (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
                     cv2.rectangle(annotated_image, (x1, y1 - h - 10), (x1 + w, y1), (0, 255, 0), -1)
                     cv2.putText(annotated_image, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
+
         return faces, annotated_image
 
 GLOBAL_DETECTOR: Optional[YOLOv8FaceDetector] = None
+
 def get_global_detector() -> YOLOv8FaceDetector:
     """Initializes and returns the global singleton detector instance."""
     global GLOBAL_DETECTOR
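The simplified overlay drawn per detection can be reproduced in isolation; the sketch below repeats the same three OpenCV calls on a blank frame with a made-up box, so a reviewer can see how the filled label background is sized from `cv2.getTextSize`.

```python
# Standalone sketch of the per-detection overlay (dummy frame, hypothetical box).
import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)
x1, y1, x2, y2 = 200, 150, 360, 330

cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)                 # face box
label = "Face"                                                           # no percentage anymore
(w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
cv2.rectangle(frame, (x1, y1 - h - 10), (x1 + w, y1), (0, 255, 0), -1)   # filled label background
cv2.putText(frame, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
```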
@@ -222,9 +245,13 @@ def get_app_instance(blur_type: str, blur_amount: float, blur_size: float) -> FacePrivacyApp:
     )
     return FacePrivacyApp(app_config, detector)
 
-def process_media(media, blur_type, blur_amount, blur_size, confidence):
+def process_media(media, blur_type, blur_amount, blur_size, sensitivity):
+    """Process single image with blur effect."""
+    if media is None:
+        return None
     try:
+        # Convert sensitivity to confidence threshold
+        confidence = get_confidence_from_sensitivity(sensitivity)
         app = get_app_instance(blur_type, blur_amount, blur_size)
         return app.process_image(media, confidence)
     except Exception as e:
@@ -232,18 +259,24 @@ def process_media(media, blur_type, blur_amount, blur_size, confidence):
         gr.Warning(f"An error occurred: {e}")
         return media
 
-def process_video(video_file, blur_type, blur_amount, blur_size, confidence, progress=gr.Progress()):
+def process_video(video_file, blur_type, blur_amount, blur_size, sensitivity, progress=gr.Progress()):
+    """Process video with blur effect."""
+    if video_file is None:
+        return None, "⚠️ No video provided."
     try:
+        # Convert sensitivity to confidence threshold
+        confidence = get_confidence_from_sensitivity(sensitivity)
         app = get_app_instance(blur_type, blur_amount, blur_size)
         cap = cv2.VideoCapture(video_file.name)
-        if not cap.isOpened(): return None, "❌ Cannot open video file."
+        if not cap.isOpened():
+            return None, "❌ Cannot open video file."
 
         out_path = create_temp_file()
-        fourcc = cv2.VideoWriter_fourcc(*'avc1')
+        fourcc = cv2.VideoWriter_fourcc(*'avc1')
         fps, w, h = cap.get(cv2.CAP_PROP_FPS), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
         total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
         out_vid = cv2.VideoWriter(out_path, fourcc, fps, (w, h))
+
         if not out_vid.isOpened():
             logger.warning("H.264 codec failed, falling back to mp4v.")
             out_vid = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
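Both video code paths create the writer with `avc1` and fall back to `mp4v` when the H.264 encoder is not available in the local OpenCV build. As a sketch, the shared pattern could be factored into a small helper; `open_writer` is hypothetical and does not exist in `app.py`:

```python
# Hypothetical helper illustrating the avc1 -> mp4v fallback used inline above.
import cv2

def open_writer(path: str, fps: float, size: tuple) -> cv2.VideoWriter:
    writer = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*'avc1'), fps, size)
    if not writer.isOpened():
        # avc1/H.264 support depends on how OpenCV was built; mp4v is a safe fallback
        writer = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*'mp4v'), fps, size)
    return writer
```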
@@ -251,11 +284,13 @@ def process_video(video_file, blur_type, blur_amount, blur_size, confidence, progress=gr.Progress()):
         frame_num = 0
         while cap.isOpened():
             ret, frame = cap.read()
-            if not ret: break
+            if not ret:
+                break
             frame_num += 1
             progress(frame_num / max(total_frames, 1), desc=f"Processing frame {frame_num}/{total_frames}")
             processed_frame = app.process_image(frame, confidence)
             out_vid.write(processed_frame)
+
         cap.release()
         out_vid.release()
         return out_path, f"✅ Processed {frame_num} frames."
@@ -264,65 +299,83 @@ def process_video(video_file, blur_type, blur_amount, blur_size, confidence, progress=gr.Progress()):
         gr.Error(f"Video processing failed: {e}")
         return None, f"❌ Error: {e}"
 
-def detect_faces_image(image, confidence):
+def detect_faces_image(image, sensitivity):
+    """Detect faces in single image."""
+    if image is None:
+        return None, "⚠️ No image provided."
     try:
+        # Convert sensitivity to confidence threshold
+        confidence = get_confidence_from_sensitivity(sensitivity)
         detector = get_global_detector()
         faces, annotated_image = detector.detect_faces(image, confidence, return_annotated=True)
+
+        # Simplified result - just show count
         if faces:
+            result = f"✅ **{len(faces)} face(s) detected!**"
         else:
            result = "❌ **No faces detected.**"
+
         return annotated_image, result
     except Exception as e:
         logger.error(f"Image detection error: {e}")
         gr.Warning(f"An error occurred: {e}")
         return image, f"❌ Error: {e}"
 
-def detect_faces_video(video_file, confidence, progress=gr.Progress()):
+def detect_faces_video(video_file, sensitivity, progress=gr.Progress()):
+    """Detect faces in video."""
+    if video_file is None:
+        return None, "⚠️ No video provided."
     try:
+        # Convert sensitivity to confidence threshold
+        confidence = get_confidence_from_sensitivity(sensitivity)
         detector = get_global_detector()
         cap = cv2.VideoCapture(video_file.name)
-        if not cap.isOpened(): return None, "❌ Cannot open video file."
+        if not cap.isOpened():
+            return None, "❌ Cannot open video file."
 
         out_path = create_temp_file()
         fourcc = cv2.VideoWriter_fourcc(*'avc1')
         fps, w, h = cap.get(cv2.CAP_PROP_FPS), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
         total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-        out_vid = cv2.VideoWriter(out_path, fourcc, fps, (w,h))
+        out_vid = cv2.VideoWriter(out_path, fourcc, fps, (w, h))
+
+        if not out_vid.isOpened():
+            out_vid = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
 
-        frame_num, frames_with_faces, all_confidences = 0, 0, []
+        frame_num, frames_with_faces = 0, 0
         while cap.isOpened():
             ret, frame = cap.read()
-            if not ret: break
+            if not ret:
+                break
             frame_num += 1
             progress(frame_num / max(total_frames, 1), desc=f"Analyzing frame {frame_num}/{total_frames}")
             faces, annotated_frame = detector.detect_faces(frame, confidence, return_annotated=True)
             if faces:
                 frames_with_faces += 1
-                all_confidences.extend([f["confidence"] for f in faces])
             out_vid.write(annotated_frame)
+
         cap.release()
         out_vid.release()
 
+        # Simplified result - just show frame count
         if frames_with_faces > 0:
-            result = (f"✅ **Faces detected in {frames_with_faces}/{frame_num} frames!**\n"
-                      f"📊 Average Confidence: **{avg_conf:.2%}**\n"
-                      f"🎯 Max Confidence: **{max(all_confidences):.2%}**")
+            result = f"✅ **Faces detected in {frames_with_faces}/{frame_num} frames!**"
         else:
             result = f"❌ **No faces detected in {frame_num} frames.**"
+
         return out_path, result
     except Exception as e:
         logger.error(f"Video detection error: {e}")
         gr.Error(f"Video detection failed: {e}")
         return None, f"❌ Error: {e}"
 
-def detect_faces_webcam(image, confidence):
+def detect_faces_webcam(image, sensitivity):
+    """Detect faces in webcam stream."""
+    if image is None:
+        return None
     try:
+        # Convert sensitivity to confidence threshold
+        confidence = get_confidence_from_sensitivity(sensitivity)
         detector = get_global_detector()
         _, annotated_image = detector.detect_faces(image, confidence, return_annotated=True)
         return annotated_image
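The new `if ... is None` guards make every handler safe to call before any media has been supplied, for example when a button is clicked on an empty tab. A few review-time checks, assuming `app.py` imports cleanly as a module and that the guards short-circuit before any model or video work:

```python
# Illustrative checks of the new None guards (not part of app.py).
import app

assert app.process_media(None, "pixelate", 25, 1.2, "Balanced (Default)") is None
assert app.detect_faces_webcam(None, "Balanced (Default)") is None
assert app.process_video(None, "pixelate", 25, 1.2, "Balanced (Default)")[0] is None
assert app.detect_faces_image(None, "Balanced (Default)")[0] is None
assert app.detect_faces_video(None, "Balanced (Default)")[0] is None
```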
@@ -335,105 +388,227 @@ def detect_faces_webcam(image, confidence):
 # ====================================================
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue"), title="Face Privacy Tool") as demo:
     gr.Markdown("# 🔒 Face Privacy Tool")
+    gr.Markdown("AI-powered face detection and privacy protection using YOLOv8-Face. Obscure faces in images, videos, and live webcam feeds, or use the detection mode to verify their presence.")
 
     with gr.Row():
+        # ========== SETTINGS SIDEBAR (WRAPPED IN FRAME) ==========
         with gr.Column(scale=1):
+            with gr.Frame():
+                gr.Markdown("### ⚙️ Global Settings")
+
+                with gr.Accordion("Privacy Settings", open=True):
+                    blur_type = gr.Radio(
+                        ["gaussian", "pixelate", "solid"],
+                        value="pixelate",
+                        label="Blur Type",
+                        info="Choose how to obscure faces."
+                    )
+                    blur_amount = gr.Slider(
+                        1, 100,
+                        step=1,
+                        value=25,
+                        label="Blur Intensity/Size",
+                        info="Higher = more obscured."
+                    )
+                    blur_size = gr.Slider(
+                        1.0, 2.0,
+                        step=0.05,
+                        value=1.2,
+                        label="Coverage Area",
+                        info="Expand blur beyond face boundary."
+                    )
+
+                with gr.Accordion("Detection Settings", open=True):
+                    # Changed from Slider to Radio for sensitivity
+                    detection_sensitivity = gr.Radio(
+                        choices=list(SENSITIVITY_MAP.keys()),
+                        value="Balanced (Default)",
+                        label="Detection Sensitivity",
+                        info="How strict the face detection should be"
+                    )
+
+        # ========== MAIN CONTENT AREA ==========
         with gr.Column(scale=2):
             with gr.Tabs():
+                # ========== PRIVACY MODE ==========
                 with gr.TabItem("🔒 Privacy Mode (Blur Faces)"):
                     gr.Markdown("### Apply privacy protection to your media.")
+
                     with gr.Tabs():
+                        # Image Tab
                         with gr.TabItem("📷 Image"):
                             with gr.Row():
+                                img_in_blur = gr.Image(
+                                    sources=["upload", "clipboard"],
+                                    type="numpy",
+                                    label="Input Image",
+                                    height=500,
+                                    object_fit="contain"
+                                )
+                                img_out_blur = gr.Image(
+                                    type="numpy",
+                                    label="Protected Image",
+                                    height=500,
+                                    object_fit="contain"
+                                )
                             with gr.Row():
                                 blur_img_btn = gr.Button("Apply Privacy Blur", variant="primary", scale=3)
                                 gr.ClearButton([img_in_blur, img_out_blur], scale=1)
                             gr.Examples(
                                 examples=[
                                     ["./examples/single_face.jpg"],
+                                    ["./examples/two_faces.png"],
+                                    ["./examples/group_photo.png"],
+                                    ["./examples/group2.webp"],
                                 ],
                                 inputs=img_in_blur,
                                 label="Click an example to try"
+                            )
+
+                        # Video Tab
                         with gr.TabItem("🎥 Video"):
                             with gr.Row():
+                                vid_in_blur = gr.File(
+                                    file_types=[".mp4", ".mov", ".avi"],
+                                    label="Input Video"
+                                )
                                 with gr.Column():
+                                    vid_out_blur = gr.Video(
+                                        label="Protected Video",
+                                        height=500
+                                    )
                                     vid_status_blur = gr.Markdown("")
                             with gr.Row():
                                 blur_vid_btn = gr.Button("Process Video", variant="primary", scale=3)
                                 gr.ClearButton([vid_in_blur, vid_out_blur, vid_status_blur], scale=1)
 
+                        # Webcam Tab
                         with gr.TabItem("📹 Webcam"):
                             gr.Markdown("**Real-time privacy protection from your webcam feed.**")
                             with gr.Row():
+                                web_in_blur = gr.Image(
+                                    sources=["webcam"],
+                                    type="numpy",
+                                    streaming=True,
+                                    label="Live Webcam",
+                                    height=500,
+                                    object_fit="contain"
+                                )
+                                web_out_blur = gr.Image(
+                                    type="numpy",
+                                    label="Protected Feed",
+                                    height=500,
+                                    object_fit="contain"
+                                )
+
+                # ========== DETECTION MODE ==========
                 with gr.TabItem("🔍 Detection Mode (Check for Faces)"):
                     gr.Markdown("### Verify if your media contains human faces.")
+
                     with gr.Tabs():
+                        # Image Detection Tab
                         with gr.TabItem("📷 Image"):
                             with gr.Row():
+                                img_in_detect = gr.Image(
+                                    sources=["upload", "clipboard"],
+                                    type="numpy",
+                                    label="Input Image",
+                                    height=500,
+                                    object_fit="contain"
+                                )
                                 with gr.Column():
+                                    img_out_detect = gr.Image(
+                                        type="numpy",
+                                        label="Detection Result",
+                                        height=500,
+                                        object_fit="contain"
+                                    )
                                     img_status_detect = gr.Markdown("_Upload an image to start._")
+
                             with gr.Row():
                                 detect_img_btn = gr.Button("Detect Faces", variant="primary", scale=3)
                                 gr.ClearButton([img_in_detect, img_out_detect, img_status_detect], scale=1)
                             gr.Examples(
                                 examples=[
                                     ["./examples/single_face.jpg"],
+                                    ["./examples/two_faces.png"],
+                                    ["./examples/group_photo.png"],
+                                    ["./examples/group2.webp"]
                                 ],
                                 inputs=img_in_detect,
                                 label="Click an example to try"
                             )
 
+                        # Video Detection Tab
                         with gr.TabItem("🎥 Video"):
                             with gr.Row():
+                                vid_in_detect = gr.File(
+                                    file_types=[".mp4", ".mov", ".avi"],
+                                    label="Input Video"
+                                )
                                 with gr.Column():
+                                    vid_out_detect = gr.Video(
+                                        label="Annotated Video",
+                                        height=500
+                                    )
                                     vid_status_detect = gr.Markdown("_Upload a video to start._")
                             with gr.Row():
                                 detect_vid_btn = gr.Button("Analyze Video for Faces", variant="primary", scale=3)
                                 gr.ClearButton([vid_in_detect, vid_out_detect, vid_status_detect], scale=1)
 
+                        # Webcam Detection Tab
                         with gr.TabItem("📹 Webcam"):
                             gr.Markdown("**Live face detection from your webcam feed.**")
                             with gr.Row():
+                                web_in_detect = gr.Image(
+                                    sources=["webcam"],
+                                    type="numpy",
+                                    streaming=True,
+                                    label="Live Feed",
+                                    height=500,
+                                    object_fit="contain"
+                                )
+                                web_out_detect = gr.Image(
+                                    type="numpy",
+                                    label="Detection Result",
+                                    height=500,
+                                    object_fit="contain"
+                                )
+
+    # ========== EVENT HANDLERS ==========
+    # Privacy Mode (updated to use detection_sensitivity instead of detection_confidence)
+    blur_img_btn.click(
+        process_media,
+        inputs=[img_in_blur, blur_type, blur_amount, blur_size, detection_sensitivity],
+        outputs=img_out_blur
+    )
+    blur_vid_btn.click(
+        process_video,
+        inputs=[vid_in_blur, blur_type, blur_amount, blur_size, detection_sensitivity],
+        outputs=[vid_out_blur, vid_status_blur]
+    )
+    web_in_blur.stream(
+        process_media,
+        inputs=[web_in_blur, blur_type, blur_amount, blur_size, detection_sensitivity],
+        outputs=web_out_blur
+    )
 
+    # Detection Mode (updated to use detection_sensitivity instead of detection_confidence)
+    detect_img_btn.click(
+        detect_faces_image,
+        inputs=[img_in_detect, detection_sensitivity],
+        outputs=[img_out_detect, img_status_detect]
+    )
+    detect_vid_btn.click(
+        detect_faces_video,
+        inputs=[vid_in_detect, detection_sensitivity],
+        outputs=[vid_out_detect, vid_status_detect]
+    )
+    web_in_detect.stream(
+        detect_faces_webcam,
+        inputs=[web_in_detect, detection_sensitivity],
+        outputs=web_out_detect
+    )
 
 # ====================================================
 # MAIN ENTRY POINT
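The settings sidebar above is wrapped in `gr.Frame()`; in Gradio builds that do not provide a `Frame` container, an equivalent grouping can be sketched with `gr.Group`, which is a standard Blocks container. The component names below mirror the diff; the wrapper is the only substitution:

```python
# Hedged alternative for the settings wrapper, using gr.Group instead of gr.Frame.
import gradio as gr

with gr.Blocks() as demo_sketch:
    with gr.Column(scale=1):
        with gr.Group():  # stand-in for gr.Frame(): visually groups the settings
            gr.Markdown("### ⚙️ Global Settings")
            with gr.Accordion("Privacy Settings", open=True):
                blur_type = gr.Radio(["gaussian", "pixelate", "solid"], value="pixelate", label="Blur Type")
            with gr.Accordion("Detection Settings", open=True):
                detection_sensitivity = gr.Radio(
                    choices=["Low (Catch More)", "Balanced (Default)", "High (Very Strict)"],
                    value="Balanced (Default)",
                    label="Detection Sensitivity",
                )
```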
@@ -446,4 +621,5 @@ if __name__ == "__main__":
         demo.launch()
     except Exception as e:
         logger.error(f"❌ Startup failed: {e}")
-        logger.info("💡 Make sure 'yolov8n-face.pt' is available in the current directory or will be downloaded automatically by ultralytics.")
+        logger.info("💡 Make sure 'yolov8n-face.pt' is available in the current directory or will be downloaded automatically by ultralytics.")
+