# finalproject/services/face_service.py
# Commit fc85f1e (jarondon82): improved face detector and flow control
# for images with too many faces.
"""
Face Detection Service - EmotionMirror Application
This service provides face detection capabilities using OpenCV's Haar Cascade classifier.
"""
import cv2
import numpy as np
import os
import logging
from typing import List, Tuple, Dict, Any, Optional
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class FaceDetectionService:
    """
    Service for detecting faces in images using OpenCV.

    Uses Haar Cascade classifiers for robust face detection and provides
    functionality to detect, extract, and analyze facial regions.
    """

    def __init__(self):
        """
        Initialize the face detection service with the required classifiers.

        Raises:
            ValueError: If either cascade classifier fails to load.
        """
        try:
            # Load the pre-trained face cascade classifier
            cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
            self.face_cascade = cv2.CascadeClassifier(cascade_path)
            # Eye cascade used by validate_face_quality to check face visibility
            eye_cascade_path = cv2.data.haarcascades + 'haarcascade_eye.xml'
            self.eye_cascade = cv2.CascadeClassifier(eye_cascade_path)
            if self.face_cascade.empty():
                logger.error("Failed to load face cascade classifier")
                raise ValueError("Failed to load face cascade classifier")
            # Bug fix: the eye cascade was never validated, yet
            # validate_face_quality depends on it — fail fast if it is missing.
            if self.eye_cascade.empty():
                logger.error("Failed to load eye cascade classifier")
                raise ValueError("Failed to load eye cascade classifier")
            logger.info("Face detection service initialized successfully")
        except Exception as e:
            logger.error("Error initializing FaceDetectionService: %s", e)
            raise

    def detect_faces(self, img: np.ndarray) -> List[Tuple[int, int, int, int]]:
        """
        Detect faces in the provided image.

        Args:
            img: Input image as numpy array (BGR format for OpenCV).

        Returns:
            List of face bounding boxes as (x, y, w, h) tuples of Python
            ints. Empty list when no faces are found or an error occurs.
        """
        try:
            # Convert to grayscale for detection
            if len(img.shape) == 3:
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            else:
                gray = img.copy()
            # Parameters tuned to reduce false positives: larger scale step
            # (1.1 -> 1.2), more required neighbors (5 -> 6), and a bigger
            # minimum face size (30x30 -> 50x50).
            faces = self.face_cascade.detectMultiScale(
                gray,
                scaleFactor=1.2,
                minNeighbors=6,
                minSize=(50, 50),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            logger.info("Detected %d faces in image", len(faces))
            # Bug fix: detectMultiScale returns an ndarray (or an empty tuple
            # when nothing is found); normalize to the declared return type of
            # a list of (x, y, w, h) int tuples.
            return [tuple(int(v) for v in box) for box in faces]
        except Exception as e:
            logger.error("Error detecting faces: %s", e)
            return []

    def extract_face_regions(self, img: np.ndarray, faces: List[Tuple[int, int, int, int]]) -> List[np.ndarray]:
        """
        Extract face regions from the image based on detected bounding boxes.

        Args:
            img: Input image.
            faces: List of face bounding boxes (x, y, w, h).

        Returns:
            List of face region images. On error, returns whatever regions
            were successfully extracted before the failure (best effort).
        """
        face_regions: List[np.ndarray] = []
        try:
            for (x, y, w, h) in faces:
                # Add a 10% margin around the face for better analysis
                margin = int(0.1 * w)
                # Clamp the expanded box so it stays within image bounds
                x1 = max(0, x - margin)
                y1 = max(0, y - margin)
                x2 = min(img.shape[1], x + w + margin)
                y2 = min(img.shape[0], y + h + margin)
                # Copy so callers can mutate the crop without touching img
                face_regions.append(img[y1:y2, x1:x2].copy())
            return face_regions
        except Exception as e:
            logger.error("Error extracting face regions: %s", e)
            return face_regions

    def validate_face_quality(self, face_img: np.ndarray) -> Dict[str, Any]:
        """
        Validate the quality of a detected face for emotion analysis.

        Checks minimum size, brightness, contrast, and eye visibility.

        Args:
            face_img: Face region image.

        Returns:
            Dictionary with keys "valid" (bool), "metrics" (dict of
            measurements), and "recommendations" (list of str).
        """
        result: Dict[str, Any] = {
            "valid": True,
            "metrics": {},
            "recommendations": []
        }
        try:
            # Convert to grayscale for analysis
            if len(face_img.shape) == 3:
                gray = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
            else:
                gray = face_img.copy()
            # Check face size
            min_face_dim = 48  # Minimum recommended dimension for reliable detection
            h, w = gray.shape[:2]
            result["metrics"]["width"] = int(w)
            result["metrics"]["height"] = int(h)
            if w < min_face_dim or h < min_face_dim:
                result["valid"] = False
                result["recommendations"].append(f"Face is too small ({w}x{h}px). Minimum recommended size is {min_face_dim}x{min_face_dim}px")
            # Check illumination (brightness). Bug fix: cast numpy scalars to
            # plain floats so the metrics dict stays JSON-serializable.
            brightness = float(np.mean(gray))
            result["metrics"]["brightness"] = brightness
            if brightness < 40:
                result["valid"] = False
                result["recommendations"].append("Face is too dark. Improve lighting conditions")
            elif brightness > 220:
                result["valid"] = False
                result["recommendations"].append("Face is too bright. Reduce exposure or lighting")
            # Check contrast (standard deviation of pixel intensities)
            contrast = float(np.std(gray))
            result["metrics"]["contrast"] = contrast
            if contrast < 20:
                result["valid"] = False
                result["recommendations"].append("Face has low contrast. Improve lighting conditions")
            # Detect eyes to check if face is properly visible
            eyes = self.eye_cascade.detectMultiScale(gray)
            result["metrics"]["eyes_detected"] = len(eyes)
            if len(eyes) < 2:
                result["valid"] = False
                result["recommendations"].append("Both eyes should be clearly visible")
            return result
        except Exception as e:
            logger.error("Error validating face quality: %s", e)
            result["valid"] = False
            result["recommendations"].append(f"Error analyzing face: {str(e)}")
            return result

    def validate_face_count(self, faces: List[Tuple[int, int, int, int]], max_faces: int = 5) -> Dict[str, Any]:
        """
        Validate that the number of detected faces does not exceed the limit.

        Args:
            faces: List of face bounding boxes as (x, y, w, h) tuples.
            max_faces: Maximum number of faces allowed for processing.

        Returns:
            Dictionary with keys "valid" (bool), "count" (int),
            "max_allowed" (int), "message" (str), and "warning" (str).
        """
        count = len(faces)
        result: Dict[str, Any] = {
            "valid": True,
            "count": count,
            "max_allowed": max_faces,
            "message": "",
            "warning": ""
        }
        try:
            if count > max_faces:
                result["valid"] = False
                result["message"] = f" Detected {count} faces in the image. The current version of EmotionMirror has a limit of {max_faces} faces per image."
                result["warning"] = f"We are working to increase this limit in future versions. Please upload an image with {max_faces} or fewer faces to continue with the analysis."
                logger.warning("Face count validation failed: %d faces detected, maximum allowed is %d", count, max_faces)
            else:
                result["message"] = f"Image has {count} face(s), which is within the limit of {max_faces} faces."
                logger.info("Face count validation passed: %d faces detected", count)
            return result
        except Exception as e:
            logger.error("Error validating face count: %s", e)
            result["valid"] = False
            result["message"] = "Error validating the number of faces in the image."
            result["warning"] = f"Error details: {str(e)}"
            return result