# kyc-backend/app/services/face_quality.py
# Initial deployment to Hugging Face Spaces (commit bd2c5ca)
"""
Face Quality Analysis Service.
This service provides face quality assessment including:
- Blur detection (Laplacian variance)
- Brightness analysis
- Face pose estimation
"""
import cv2
import numpy as np
from typing import Dict, Any, Optional
import logging
from ..config import settings
logger = logging.getLogger(__name__)
class FaceQualityService:
    """Service for analyzing face image quality.

    Provides blur detection (Laplacian variance), brightness, contrast,
    pose, and face-size checks, plus an aggregate pass/fail assessment.
    Thresholds come from the application ``settings`` module.
    """

    def __init__(self) -> None:
        """Initialize the face quality service (stateless; nothing to set up)."""
        pass

    @staticmethod
    def _to_gray(image: np.ndarray) -> np.ndarray:
        """Return *image* as single-channel grayscale.

        Converts a 3-channel BGR image via OpenCV; a 2-D input is assumed
        to already be grayscale and is returned unchanged.
        """
        if len(image.shape) == 3:
            return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return image

    def analyze_quality(
        self,
        image: np.ndarray,
        face_info: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Analyze the quality of a face image.

        Args:
            image: Input image (BGR format)
            face_info: Optional face info dict containing pose data from
                face detection (expects a ``"pose"`` key with yaw/pitch/roll)

        Returns:
            Dictionary containing blur, brightness, optional pose metrics,
            and an overall ``is_good_quality`` flag.
        """
        result: Dict[str, Any] = {}

        # Blur check (Laplacian variance).
        result.update(self.analyze_blur(image))

        # Brightness check (normalized mean intensity).
        result.update(self.analyze_brightness(image))

        # Pose analysis only when the detector supplied pose angles.
        if face_info and "pose" in face_info:
            result["pose"] = self.analyze_pose(face_info["pose"])

        # Aggregate pass/fail across the individual checks.
        result["is_good_quality"] = self._assess_overall_quality(result)
        return result

    def analyze_blur(self, image: np.ndarray) -> Dict[str, Any]:
        """
        Analyze image blur using the Laplacian variance method.

        Higher variance = sharper image; lower variance = blurrier image.

        Args:
            image: Input image (BGR format)

        Returns:
            Dictionary with blur metrics (``blur_score``, ``blur_threshold``,
            ``is_blurry``).
        """
        gray = self._to_gray(image)
        # Cast to a native float immediately: ndarray.var() returns a numpy
        # scalar, and np.bool_/np.float64 values break json serialization
        # of the response downstream.
        variance = float(cv2.Laplacian(gray, cv2.CV_64F).var())
        return {
            "blur_score": round(variance, 2),
            "blur_threshold": settings.BLUR_THRESHOLD,
            "is_blurry": variance < settings.BLUR_THRESHOLD,
        }

    def analyze_brightness(self, image: np.ndarray) -> Dict[str, Any]:
        """
        Analyze image brightness.

        Args:
            image: Input image (BGR format)

        Returns:
            Dictionary with brightness metrics normalized to the 0-1 range.
        """
        gray = self._to_gray(image)
        # float() cast keeps all derived values (and the comparisons below)
        # native Python types rather than numpy scalars.
        mean_brightness = float(np.mean(gray)) / 255.0
        return {
            "brightness": round(mean_brightness, 3),
            "brightness_min": settings.BRIGHTNESS_MIN,
            "brightness_max": settings.BRIGHTNESS_MAX,
            "is_too_dark": mean_brightness < settings.BRIGHTNESS_MIN,
            "is_too_bright": mean_brightness > settings.BRIGHTNESS_MAX,
        }

    def analyze_pose(self, pose: Dict[str, float]) -> Dict[str, Any]:
        """
        Analyze face pose angles.

        Args:
            pose: Dictionary with ``yaw``, ``pitch``, ``roll`` angles in
                degrees (missing keys default to 0)

        Returns:
            Dictionary with the rounded angles, configured limits, and an
            ``is_frontal`` flag.
        """
        # Cast to float so ints or numpy scalars from the detector produce
        # a consistent return type after round().
        yaw = float(pose.get("yaw", 0))
        pitch = float(pose.get("pitch", 0))
        roll = float(pose.get("roll", 0))
        is_frontal = (
            abs(yaw) <= settings.POSE_MAX_YAW
            and abs(pitch) <= settings.POSE_MAX_PITCH
            and abs(roll) <= settings.POSE_MAX_ROLL
        )
        return {
            "yaw": round(yaw, 2),
            "pitch": round(pitch, 2),
            "roll": round(roll, 2),
            "max_yaw": settings.POSE_MAX_YAW,
            "max_pitch": settings.POSE_MAX_PITCH,
            "max_roll": settings.POSE_MAX_ROLL,
            "is_frontal": is_frontal,
        }

    def analyze_contrast(self, image: np.ndarray) -> Dict[str, Any]:
        """
        Analyze image contrast.

        Args:
            image: Input image (BGR format)

        Returns:
            Dictionary with contrast metrics (standard deviation of the
            grayscale intensities, normalized to 0-1).
        """
        gray = self._to_gray(image)
        # float() cast avoids leaking np.float64 / np.bool_ into the result.
        contrast = float(np.std(gray)) / 255.0
        return {
            "contrast": round(contrast, 3),
            "is_low_contrast": contrast < 0.1,
        }

    def analyze_face_size(
        self,
        image: np.ndarray,
        bbox: Dict[str, int],
        min_face_ratio: float = 0.1
    ) -> Dict[str, Any]:
        """
        Analyze face size relative to the image.

        Args:
            image: Input image
            bbox: Face bounding box with ``width`` and ``height`` keys
            min_face_ratio: Minimum acceptable face-to-image area ratio

        Returns:
            Dictionary with face size metrics.
        """
        img_height, img_width = image.shape[:2]
        img_area = img_height * img_width
        face_area = bbox["width"] * bbox["height"]
        # Guard degenerate (zero-area) images: previously this raised
        # ZeroDivisionError; an empty image can never contain a valid face.
        face_ratio = face_area / img_area if img_area > 0 else 0.0
        return {
            "face_area": face_area,
            "image_area": img_area,
            "face_ratio": round(face_ratio, 4),
            "is_face_too_small": face_ratio < min_face_ratio,
        }

    def _assess_overall_quality(self, metrics: Dict[str, Any]) -> bool:
        """
        Assess overall image quality based on collected metrics.

        Args:
            metrics: Dictionary of quality metrics produced by the
                analyze_* methods

        Returns:
            True if the image passes all available quality checks.
        """
        # Blur gate.
        if metrics.get("is_blurry", False):
            return False
        # Brightness gate (either extreme fails).
        if metrics.get("is_too_dark", False) or metrics.get("is_too_bright", False):
            return False
        # Pose gate, only when pose data was provided.
        if "pose" in metrics and not metrics["pose"].get("is_frontal", True):
            return False
        return True
# Global service instance (module-level singleton; import this rather than
# constructing new FaceQualityService objects — the service is stateless).
face_quality_service = FaceQualityService()