"""
Data processing utilities for video character replacement
"""
import cv2
import numpy as np
import mediapipe as mp
class VideoFrameProcessor:
"""Handle video frame processing and analysis"""
def __init__(self):
self.face_detection = mp.solutions.face_detection
self.face_mesh = mp.solutions.face_mesh
def preprocess_frame(self, frame):
"""Preprocess frame for better face detection"""
        # OpenCV decodes frames as BGR; convert to RGB for downstream models
        if len(frame.shape) == 3 and frame.shape[2] == 3:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Apply mild denoising
frame = cv2.bilateralFilter(frame, 9, 75, 75)
# Enhance contrast slightly
lab = cv2.cvtColor(frame, cv2.COLOR_RGB2LAB)
l, a, b = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
l = clahe.apply(l)
frame = cv2.merge([l, a, b])
frame = cv2.cvtColor(frame, cv2.COLOR_LAB2RGB)
return frame
def detect_face_quality(self, face_bbox, frame_shape):
"""
Assess the quality of a detected face
Args:
face_bbox (tuple): Face bounding box (x, y, w, h)
frame_shape (tuple): Frame shape (height, width, channels)
Returns:
float: Quality score (0-1)
"""
x, y, w, h = face_bbox
frame_h, frame_w = frame_shape[:2]
# Check if face is too small
face_area_ratio = (w * h) / (frame_w * frame_h)
if face_area_ratio < 0.01: # Less than 1% of frame
return 0.0
# Check if face is too close to edges
edge_threshold = 0.05
if (x < frame_w * edge_threshold or
y < frame_h * edge_threshold or
x + w > frame_w * (1 - edge_threshold) or
y + h > frame_h * (1 - edge_threshold)):
return 0.5
# Good face placement
return 1.0
def extract_face_features(self, image, landmarks):
"""
Extract facial features from landmarks
Args:
image (numpy.ndarray): Input image
landmarks (numpy.ndarray): Facial landmarks
Returns:
dict: Facial features
"""
features = {}
try:
            # Eye positions
            if len(landmarks) >= 468:  # MediaPipe face mesh has 468 landmarks
                # Mesh indices 33/133 and 362/263 are the corners of the
                # left and right eyes; averaging each pair gives a stable
                # eye-centre estimate
                features['left_eye_center'] = np.mean(landmarks[[33, 133]], axis=0)
                features['right_eye_center'] = np.mean(landmarks[[362, 263]], axis=0)
                features['eye_distance'] = np.linalg.norm(
                    features['left_eye_center'] - features['right_eye_center']
                )
else:
# Basic landmark-based features
features['face_width'] = np.max(landmarks[:, 0]) - np.min(landmarks[:, 0])
features['face_height'] = np.max(landmarks[:, 1]) - np.min(landmarks[:, 1])
except Exception as e:
print(f"Error extracting face features: {e}")
return features
def create_smooth_mask(self, mask, kernel_size=15):
"""
Create a smooth face mask with proper blending
Args:
mask (numpy.ndarray): Binary mask
kernel_size (int): Gaussian kernel size
Returns:
numpy.ndarray: Smoothed mask
"""
        # GaussianBlur requires an odd kernel size
        kernel_size = kernel_size | 1
        # Apply Gaussian blur for smooth edges
        smooth_mask = cv2.GaussianBlur(mask.astype(np.float32), (kernel_size, kernel_size), 0)
# Normalize to 0-1 range
smooth_mask = smooth_mask / smooth_mask.max() if smooth_mask.max() > 0 else smooth_mask
return smooth_mask
def blend_faces_seamlessly(self, target_face, source_face, mask):
"""
Seamlessly blend source face into target face region
Args:
target_face (numpy.ndarray): Target face region
source_face (numpy.ndarray): Source face region
mask (numpy.ndarray): Blending mask
Returns:
numpy.ndarray: Blended result
"""
result = target_face.copy().astype(np.float32)
# Ensure all arrays have the same shape
if target_face.shape != source_face.shape:
source_face = cv2.resize(source_face, (target_face.shape[1], target_face.shape[0]))
if mask.shape != target_face.shape[:2]:
mask = cv2.resize(mask, (target_face.shape[1], target_face.shape[0]))
        # Alpha-blend the two regions per channel, weighting each pixel by
        # the mask (true Poisson blending would use cv2.seamlessClone instead)
        mask = mask.astype(np.float32)
        if mask.max() > 1.0:  # accept 0-255 masks as well as 0-1 masks
            mask /= 255.0
        for channel in range(3):
            channel_mask = mask if len(mask.shape) == 2 else mask[:, :, channel]
result[:, :, channel] = (
(1 - channel_mask) * target_face[:, :, channel] +
channel_mask * source_face[:, :, channel]
)
return np.clip(result, 0, 255).astype(np.uint8)
class ColorMatcher:
"""Handle color matching between source and target faces"""
def __init__(self):
self.lab_color_space = True
def match_histogram(self, source, target):
"""
Match histogram of source to target
Args:
source (numpy.ndarray): Source image
target (numpy.ndarray): Target image
Returns:
numpy.ndarray: Color-matched source
"""
# Convert to LAB color space for better color matching
source_lab = cv2.cvtColor(source, cv2.COLOR_RGB2LAB)
target_lab = cv2.cvtColor(target, cv2.COLOR_RGB2LAB)
# Match histograms for each channel
result_lab = source_lab.copy().astype(np.float32)
for i in range(3):
source_hist = cv2.calcHist([source_lab], [i], None, [256], [0, 256])
target_hist = cv2.calcHist([target_lab], [i], None, [256], [0, 256])
            # Cumulative distribution functions, flattened to 1-D and normalized
            source_cdf = source_hist.cumsum().flatten()
            target_cdf = target_hist.cumsum().flatten()
            source_cdf = source_cdf / source_cdf[-1]
            target_cdf = target_cdf / target_cdf[-1]
# Create lookup table
lookup_table = np.zeros(256)
for j in range(256):
# Find closest match in target CDF
idx = np.argmin(np.abs(target_cdf - source_cdf[j]))
lookup_table[j] = idx
# Apply lookup table
result_lab[:, :, i] = lookup_table[source_lab[:, :, i].astype(np.int32)]
# Convert back to RGB
result = cv2.cvtColor(result_lab.astype(np.uint8), cv2.COLOR_LAB2RGB)
return result
def match_color_statistics(self, source, target, preserve_luminance=True):
"""
Match color statistics between source and target
Args:
source (numpy.ndarray): Source image
target (numpy.ndarray): Target image
            preserve_luminance (bool): If True, keep the source's luminance (Y)
                and match only the chrominance (U, V) statistics
Returns:
numpy.ndarray: Color-matched source
"""
result = source.copy().astype(np.float32)
        if preserve_luminance:
            # Convert to YUV: keep the source's luminance (Y) and transfer
            # the target's chrominance statistics to the U and V channels
            source_yuv = cv2.cvtColor(source, cv2.COLOR_RGB2YUV)
            target_yuv = cv2.cvtColor(target, cv2.COLOR_RGB2YUV)
            result_yuv = source_yuv.astype(np.float32)
            for i in [1, 2]:  # U and V channels
                source_mean = np.mean(source_yuv[:, :, i])
                source_std = np.std(source_yuv[:, :, i])
                target_mean = np.mean(target_yuv[:, :, i])
                target_std = np.std(target_yuv[:, :, i])
                if source_std > 0:
                    result_yuv[:, :, i] = (
                        (source_yuv[:, :, i] - source_mean) *
                        (target_std / source_std) + target_mean
                    )
            result_yuv = np.clip(result_yuv, 0, 255).astype(np.uint8)
            result = cv2.cvtColor(result_yuv, cv2.COLOR_YUV2RGB).astype(np.float32)
        else:
            # Simple per-channel RGB statistics matching on the float copy
            for i in range(3):
                source_mean = np.mean(source[:, :, i])
                source_std = np.std(source[:, :, i])
                target_mean = np.mean(target[:, :, i])
                target_std = np.std(target[:, :, i])
                if source_std > 0:
                    result[:, :, i] = (
                        (source[:, :, i] - source_mean) *
                        (target_std / source_std) + target_mean
                    )
return np.clip(result, 0, 255).astype(np.uint8)
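
# ---------------------------------------------------------------------------
# Minimal smoke test (illustrative sketch only, not part of the pipeline):
# exercises ColorMatcher and the masking/blending helpers on synthetic data,
# so the module can be sanity-checked without any video or model files.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    source = rng.integers(0, 256, (64, 64, 3), dtype=np.uint8)
    target = rng.integers(0, 256, (64, 64, 3), dtype=np.uint8)

    matcher = ColorMatcher()
    matched = matcher.match_color_statistics(source, target)
    print("color-matched:", matched.shape, matched.dtype)

    processor = VideoFrameProcessor()
    mask = np.zeros((64, 64), dtype=np.uint8)
    cv2.circle(mask, (32, 32), 20, 255, -1)  # synthetic circular "face" mask
    soft = processor.create_smooth_mask(mask)
    blended = processor.blend_faces_seamlessly(target, source, soft)
    print("blended:", blended.shape, blended.dtype)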
I've created a comprehensive end-to-end video character replacement system with the following key features:
## 🎬 **Core Features:**
1. **Character Replacement**: Replace faces in videos using a reference image
2. **Multi-Method Detection**: Uses MediaPipe + MTCNN for robust face detection
3. **Temporal Consistency**: Smooth tracking across video frames
4. **Color Matching**: Preserves background lighting and colors (a standalone sketch follows this list)
5. **Quality Assessment**: Evaluates face detection quality
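
For example, the LAB-space histogram matching can be run on a pair of face crops in isolation. A minimal sketch, assuming two same-register RGB crops at placeholder paths:

```python
import cv2
from data_processing import ColorMatcher

# Placeholder paths; any two RGB face crops work here
source_face = cv2.cvtColor(cv2.imread("source_face.png"), cv2.COLOR_BGR2RGB)
target_face = cv2.cvtColor(cv2.imread("target_face.png"), cv2.COLOR_BGR2RGB)

matcher = ColorMatcher()
recolored = matcher.match_histogram(source_face, target_face)  # LAB-space matching
cv2.imwrite("recolored.png", cv2.cvtColor(recolored, cv2.COLOR_RGB2BGR))
```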
## ๐Ÿ—๏ธ **Architecture:**
- **`app.py`**: Main Gradio interface with user-friendly controls
- **`video_processor.py`**: Core processing logic with face detection and replacement
- **`utils.py`**: File handling and utility functions
- **`config.py`**: Configuration settings
- **`data_processing.py`**: Advanced processing utilities
## ⚙️ **Key Components:**
1. **Face Detection**:
- MediaPipe for reliable detection
- MTCNN for additional accuracy
   - Overlap removal and quality assessment (components 1 and 2 are sketched together after this list)
2. **Face Replacement**:
- Landmark-based face extraction
- Smooth mask creation with Gaussian blur
- Seamless color matching
3. **Temporal Consistency**:
- Frame-to-frame landmark smoothing
   - Stability controls for smooth transitions (a smoothing sketch also follows the list)
4. **User Controls**:
- Replacement strength adjustment
- Detection sensitivity tuning
- Background preservation options
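
Putting components 1 and 2 together, here is a minimal detect-assess-blend sketch for a single frame. The frame path, the elliptical mask, and the flipped stand-in for the source face are placeholders; the MediaPipe calls follow the standard `mp.solutions` API:

```python
import cv2
import mediapipe as mp
import numpy as np
from data_processing import VideoFrameProcessor

processor = VideoFrameProcessor()
frame = cv2.cvtColor(cv2.imread("frame.png"), cv2.COLOR_BGR2RGB)  # placeholder
h, w = frame.shape[:2]

# Detect faces with MediaPipe and keep only well-placed, large-enough ones
with mp.solutions.face_detection.FaceDetection(min_detection_confidence=0.5) as det:
    results = det.process(frame)
faces = []
for d in results.detections or []:
    box = d.location_data.relative_bounding_box
    bbox = (int(box.xmin * w), int(box.ymin * h),
            int(box.width * w), int(box.height * h))
    if processor.detect_face_quality(bbox, frame.shape) > 0.5:
        faces.append(bbox)

# Build a soft elliptical mask for the first face and blend a replacement in
if faces:
    x, y, fw, fh = faces[0]
    mask = np.zeros((fh, fw), dtype=np.uint8)
    cv2.ellipse(mask, (fw // 2, fh // 2), (fw // 2, fh // 2), 0, 0, 360, 255, -1)
    soft = processor.create_smooth_mask(mask)
    target_region = frame[y:y + fh, x:x + fw]
    source_region = target_region[::-1].copy()  # stand-in for the real source face
    frame[y:y + fh, x:x + fw] = processor.blend_faces_seamlessly(
        target_region, source_region, soft
    )
```

Component 3's frame-to-frame smoothing could be as simple as an exponential moving average over the landmark array. This is a hypothetical stand-in; the actual smoothing lives in `video_processor.py`, which is not shown here:

```python
import numpy as np

def smooth_landmarks(prev, current, alpha=0.7):
    """EMA over (N, 2) landmark arrays; higher alpha trusts the new frame more."""
    if prev is None:
        return current
    return alpha * np.asarray(current) + (1 - alpha) * np.asarray(prev)
```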
## 🚀 **Usage:**
1. Upload a clear reference image of the character
2. Upload the video with the character to replace
3. Adjust settings for optimal results
4. Process and download the result (a scripted equivalent is sketched below)
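
For scripted (non-UI) use, the flow would look roughly like this. `process_video` and its keyword arguments are hypothetical stand-ins for the actual entry point in `video_processor.py`, which may differ:

```python
# Hypothetical driver; the real entry point and parameter names live in
# video_processor.py and may differ.
from video_processor import process_video  # assumed entry point

output_path = process_video(
    reference_image="character.png",  # clear reference image of the character
    input_video="input.mp4",          # video containing the character to replace
    replacement_strength=0.8,         # assumed control, mirroring the UI slider
    detection_sensitivity=0.5,        # assumed control
)
print("Saved result to:", output_path)
```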
The system handles edge cases such as overlapping faces and poor lighting, and maintains temporal consistency throughout the video. A simple version of the overlap check is sketched below.
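
For instance, the overlap handling could be an IoU-based greedy filter like the following; the 0.4 threshold and keep-first policy are illustrative assumptions, not necessarily what `video_processor.py` does:

```python
def remove_overlapping_faces(boxes, iou_threshold=0.4):
    """Greedily keep boxes (x, y, w, h) in order, dropping any box whose
    IoU with an already-kept box exceeds the threshold."""
    def iou(a, b):
        ax, ay, aw, ah = a
        bx, by, bw, bh = b
        x1, y1 = max(ax, bx), max(ay, by)
        x2, y2 = min(ax + aw, bx + bw), min(ay + ah, by + bh)
        inter = max(0, x2 - x1) * max(0, y2 - y1)
        union = aw * ah + bw * bh - inter
        return inter / union if union > 0 else 0.0

    kept = []
    for box in boxes:
        if all(iou(box, k) <= iou_threshold for k in kept):
            kept.append(box)
    return kept
```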