| | """ |
| | Visual effects and enhancements for BackgroundFX Pro. |
| | Implements professional-grade effects for background replacement. |
| | """ |
| |
|
| | import cv2 |
| | import numpy as np |
| | import torch |
| | import torch.nn.functional as F |
| | from typing import Dict, List, Optional, Tuple, Union |
| | from dataclasses import dataclass |
| | from enum import Enum |
| | import logging |
| | from scipy.ndimage import gaussian_filter, map_coordinates |
| |
|
| | from ..utils.logger import setup_logger |
| | from ..utils.device import DeviceManager |
| | from ..core.quality import QualityAnalyzer |
| |
|
| | logger = setup_logger(__name__) |
| |
|
| |
|
class EffectType(Enum):
    """Enumeration of the visual effects this module can apply.

    Values are stable string identifiers, suitable for config files
    and API payloads.
    """

    # Background / defocus treatments
    BLUR = "blur"
    BOKEH = "bokeh"
    DEPTH_OF_FIELD = "depth_of_field"
    MOTION_BLUR = "motion_blur"

    # Subject-integration effects
    LIGHT_WRAP = "light_wrap"
    SHADOW = "shadow"
    REFLECTION = "reflection"
    GLOW = "glow"

    # Whole-frame finishing effects
    COLOR_SHIFT = "color_shift"
    CHROMATIC_ABERRATION = "chromatic_aberration"
    VIGNETTE = "vignette"
    FILM_GRAIN = "film_grain"
| |
|
| |
|
@dataclass
class EffectConfig:
    """Tunable parameters for the visual-effects pipeline.

    Each field maps to one effect; defaults give a subtle, broadly
    usable look.
    """

    blur_strength: float = 15.0            # Gaussian blur sigma
    bokeh_size: int = 21                   # bokeh kernel size in pixels
    bokeh_brightness: float = 1.5          # highlight boost multiplier
    light_wrap_intensity: float = 0.3      # 0..1 blend of background light
    light_wrap_width: int = 10             # edge band width in pixels
    shadow_opacity: float = 0.5            # 0..1 darkness of cast shadow
    shadow_blur: float = 10.0              # shadow softness (Gaussian sigma)
    shadow_offset: Tuple[int, int] = (5, 5)  # (x, y) shadow displacement
    glow_intensity: float = 0.5            # 0..1 glow strength
    glow_radius: int = 20                  # base glow blur radius
    chromatic_shift: float = 2.0           # channel displacement in pixels
    vignette_strength: float = 0.3         # 0..1 corner darkening
    grain_intensity: float = 0.1           # film-grain noise amplitude
    motion_blur_angle: float = 0.0         # blur direction in degrees
    motion_blur_size: int = 15             # motion-blur kernel size
| |
|
| |
|
class BackgroundEffects:
    """Apply visual effects to background images.

    All public methods take BGR uint8 images (OpenCV convention), never
    modify their inputs in place, and return a new uint8 image.  Masks may
    be given either in 0..1 float or 0..255 integer range; they are
    normalized internally.
    """

    def __init__(self, config: Optional[EffectConfig] = None):
        """Initialize with an optional :class:`EffectConfig` (defaults otherwise)."""
        self.config = config or EffectConfig()
        self.device_manager = DeviceManager()

    def apply_blur(self, image: np.ndarray,
                   strength: Optional[float] = None,
                   mask: Optional[np.ndarray] = None) -> np.ndarray:
        """
        Apply Gaussian blur to image.

        Args:
            image: Input image
            strength: Blur sigma; 0 or negative disables the blur.
            mask: Optional single-channel mask for selective blur
                  (1 = fully blurred, 0 = untouched)

        Returns:
            Blurred image
        """
        # `is None` (not `or`) so an explicit strength of 0 disables the
        # blur instead of silently falling back to the configured default.
        if strength is None:
            strength = self.config.blur_strength

        if strength <= 0:
            return image

        # cv2.GaussianBlur requires an odd kernel size; for fractional
        # strengths int(strength * 2) + 1 can come out even (e.g. 7.5 -> 16),
        # which would raise — bump to the next odd value.
        kernel_size = int(strength * 2) + 1
        if kernel_size % 2 == 0:
            kernel_size += 1

        blurred = cv2.GaussianBlur(image, (kernel_size, kernel_size), strength)

        if mask is not None:
            # Per-pixel blend between the sharp and blurred images.
            mask_3ch = np.repeat(mask[:, :, np.newaxis], 3, axis=2).astype(np.float32)
            if mask_3ch.max() > 1:
                mask_3ch = mask_3ch / 255.0

            blurred = image * (1 - mask_3ch) + blurred * mask_3ch
            blurred = blurred.astype(np.uint8)

        return blurred

    def apply_bokeh(self, image: np.ndarray,
                    depth_map: Optional[np.ndarray] = None) -> np.ndarray:
        """
        Apply bokeh effect to simulate depth of field.

        Args:
            image: Input image
            depth_map: Optional depth map (0 = in focus, 1 = max blur).
                       Defaults to a radial map centered on the frame.

        Returns:
            Image with bokeh effect
        """
        h, w = image.shape[:2]

        if depth_map is None:
            # Radial fallback: sharpest at center, maximally defocused at corners.
            center_x, center_y = w // 2, h // 2
            Y, X = np.ogrid[:h, :w]
            dist = np.sqrt((X - center_x) ** 2 + (Y - center_y) ** 2)
            depth_map = dist / max(dist.max(), 1e-6)

        if depth_map.max() > 1:
            depth_map = depth_map / 255.0

        kernel_size = self.config.bokeh_size

        result = np.zeros_like(image, dtype=np.float32)

        # Quantize depth into discrete blur levels and blend per-level.
        blur_levels = 5
        for i in range(blur_levels):
            blur_strength = (i + 1) * (kernel_size // blur_levels)

            if blur_strength >= 3:
                # Build a normalized kernel at this level's size.  Slicing
                # the full-size kernel would leave an un-normalized
                # sub-kernel whose weights sum to < 1 and darken the image.
                ksize = blur_strength if blur_strength % 2 == 1 else blur_strength + 1
                level_kernel = self._create_bokeh_kernel(ksize)
                blurred = cv2.filter2D(image, -1, level_kernel)
            else:
                blurred = image

            depth_min = i / blur_levels
            depth_max = (i + 1) / blur_levels
            if i == blur_levels - 1:
                # Closed upper bound on the last bin so pixels at depth
                # exactly 1.0 are not dropped (they would render black).
                level_mask = (depth_map >= depth_min) & (depth_map <= depth_max)
            else:
                level_mask = (depth_map >= depth_min) & (depth_map < depth_max)
            level_mask = level_mask.astype(np.float32)

            mask_3ch = np.repeat(level_mask[:, :, np.newaxis], 3, axis=2)
            result += blurred * mask_3ch

        result = self._add_bokeh_highlights(result, depth_map)

        return np.clip(result, 0, 255).astype(np.uint8)

    def _create_bokeh_kernel(self, size: int) -> np.ndarray:
        """Create a normalized hexagonal bokeh kernel of the given size."""
        kernel = np.zeros((size, size), dtype=np.float32)
        center = size // 2
        # Keep radius >= 1 so tiny kernel sizes still produce a usable disc.
        radius = max(center - 1, 1)

        for i in range(size):
            for j in range(size):
                x, y = i - center, j - center
                # Point-in-hexagon test (flat-topped hexagon of the given radius).
                if abs(x) <= radius and abs(y) <= radius * np.sqrt(3) / 2:
                    if abs(y) <= (radius * np.sqrt(3) / 2 - abs(x) * np.sqrt(3) / 2):
                        kernel[i, j] = 1.0

        total = kernel.sum()
        if total > 0:
            kernel /= total
        else:
            # Degenerate kernel: fall back to identity so filtering is a
            # no-op instead of dividing by zero and producing NaNs.
            kernel[center, center] = 1.0

        return kernel

    def _add_bokeh_highlights(self, image: np.ndarray,
                              depth_map: np.ndarray) -> np.ndarray:
        """Add soft glow around bright spots, weighted by defocus depth."""
        # Threshold bright pixels (> 200) as highlight candidates.
        gray = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2GRAY)
        _, bright_mask = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)

        # Grow the spots into rounded bokeh discs.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        bright_mask = cv2.dilate(bright_mask, kernel, iterations=2)

        # Weight by depth: highlights glow more the further out of focus.
        bright_mask = (bright_mask * depth_map).astype(np.uint8)

        glow = cv2.GaussianBlur(bright_mask, (21, 21), 10)
        glow = cv2.cvtColor(glow, cv2.COLOR_GRAY2BGR) / 255.0

        result = image + glow * self.config.bokeh_brightness * 50

        return result

    def apply_light_wrap(self, foreground: np.ndarray,
                         background: np.ndarray,
                         mask: np.ndarray) -> np.ndarray:
        """
        Apply light wrap effect for better compositing.

        Adds a thin band of blurred background light just outside the
        foreground silhouette so the subject blends into the new plate.

        Args:
            foreground: Foreground image
            background: Background image
            mask: Foreground mask (0..1 float or 0..255 integer)

        Returns:
            Foreground with light wrap
        """
        if len(mask.shape) == 3:
            mask = mask[:, :, 0]

        # Float normalization also guarantees a dtype cv2.dilate accepts.
        mask = mask.astype(np.float32)
        if mask.max() > 1:
            mask = mask / 255.0

        # Edge band = dilated silhouette minus the silhouette itself.
        kernel = np.ones((self.config.light_wrap_width, self.config.light_wrap_width), np.uint8)
        dilated_mask = cv2.dilate(mask, kernel, iterations=1)
        edge_mask = dilated_mask - mask

        # Soften the background before sampling its light.
        blurred_bg = cv2.GaussianBlur(background, (21, 21), 10)

        bg_light = blurred_bg * edge_mask[:, :, np.newaxis]

        wrapped = foreground + bg_light * self.config.light_wrap_intensity

        return np.clip(wrapped, 0, 255).astype(np.uint8)

    def add_shadow(self, image: np.ndarray,
                   mask: np.ndarray,
                   ground_plane: Optional[float] = None) -> np.ndarray:
        """
        Add realistic shadow to composited image.

        Args:
            image: Background image
            mask: Object mask
            ground_plane: Y-coordinate of ground plane; the shadow is only
                          drawn at or below this row. Defaults to 90% of
                          the frame height.

        Returns:
            Image with shadow
        """
        h, w = image.shape[:2]

        if ground_plane is None:
            ground_plane = h * 0.9

        shadow_mask = mask.copy()
        if len(shadow_mask.shape) == 3:
            shadow_mask = shadow_mask[:, :, 0]
        # float32 up-front: the in-place `*= opacity` below would raise a
        # casting error on a uint8 mask (int array *= float scalar).
        shadow_mask = shadow_mask.astype(np.float32)

        offset_x, offset_y = self.config.shadow_offset

        # Perspective-skew the silhouette so the shadow reads as cast onto
        # the ground rather than a flat offset copy.
        src_points = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
        dst_points = np.float32([
            [offset_x, offset_y],
            [w + offset_x, offset_y],
            [-offset_x * 2, h],
            [w + offset_x * 2, h],
        ])

        matrix = cv2.getPerspectiveTransform(src_points, dst_points)
        shadow_mask = cv2.warpPerspective(shadow_mask, matrix, (w, h))

        # blur_size is always odd (2k + 1) as cv2 requires.
        blur_size = int(self.config.shadow_blur) * 2 + 1
        shadow_mask = cv2.GaussianBlur(shadow_mask, (blur_size, blur_size),
                                       self.config.shadow_blur)

        # Keep the shadow only at/below the ground plane; clamp the cutoff
        # so out-of-range ground planes cannot produce bad slices.
        cutoff = int(np.clip(ground_plane, 0, h))
        shadow_mask[:cutoff, :] = 0

        if shadow_mask.max() > 0:
            shadow_mask = shadow_mask / shadow_mask.max()
        shadow_mask *= self.config.shadow_opacity

        # Blend toward black with per-pixel shadow opacity.
        shadow_color = np.array([0, 0, 0], dtype=np.float32)
        shadow_mask_3ch = np.repeat(shadow_mask[:, :, np.newaxis], 3, axis=2)

        result = image * (1 - shadow_mask_3ch) + shadow_color * shadow_mask_3ch

        return np.clip(result, 0, 255).astype(np.uint8)

    def add_reflection(self, image: np.ndarray,
                       mask: np.ndarray,
                       reflection_strength: float = 0.3) -> np.ndarray:
        """
        Add reflection effect for glossy surfaces.

        Args:
            image: Input image
            mask: Object mask
            reflection_strength: Reflection opacity at its strongest point

        Returns:
            Image with reflection
        """
        h, w = image.shape[:2]

        if len(mask.shape) == 2:
            mask_3ch = np.repeat(mask[:, :, np.newaxis], 3, axis=2)
        else:
            mask_3ch = mask
        mask_3ch = mask_3ch.astype(np.float32)
        if mask_3ch.max() > 1:
            mask_3ch = mask_3ch / 255.0

        # Isolate the subject, then mirror it vertically.
        object_only = image * mask_3ch
        reflection = cv2.flip(object_only, 0)

        # Vertical opacity falloff.
        # NOTE(review): the falloff is anchored to the frame top, not to the
        # subject's base — confirm this matches the intended look.
        gradient = np.linspace(reflection_strength, 0, h)
        gradient = np.repeat(gradient[:, np.newaxis], w, axis=1)
        gradient = np.repeat(gradient[:, :, np.newaxis], 3, axis=2)

        reflection = reflection * gradient

        # Slight blur makes the reflection read as a glossy surface.
        reflection = cv2.GaussianBlur(reflection, (5, 5), 2)

        result = image.astype(np.float32) + reflection

        return np.clip(result, 0, 255).astype(np.uint8)

    def add_glow(self, image: np.ndarray,
                 mask: Optional[np.ndarray] = None,
                 color: Optional[Tuple[int, int, int]] = None) -> np.ndarray:
        """
        Add glow effect to image or masked region.

        Args:
            image: Input image
            mask: Optional mask for selective glow
            color: Glow color (BGR); defaults to white

        Returns:
            Image with glow effect
        """
        if color is None:
            color = (255, 255, 255)

        if mask is not None:
            if len(mask.shape) == 2:
                # Float buffer is essential: writing a 0..1 mask into a
                # uint8 zeros_like buffer truncates everything to 0 and
                # silently kills the glow.
                glow_source = np.zeros(image.shape, dtype=np.float32)
                for i in range(3):
                    glow_source[:, :, i] = mask * (color[i] / 255.0)
            else:
                glow_source = mask * np.array(color, dtype=np.float32).reshape(1, 1, 3) / 255.0
        else:
            # No mask: glow emanates from the brightest pixels.
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            _, bright_mask = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
            glow_source = cv2.cvtColor(bright_mask, cv2.COLOR_GRAY2BGR).astype(np.float32)

        # Stack three progressively wider blurs for a soft halo.
        glow = np.zeros_like(image, dtype=np.float32)

        for i in range(1, 4):
            blur_size = self.config.glow_radius * i
            kernel_size = blur_size * 2 + 1  # always odd, as cv2 requires

            blurred = cv2.GaussianBlur(glow_source, (kernel_size, kernel_size), blur_size)
            glow += blurred / (i * 2)

        # Normalize, then scale to the configured intensity.
        if glow.max() > 0:
            glow = glow / glow.max()
        glow *= self.config.glow_intensity * 255

        result = image.astype(np.float32) + glow

        return np.clip(result, 0, 255).astype(np.uint8)

    def chromatic_aberration(self, image: np.ndarray,
                             shift: Optional[float] = None) -> np.ndarray:
        """
        Apply chromatic aberration effect.

        Args:
            image: Input image
            shift: Pixel shift amount; 0 produces no visible shift.

        Returns:
            Image with chromatic aberration
        """
        if shift is None:
            shift = self.config.chromatic_shift
        h, w = image.shape[:2]

        b, g, r = cv2.split(image)

        # Scale red slightly outward and blue slightly inward around the
        # origin to mimic lens dispersion; green stays fixed.
        M_r = np.float32([[1 + shift / w, 0, -shift], [0, 1 + shift / h, -shift]])
        r_shifted = cv2.warpAffine(r, M_r, (w, h))

        M_b = np.float32([[1 - shift / w, 0, shift], [0, 1 - shift / h, shift]])
        b_shifted = cv2.warpAffine(b, M_b, (w, h))

        result = cv2.merge([b_shifted, g, r_shifted])

        return result

    def add_vignette(self, image: np.ndarray,
                     strength: Optional[float] = None) -> np.ndarray:
        """
        Add vignette effect to image.

        Args:
            image: Input image
            strength: Vignette strength (0-1); an explicit 0 disables it.

        Returns:
            Image with vignette
        """
        if strength is None:
            strength = self.config.vignette_strength
        h, w = image.shape[:2]

        center_x, center_y = w // 2, h // 2
        Y, X = np.ogrid[:h, :w]

        # Radial falloff: 1.0 at the center, (1 - strength) at the corners.
        dist = np.sqrt((X - center_x) ** 2 + (Y - center_y) ** 2)
        max_dist = np.sqrt(center_x ** 2 + center_y ** 2)

        vignette = 1 - (dist / max_dist) * strength
        vignette = np.clip(vignette, 0, 1)

        vignette_3ch = np.repeat(vignette[:, :, np.newaxis], 3, axis=2)
        result = image * vignette_3ch

        return np.clip(result, 0, 255).astype(np.uint8)

    def add_film_grain(self, image: np.ndarray,
                       intensity: Optional[float] = None) -> np.ndarray:
        """
        Add film grain effect to image.

        Note: uses the global NumPy RNG, so output is non-deterministic
        unless the caller seeds it.

        Args:
            image: Input image
            intensity: Grain intensity; an explicit 0 disables the grain.

        Returns:
            Image with film grain
        """
        if intensity is None:
            intensity = self.config.grain_intensity

        # Per-channel Gaussian noise scaled to the 0..255 range.
        h, w = image.shape[:2]
        grain = np.random.randn(h, w, 3) * intensity * 255

        result = image.astype(np.float32) + grain

        return np.clip(result, 0, 255).astype(np.uint8)

    def motion_blur(self, image: np.ndarray,
                    angle: Optional[float] = None,
                    size: Optional[int] = None) -> np.ndarray:
        """
        Apply directional motion blur.

        Args:
            image: Input image
            angle: Blur angle in degrees
            size: Blur kernel size

        Returns:
            Motion blurred image
        """
        if angle is None:
            angle = self.config.motion_blur_angle
        if size is None:
            size = self.config.motion_blur_size

        # Horizontal line kernel through the center row.
        kernel = np.zeros((size, size), dtype=np.float32)
        kernel[(size - 1) // 2, :] = 1.0

        # Rotate the line to the requested blur direction.
        M = cv2.getRotationMatrix2D((size / 2, size / 2), angle, 1)
        kernel = cv2.warpAffine(kernel, M, (size, size))

        # Re-normalize after rotation: interpolation clips energy at the
        # kernel borders, which would otherwise darken the result.
        total = kernel.sum()
        if total > 0:
            kernel /= total

        result = cv2.filter2D(image, -1, kernel)

        return result
| |
|
| |
|
class CompositeEffects:
    """Advanced compositing effects layered on top of :class:`BackgroundEffects`."""

    def __init__(self):
        self.logger = setup_logger(f"{__name__}.CompositeEffects")
        self.bg_effects = BackgroundEffects()

    def smart_composite(self, foreground: np.ndarray,
                        background: np.ndarray,
                        mask: np.ndarray,
                        effects: List[EffectType]) -> np.ndarray:
        """
        Apply smart compositing with multiple effects.

        Effects are ordered deliberately: background treatments first,
        then the alpha composite, then subject-anchored effects, then
        whole-frame finishing effects.

        Args:
            foreground: Foreground image
            background: Background image
            mask: Alpha mask (0..1 float or 0..255 integer)
            effects: List of effects to apply

        Returns:
            Composited image with effects
        """
        result = background.copy()

        if len(mask.shape) == 2:
            mask_3ch = np.repeat(mask[:, :, np.newaxis], 3, axis=2)
        else:
            mask_3ch = mask
        mask_3ch = mask_3ch.astype(np.float32)
        if mask_3ch.max() > 1:
            mask_3ch = mask_3ch / 255.0

        # Background-only effects, applied before compositing.
        for effect in effects:
            if effect == EffectType.BLUR:
                # Invert the mask so the blur hits the background, not the subject.
                result = self.bg_effects.apply_blur(result, mask=1 - mask_3ch[:, :, 0])
            elif effect == EffectType.BOKEH:
                result = self.bg_effects.apply_bokeh(result)
            elif effect == EffectType.VIGNETTE:
                result = self.bg_effects.add_vignette(result)

        # Light wrap must sample the (possibly blurred) background.
        if EffectType.LIGHT_WRAP in effects:
            foreground = self.bg_effects.apply_light_wrap(
                foreground, result, mask_3ch[:, :, 0]
            )

        # Alpha composite.
        result = result * (1 - mask_3ch) + foreground * mask_3ch
        result = result.astype(np.uint8)

        # Subject-anchored effects after the composite.
        if EffectType.SHADOW in effects:
            result = self.bg_effects.add_shadow(result, mask_3ch[:, :, 0])

        if EffectType.REFLECTION in effects:
            result = self.bg_effects.add_reflection(result, mask_3ch[:, :, 0])

        if EffectType.GLOW in effects:
            result = self.bg_effects.add_glow(result, mask_3ch[:, :, 0])

        # Whole-frame finishing effects last.
        if EffectType.CHROMATIC_ABERRATION in effects:
            result = self.bg_effects.chromatic_aberration(result)

        if EffectType.FILM_GRAIN in effects:
            result = self.bg_effects.add_film_grain(result)

        return result

    def color_harmonization(self, foreground: np.ndarray,
                            background: np.ndarray,
                            mask: np.ndarray,
                            strength: float = 0.3) -> np.ndarray:
        """
        Harmonize colors between foreground and background.

        Matches per-channel mean/std of the foreground toward the
        background (Reinhard-style color transfer), blended by `strength`.

        Foreground statistics are computed over the masked subject region
        only; previously `mask` was accepted but ignored, so non-subject
        pixels of the foreground plate polluted the statistics.

        Args:
            foreground: Foreground image
            background: Background image
            mask: Foreground mask (0..1 float or 0..255 integer)
            strength: Harmonization strength (0 = unchanged, 1 = full match)

        Returns:
            Color-harmonized foreground
        """
        # Background statistics over the whole plate.
        bg_mean = np.mean(background, axis=(0, 1))
        bg_std = np.std(background, axis=(0, 1))

        # Boolean subject selection from the mask.
        subject = None
        if mask is not None:
            m = mask[:, :, 0] if len(mask.shape) == 3 else mask
            m = m.astype(np.float32)
            if m.max() > 1:
                m = m / 255.0
            subject = m > 0.5

        if subject is not None and subject.any():
            # Statistics over subject pixels only (shape (N, 3)).
            fg_pixels = foreground[subject]
            fg_mean = fg_pixels.mean(axis=0)
            fg_std = fg_pixels.std(axis=0)
        else:
            # Empty/absent mask: fall back to full-frame statistics.
            fg_mean = np.mean(foreground, axis=(0, 1))
            fg_std = np.std(foreground, axis=(0, 1))

        result = foreground.astype(np.float32)

        for i in range(3):
            # Standardize the channel (epsilon guards flat channels) ...
            result[:, :, i] = (result[:, :, i] - fg_mean[i]) / (fg_std[i] + 1e-6)

            # ... then re-scale/re-center toward the background statistics.
            result[:, :, i] = result[:, :, i] * (bg_std[i] * strength + fg_std[i] * (1 - strength))
            result[:, :, i] += bg_mean[i] * strength + fg_mean[i] * (1 - strength)

        return np.clip(result, 0, 255).astype(np.uint8)