Spaces:
Sleeping
Sleeping
| #!/usr/bin/env python3 | |
| """ | |
| Complete Russian/Eastern European 2000s Photo Filter with Reference Style Transfer | |
| Fixed version with proper function definitions and Gradio interface | |
| """ | |
| import gradio as gr | |
| from PIL import Image, ImageOps, ImageFilter, ImageDraw, ImageFont, ImageEnhance | |
| import numpy as np | |
| import cv2 | |
| import io | |
| import random | |
| import math | |
| # ---------------------- | |
| # Utilities | |
| # ---------------------- | |
def to_np(img: Image.Image):
    """Convert a PIL RGB image into an OpenCV-style BGR ndarray."""
    rgb = np.array(img)
    return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
def to_pil(arr: np.ndarray):
    """Convert an OpenCV-style BGR ndarray back into a PIL RGB image."""
    rgb = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb)
def clamp_u8(x):
    """Clamp values into the displayable 0..255 range and cast to uint8."""
    clipped = np.clip(x, 0, 255)
    return clipped.astype(np.uint8)
def smoothstep(x, edge0, edge1):
    """Hermite easing: 0 at/below edge0, 1 at/above edge1, smooth between.

    The tiny epsilon in the denominator guards against edge0 == edge1.
    """
    t = np.clip((x - edge0) / (edge1 - edge0 + 1e-6), 0, 1)
    return (3 - 2 * t) * t * t
| # ---------------------- | |
| # Missing Debug Functions | |
| # ---------------------- | |
def simple_style_test(input_image):
    """Smoke test: confirm the input image can be read and analysed."""
    if input_image is None:
        return "❌ No input image provided"
    try:
        arr = np.array(input_image)
        # Mean luma of the grayscale conversion proves the cv2 interop works.
        mean_brightness = np.mean(cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY))
        return f"""✅ Basic functionality working!
Image size: {input_image.size}
Mean brightness: {mean_brightness:.2f}
Image mode: {input_image.mode}
Ready for style transfer testing!"""
    except Exception as e:
        return f"❌ Error in simple test: {e}"
def test_style_transfer_debug(input_image, reference_images):
    """Diagnostic: run one colour-matching pass end to end and report status."""
    if input_image is None:
        return "❌ No input image provided"
    if not reference_images:
        return "❌ No reference images provided"
    try:
        # Load each uploaded reference (Gradio file objects expose .name).
        ref_images = []
        for file in reference_images:
            try:
                ref_images.append(Image.open(file.name).convert("RGB"))
            except Exception as e:
                return f"❌ Failed to load reference image: {e}"
        if not ref_images:
            return "❌ No reference images could be loaded"
        ref_db = create_reference_database(ref_images)
        if not ref_db:
            return "❌ Failed to create reference database"
        target_pil = input_image.convert("RGB")
        original_array = np.array(target_pil)
        if not ref_db['color_stats']:
            return "❌ No color stats in reference database"
        # Apply a single colour-matching pass and measure how much it changed.
        result = apply_color_matching(target_pil, ref_db['color_stats'][0], 0.8)
        result_array = np.array(result)
        difference = np.mean(np.abs(original_array.astype(float) - result_array.astype(float)))
        ref_stats = ref_db['color_stats'][0]
        return f"""✅ Style transfer working!
Reference LAB mean: {ref_stats['lab_mean']}
Color difference: {difference:.2f} (should be > 1.0)
Database has {len(ref_db['color_stats'])} reference(s)
Amateur chars: {'Yes' if 'amateur_chars' in ref_db else 'No'}"""
    except Exception as e:
        return f"❌ Error during test: {e}"
| # Enhanced Style Transfer Functions for Amateur Point-and-Click Photography | |
def analyze_amateur_photography_characteristics(image):
    """Measure exposure/focus traits typical of point-and-shoot photos.

    Returns a dict of scalar statistics, or None when image is None.
    """
    if image is None:
        return None
    arr = np.array(image)
    gray = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
    h, w = gray.shape
    # Centre vs. border brightness approximates centre-weighted metering.
    center = gray[h//4:3*h//4, w//4:3*w//4]
    border = np.concatenate([
        gray[:h//4, :].flatten(),
        gray[3*h//4:, :].flatten(),
        gray[:, :w//4].flatten(),
        gray[:, 3*w//4:].flatten(),
    ])
    # A bright 95th percentile in the top quarter hints at on-camera flash.
    flash_hotspot = np.percentile(gray[:h//4, :], 95)
    # Edge density, bottom vs. top, as a crude foreground/background detail proxy.
    edges = cv2.Canny(gray, 50, 150)
    return {
        'center_brightness': np.mean(center),
        'edge_brightness': np.mean(border),
        'flash_intensity': flash_hotspot,
        'brightness_variance': np.std(gray),
        'foreground_detail': np.mean(edges[2*h//3:, :]),  # bottom third
        'background_detail': np.mean(edges[:h//3, :]),    # top third
        'overall_exposure': np.mean(gray),
        'highlight_clipping': np.sum(gray > 240) / (h * w),
        'shadow_crushing': np.sum(gray < 15) / (h * w),
    }
def emulate_point_and_click_exposure(image, reference_chars, strength=0.7):
    """Push the image toward compact-camera exposure habits.

    Applies centre-weighted metering bias, optional flash falloff, and the
    limited dynamic range (clipped highlights / crushed shadows) observed in
    the reference characteristics. Returns a new PIL image.
    """
    if reference_chars is None:
        return image
    arr = np.array(image).astype(np.float32)
    h, w = arr.shape[:2]
    yy, xx = np.ogrid[:h, :w]
    cy, cx = h // 2, w // 2
    # Normalised radial distance from the frame centre.
    radial = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2)
    radial = radial / np.max(radial)
    # Vertical position as a crude depth proxy (bottom = near, top = far).
    depth = yy.astype(np.float32) / h
    # Pull the centre region toward the reference centre brightness.
    ref_center = reference_chars.get('center_brightness', 128)
    current_center = np.mean(arr[h//4:3*h//4, w//4:3*w//4])
    correction = (ref_center - current_center) * strength * 0.3
    center_mask = 1 - smoothstep(radial, 0.3, 0.8)
    arr += center_mask[..., None] * correction
    ref_flash = reference_chars.get('flash_intensity', 200)
    if ref_flash > 180:  # reference looks flash-lit
        # Brighten the near (lower ~60%) part of the frame...
        flash_mask = 1 - smoothstep(depth, 0.4, 1.0)
        flash_strength = (ref_flash - 128) * strength * 0.15
        arr += flash_mask[..., None] * flash_strength
        # ...and darken the far background as a harsh flash shadow.
        shadow_mask = smoothstep(depth, 0.6, 1.0)
        arr += shadow_mask[..., None] * (-flash_strength * 0.4)
    # Reproduce the reference's limited dynamic range.
    ref_clipping = reference_chars.get('highlight_clipping', 0.02)
    ref_crushing = reference_chars.get('shadow_crushing', 0.03)
    if ref_clipping > 0.01:
        # Soft-clip highlights above a data-driven threshold.
        clip_at = 255 - (ref_clipping * 800)
        arr = np.where(arr > clip_at, clip_at + (arr - clip_at) * 0.3, arr)
    if ref_crushing > 0.01:
        # Halve everything below the crush threshold.
        crush_at = ref_crushing * 600
        arr = np.where(arr < crush_at, arr * 0.5, arr)
    return Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8))
def apply_amateur_focus_characteristics(image, reference_chars, strength=0.6):
    """Mimic amateur focusing: soft backgrounds plus slight camera shake."""
    if reference_chars is None:
        return image
    arr = np.array(image)
    h, w = arr.shape[:2]
    # Row position as a depth proxy (top of frame = background).
    rows = np.arange(h).reshape(-1, 1) / h
    depth = np.broadcast_to(rows, (h, w))
    fg_detail = reference_chars.get('foreground_detail', 50)
    bg_detail = reference_chars.get('background_detail', 30)
    if bg_detail < fg_detail * 0.7:
        # Reference background is noticeably softer: blur the top of frame.
        soft = cv2.GaussianBlur(arr, (0, 0), 1.5 * strength)
        bg_mask = smoothstep(1 - depth, 0.3, 0.8)
        blended = (arr.astype(np.float32) * (1 - bg_mask[..., None])
                   + soft.astype(np.float32) * bg_mask[..., None])
        arr = blended.astype(np.uint8)
    # High brightness variance is read as a hint of handheld motion blur.
    if reference_chars.get('brightness_variance', 30) > 40:
        k = max(3, int(strength * 5))
        kernel = np.zeros((k, k))
        kernel[k // 2, :] = 1 / k  # horizontal shake kernel
        shaken = cv2.filter2D(arr, -1, kernel)
        arr = cv2.addWeighted(arr, 1 - strength * 0.3, shaken, strength * 0.3, 0)
    return Image.fromarray(arr)
def apply_amateur_flash_realism(image, reference_chars, strength=0.7):
    """Apply realistic amateur on-camera flash: falloff, cool cast, shadows.

    Only runs when the reference characteristics indicate a flash exposure
    (flash_intensity >= 180); otherwise the image is returned unchanged.
    """
    if reference_chars is None:
        return image
    ref_flash = reference_chars.get('flash_intensity', 150)
    if ref_flash < 180:  # No significant flash in reference
        return image
    img_array = np.array(image).astype(np.float32)
    h, w = img_array.shape[:2]
    # Flash position (slightly off-center, typical of compact cameras)
    flash_x = w * 0.52  # Slightly right of center
    flash_y = h * 0.15  # Upper portion
    # Distance map from the flash position.
    y_coords, x_coords = np.ogrid[:h, :w]
    flash_distance = np.sqrt((x_coords - flash_x) ** 2 + (y_coords - flash_y) ** 2)
    max_distance = np.sqrt(w ** 2 + h ** 2)
    flash_distance_norm = flash_distance / max_distance
    # 1. Harsh falloff (inverse square law)
    flash_intensity = 1 / (1 + flash_distance_norm * 8) ** 2
    flash_effect = flash_intensity * strength * (ref_flash - 128) / 128
    # 2. Flash light is cool: bias toward blue, away from red.
    #    BUGFIX: arrays from PIL are RGB (0=R, 1=G, 2=B); the original code
    #    indexed them in BGR order, which produced a warm cast instead.
    img_array[:, :, 0] += flash_effect * 20  # Less red
    img_array[:, :, 1] += flash_effect * 25  # More green
    img_array[:, :, 2] += flash_effect * 35  # Much more blue
    # 3. Flash overexposes foreground subjects (lower half of the frame).
    foreground_mask = 1 - smoothstep(y_coords / h, 0.5, 1.0)
    overexposure = flash_effect * foreground_mask * 15
    img_array += overexposure[..., None]
    # 4. Hard shadows behind subjects: darken mid-frame areas far from flash.
    shadow_mask = smoothstep(flash_distance_norm, 0.4, 0.8) * smoothstep(y_coords / h, 0.3, 0.7)
    shadow_effect = -flash_effect * shadow_mask * 20
    img_array += shadow_effect[..., None]
    return Image.fromarray(np.clip(img_array, 0, 255).astype(np.uint8))
def extract_color_statistics(image):
    """Collect per-channel colour statistics used for style matching.

    Returns a dict with RGB/LAB/HSV means and stds plus a 50-bin brightness
    histogram, or None when image is None.
    """
    if image is None:
        return None
    img_array = np.array(image)
    # Analyse in several colour spaces; LAB drives the matching step.
    lab = cv2.cvtColor(img_array, cv2.COLOR_RGB2LAB)
    hsv = cv2.cvtColor(img_array, cv2.COLOR_RGB2HSV)
    gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
    return {
        'rgb_mean': np.mean(img_array, axis=(0, 1)),
        'rgb_std': np.std(img_array, axis=(0, 1)),
        'lab_mean': np.mean(lab, axis=(0, 1)),
        'lab_std': np.std(lab, axis=(0, 1)),
        'hsv_mean': np.mean(hsv, axis=(0, 1)),
        'hsv_std': np.std(hsv, axis=(0, 1)),
        # BUGFIX: fix the histogram range so distributions are comparable
        # between images (without range=, np.histogram spans each image's
        # own min..max, producing incompatible bin edges).
        'brightness_dist': np.histogram(gray, bins=50, range=(0, 255))[0],
    }
def extract_texture_features(image):
    """Extract simple gradient-based texture statistics, or None if no image."""
    if image is None:
        return None
    gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    # Sobel gradient magnitude as a cheap texture measure.
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    magnitude = np.sqrt(gx ** 2 + gy ** 2)
    return {
        'gradient_mean': np.mean(magnitude),
        'gradient_std': np.std(magnitude),
        # Fraction of pixels in the top gradient quartile.
        'edge_density': np.mean(magnitude > np.percentile(magnitude, 75)),
        'contrast': np.std(gray),
    }
def apply_color_matching(target_image, reference_stats, strength=0.7):
    """Match the target's LAB mean/std to the reference stats, then blend.

    strength in [0, 1] controls how far the result moves toward the matched
    image. Prints DEBUG diagnostics to stdout at each stage.
    """
    if reference_stats is None:
        print("DEBUG: No reference stats provided to color matching")
        return target_image
    print(f"DEBUG: Applying color matching with strength {strength}")
    print(f"DEBUG: Reference LAB mean: {reference_stats['lab_mean']}")
    target_array = np.array(target_image).astype(np.float32)
    original_array = target_array.copy()
    # Work in LAB: transfer first and second moments per channel.
    target_lab = cv2.cvtColor(target_array.astype(np.uint8), cv2.COLOR_RGB2LAB).astype(np.float32)
    original_lab = target_lab.copy()
    for i in range(3):
        target_mean = np.mean(target_lab[:, :, i])
        target_std = np.std(target_lab[:, :, i])
        ref_mean = reference_stats['lab_mean'][i]
        ref_std = reference_stats['lab_std'][i]
        print(f"DEBUG: Channel {i} - Target mean: {target_mean:.1f}, Ref mean: {ref_mean:.1f}")
        print(f"DEBUG: Channel {i} - Target std: {target_std:.1f}, Ref std: {ref_std:.1f}")
        # Skip nearly flat channels to avoid amplifying noise.
        if target_std > 1:
            target_lab[:, :, i] = (target_lab[:, :, i] - target_mean) * (ref_std / target_std) + ref_mean
    matched = cv2.cvtColor(np.clip(target_lab, 0, 255).astype(np.uint8), cv2.COLOR_LAB2RGB)
    lab_difference = np.mean(np.abs(original_lab - target_lab))
    print(f"DEBUG: LAB space difference: {lab_difference}")
    # Linear blend between the original and the fully matched image.
    result_array = np.array(target_image).astype(np.float32)
    matched_array = matched.astype(np.float32)
    final = result_array * (1 - strength) + matched_array * strength
    final_image = Image.fromarray(np.clip(final, 0, 255).astype(np.uint8))
    final_difference = np.mean(np.abs(original_array - np.array(final_image).astype(np.float32)))
    print(f"DEBUG: Final color matching difference: {final_difference}")
    return final_image
def apply_texture_matching(target_image, reference_texture, strength=0.5):
    """Nudge the target's global contrast toward the reference texture stats."""
    if reference_texture is None:
        return target_image
    arr = np.array(target_image)
    own_texture = extract_texture_features(target_image)
    if own_texture is None:
        return target_image
    ref_contrast = reference_texture['contrast']
    own_contrast = own_texture['contrast']
    if own_contrast <= 0:
        return target_image
    # Interpolate the contrast ratio toward 1.0 by (1 - strength), then bound it.
    factor = (ref_contrast / own_contrast) * strength + 1 * (1 - strength)
    factor = np.clip(factor, 0.5, 2.0)
    # Scale around mid-grey (128) so the overall brightness is preserved.
    adjusted = (arr.astype(np.float32) - 128) * factor + 128
    return Image.fromarray(np.clip(adjusted, 0, 255).astype(np.uint8))
def create_reference_database(reference_images):
    """Build a style database (colour, texture, amateur traits) from references.

    Returns None when no usable reference produced colour statistics.
    NOTE(review): the per-key lists are assumed index-aligned; each extractor
    returns None only for a None image, which is guarded here — confirm if the
    extractors ever gain other failure modes.
    """
    if not reference_images:
        return None
    database = {
        'color_stats': [],
        'texture_features': [],
        'scene_brightness': [],
        'amateur_chars': [],  # amateur point-and-shoot characteristics
    }
    for ref_img in reference_images:
        if ref_img is None:
            continue
        color_stats = extract_color_statistics(ref_img)
        texture_features = extract_texture_features(ref_img)
        amateur_chars = analyze_amateur_photography_characteristics(ref_img)
        if color_stats is not None:
            database['color_stats'].append(color_stats)
        if texture_features is not None:
            database['texture_features'].append(texture_features)
        if amateur_chars is not None:
            database['amateur_chars'].append(amateur_chars)
        # Mean grayscale level, used later to pick the closest reference.
        gray = cv2.cvtColor(np.array(ref_img), cv2.COLOR_RGB2GRAY)
        database['scene_brightness'].append(np.mean(gray))
    return database if database['color_stats'] else None
def enhanced_reference_style_transfer(target_image, reference_database, strength=0.6, method="enhanced_amateur"):
    """Transfer style from the brightness-closest reference in the database.

    method selects the pipeline: "color_matching", "texture_matching", or
    "enhanced_amateur" (colour + texture + amateur-camera emulation).
    Unrecognised methods leave the image unchanged.
    """
    if not reference_database or not reference_database.get('color_stats'):
        return target_image
    # Pick the reference whose overall brightness is closest to the target's.
    target_brightness = np.mean(cv2.cvtColor(np.array(target_image), cv2.COLOR_RGB2GRAY))
    brightnesses = reference_database['scene_brightness']
    best_ref_idx = min(range(len(brightnesses)),
                       key=lambda i: abs(target_brightness - brightnesses[i]))
    result = target_image
    if method == "color_matching":
        result = apply_color_matching(result, reference_database['color_stats'][best_ref_idx], strength)
    elif method == "texture_matching":
        result = apply_texture_matching(result, reference_database['texture_features'][best_ref_idx], strength)
    elif method == "enhanced_amateur":
        # Colour and texture matching first, at reduced strengths...
        result = apply_color_matching(result, reference_database['color_stats'][best_ref_idx], strength * 0.7)
        result = apply_texture_matching(result, reference_database['texture_features'][best_ref_idx], strength * 0.3)
        # ...then the full amateur-camera emulation chain, when traits exist.
        amateur = reference_database.get('amateur_chars', [])
        if len(amateur) > best_ref_idx:
            chars = amateur[best_ref_idx]
            result = emulate_point_and_click_exposure(result, chars, strength)
            result = apply_amateur_focus_characteristics(result, chars, strength * 0.7)
            result = apply_amateur_flash_realism(result, chars, strength * 0.8)
    return result
def apply_reference_style_transfer(target_image, reference_database, strength=0.6, method="advanced_blend"):
    """Thin wrapper around enhanced_reference_style_transfer.

    NOTE(review): the default method "advanced_blend" is not one of the
    methods enhanced_reference_style_transfer recognises, so a default call
    returns the image unchanged — confirm whether the default is intentional.
    """
    return enhanced_reference_style_transfer(target_image, reference_database, strength, method)
| # ---------------------- | |
| # Original Core Functions (unchanged) | |
| # ---------------------- | |
def crop_4_3(img: Image.Image):
    """Crop to a 4:3 frame, biased slightly left / toward the top."""
    w, h = img.size
    target_ratio = 4 / 3
    if w / h > target_ratio:
        # Too wide: trim the width, keeping a window 40% in from the left.
        new_w = int(h * target_ratio)
        left = max(0, int((w - new_w) * 0.4))
        return img.crop((left, 0, left + new_w, h))
    # Too tall: trim the height, keeping a window 30% down from the top.
    new_h = int(w / target_ratio)
    top = max(0, int((h - new_h) * 0.3))
    return img.crop((0, top, w, top + new_h))
def apply_lens_distortion(bgr, strength=0.01):
    """Apply a mild radial lens distortion via coordinate remapping."""
    if strength <= 0:
        return bgr
    h, w = bgr.shape[:2]
    yy, xx = np.ogrid[:h, :w]
    cx, cy = w / 2, h / 2
    # Coordinates normalised to [-1, 1] around the optical centre.
    xn = (xx - cx) / cx
    yn = (yy - cy) / cy
    r = np.sqrt(xn ** 2 + yn ** 2)
    # Quadratic radial term: sampling offset grows toward the edges.
    warp = 1 + strength * r ** 2
    map_x = (xn * warp * cx + cx).astype(np.float32)
    map_y = (yn * warp * cy + cy).astype(np.float32)
    return cv2.remap(bgr, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
def enhanced_vignette(bgr, strength=0.15, feather=1.8):
    """Darken the corners with a feathered radial mask (never below 60%)."""
    if strength <= 0:
        return bgr
    h, w = bgr.shape[:2]
    yy, xx = np.ogrid[:h, :w]
    cx, cy = w / 2, h / 2
    xn = (xx - cx) / (w / 2)
    yn = (yy - cy) / (h / 2)
    dist = np.sqrt(xn ** 2 + yn ** 2)
    # feather controls how steeply darkness ramps toward the corners.
    mask = np.clip(1 - strength * (dist ** feather), 0.6, 1.0).astype(np.float32)
    shaded = bgr.astype(np.float32) * mask[..., None]
    return clamp_u8(shaded)
def realistic_film_grain(bgr, grain_strength=8, grain_size=1.1):
    """Add luma-dominant Gaussian grain; grain_size > 1 mixes in a coarse layer.

    Nondeterministic: draws fresh random noise on every call.
    """
    if grain_strength < 2:
        return bgr
    h, w = bgr.shape[:2]
    noise = np.random.normal(0, grain_strength * 0.5, (h, w)).astype(np.float32)
    if grain_size > 1.0:
        # Low-resolution noise upscaled back gives larger, softer clumps.
        ch, cw = max(1, int(h / grain_size)), max(1, int(w / grain_size))
        coarse = np.random.normal(0, grain_strength * 0.2, (ch, cw)).astype(np.float32)
        noise += cv2.resize(coarse, (w, h), interpolation=cv2.INTER_LINEAR)
    yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV).astype(np.float32)
    # Grain hits luma hardest, chroma only lightly.
    yuv[:, :, 0] += noise * 0.6
    yuv[:, :, 1] += noise * 0.2
    yuv[:, :, 2] += noise * 0.2
    return cv2.cvtColor(clamp_u8(yuv), cv2.COLOR_YUV2BGR)
def enhanced_chroma_noise(bgr, amount=4.0):
    """Add correlated chroma noise in YCrCb, leaving luma untouched.

    Nondeterministic: draws fresh random noise on every call.
    """
    if amount <= 0:
        return bgr
    ycrcb = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCrCb).astype(np.float32)
    y, cr, cb = cv2.split(ycrcb)
    h, w = cr.shape
    cr_noise = np.random.normal(0, amount * 0.5, (h, w)).astype(np.float32)
    cb_noise = np.random.normal(0, amount * 0.5, (h, w)).astype(np.float32)
    # Partially correlate the two chroma channels, as real sensors do.
    cb_noise = cb_noise * 0.7 + cr_noise * 0.3
    cr = np.clip(cr + cr_noise, 0, 255)
    cb = np.clip(cb + cb_noise, 0, 255)
    merged = np.stack([y, cr, cb], axis=-1).astype(np.uint8)
    return cv2.cvtColor(merged, cv2.COLOR_YCrCb2BGR)
def authentic_2000s_tone_curve(bgr, amount=1.0):
    """Blend in an early-digital S-curve: lifted blacks, rolled-off highlights."""
    if amount <= 0:
        return bgr
    x = np.linspace(0, 1, 256)
    # Piecewise curve: gentle lift below mid-grey, soft shoulder above it.
    tone = np.where(
        x < 0.5,
        0.18 + 0.60 * (2 * x) ** 0.9,
        0.82 - 0.15 * (2 * (1 - x)) ** 1.1,
    )
    lut = (np.clip(tone, 0, 1) * 255).astype(np.uint8)
    # A single-channel LUT applies identically to all three channels.
    curved = cv2.LUT(bgr, lut)
    mixed = bgr.astype(np.float32) * (1 - amount) + curved.astype(np.float32) * amount
    return mixed.astype(np.uint8)
def early_digital_wb(bgr, preset="auto"):
    """Apply an early-digital white-balance preset (temp/tint shift + desat)."""
    presets = {
        "auto": {"temp_shift": 8, "tint_shift": 4, "saturation": 0.88},
        "daylight": {"temp_shift": 0, "tint_shift": 2, "saturation": 0.95},
        "cloudy": {"temp_shift": -6, "tint_shift": 1, "saturation": 0.92},
        "tungsten": {"temp_shift": 25, "tint_shift": 8, "saturation": 0.85},
        "fluorescent": {"temp_shift": 15, "tint_shift": -5, "saturation": 0.90},
    }
    s = presets.get(preset, presets["auto"])
    # int16 gives headroom for the shifts before clipping back to 0..255.
    b, g, r = cv2.split(bgr.astype(np.int16))
    if s["temp_shift"] > 0:
        # Positive shift cools: push blue up, pull red down.
        b = np.clip(b + s["temp_shift"], 0, 255)
        r = np.clip(r - s["temp_shift"] // 2, 0, 255)
    else:
        # Negative shift warms: push red up, pull blue down.
        r = np.clip(r - s["temp_shift"], 0, 255)
        b = np.clip(b + s["temp_shift"] // 2, 0, 255)
    g = np.clip(g + s["tint_shift"], 0, 255)
    result = cv2.merge([b.astype(np.uint8), g.astype(np.uint8), r.astype(np.uint8)])
    # Finish with the preset's global desaturation.
    hsv = cv2.cvtColor(result, cv2.COLOR_BGR2HSV).astype(np.float32)
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * s["saturation"], 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
def ccd_blooming_effect(bgr, threshold=240, bloom_size=2):
    """Brighten and spread near-clipped highlights, like CCD charge bleed."""
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    hot = (gray > threshold).astype(np.uint8)
    if not np.any(hot):
        return bgr
    # Grow the hot-pixel mask so bloom spills onto neighbours.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (bloom_size, bloom_size))
    spread = cv2.dilate(hot, kernel, iterations=1)
    out = bgr.astype(np.float32)
    bloom_factor = 1.08
    for ch in range(3):
        boosted = np.minimum(out[:, :, ch] * bloom_factor, 255)
        out[:, :, ch] = np.where(spread > 0, boosted, out[:, :, ch])
    return out.astype(np.uint8)
def enhanced_center_sharpness(pil_img: Image.Image, strength=0.3):
    """Sharpen the frame centre more than the edges (cheap lens look)."""
    arr = np.array(pil_img)
    h, w = arr.shape[:2]
    # Mild unsharp-style kernel.
    kernel = np.array([[-0.1, -0.1, -0.1],
                       [-0.1, 2.2, -0.1],
                       [-0.1, -0.1, -0.1]])
    sharp = cv2.filter2D(arr, -1, kernel)
    yy, xx = np.ogrid[:h, :w]
    cx, cy = w / 2, h / 2
    dist = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2)
    # Quadratic falloff: full sharpening at centre, none in the corners.
    mask = np.clip(1 - dist / np.sqrt(cx ** 2 + cy ** 2), 0, 1) ** 2
    weight = mask[..., None] * strength
    blended = arr.astype(np.float32) * (1 - weight) + sharp.astype(np.float32) * weight
    return Image.fromarray(np.clip(blended, 0, 255).astype(np.uint8))
def authentic_jpeg_compression(pil_img: Image.Image, quality=55, add_artifacts=False):
    """Round-trip through in-memory JPEG to bake in period-correct artifacts.

    add_artifacts performs a second, slightly higher-quality pass, which
    compounds blocking without further softening the image.
    """
    def compress_once(im, q):
        buf = io.BytesIO()
        # subsampling=2 is 4:2:0 chroma, typical of consumer cameras.
        im.save(buf, format='JPEG', quality=q, subsampling=2, optimize=False)
        buf.seek(0)
        return Image.open(buf).convert("RGB")

    out = compress_once(pil_img, int(quality))
    if add_artifacts:
        out = compress_once(out, int(min(95, quality + 10)))
    return out
| # Russian film stocks and enhanced features | |
def authentic_russian_film_stocks(bgr, stock="svema", strength=0.5):
    """Emulate Soviet-era film stocks via split-toning plus sat/contrast tweaks.

    Unknown stock names fall back to "svema".
    """
    if strength <= 0:
        return bgr
    stocks = {
        "svema": {"shadow_tint": (0, 8, -3), "highlight_tint": (5, -2, 8), "saturation": 0.92, "contrast": 1.08},
        "orwo": {"shadow_tint": (-2, 3, 6), "highlight_tint": (2, 0, -4), "saturation": 0.95, "contrast": 1.12},
        "tasma": {"shadow_tint": (2, -1, 4), "highlight_tint": (3, 2, -1), "saturation": 0.88, "contrast": 1.05},
    }
    profile = stocks.get(stock, stocks["svema"])
    result = bgr.astype(np.float32)
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255.0
    # Luminance-based masks: shadows below mid-grey, highlights above.
    shadow_mask = np.maximum(0, 1 - gray * 2)
    highlight_mask = np.maximum(0, (gray - 0.5) * 2)
    for ch, (lo_shift, hi_shift) in enumerate(zip(profile["shadow_tint"], profile["highlight_tint"])):
        result[:, :, ch] += shadow_mask * lo_shift * strength
        result[:, :, ch] += highlight_mask * hi_shift * strength
    result = np.clip(result, 0, 255).astype(np.uint8)
    # Strength-weighted saturation and value (contrast proxy) adjustment.
    hsv = cv2.cvtColor(result, cv2.COLOR_BGR2HSV).astype(np.float32)
    hsv[:, :, 1] *= profile["saturation"] ** strength
    hsv[:, :, 2] *= profile["contrast"] ** (strength * 0.5)
    hsv = np.clip(hsv, 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
def add_tungsten_indoor_warmth(bgr, strength=0.3):
    """Warm up darker regions, as tungsten room lighting tends to do."""
    if strength <= 0:
        return bgr
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255.0
    # Darker pixels get more warmth (inverse-luma weighting).
    warm_mask = (1 - gray) * strength
    result = bgr.astype(np.float32)
    result[:, :, 2] += warm_mask * 25  # red up
    result[:, :, 1] += warm_mask * 12  # green up slightly
    result[:, :, 0] -= warm_mask * 8   # blue down
    return np.clip(result, 0, 255).astype(np.uint8)
def add_fluorescent_flicker(bgr, strength=0.2):
    """Apply a random global brightness wobble and green shift (fluorescent).

    Nondeterministic: each call draws new random flicker factors.
    """
    if strength <= 0:
        return bgr
    # Global gain wobble, bounded to keep the effect subtle.
    gain = np.clip(1 + np.random.normal(0, strength * 0.05), 0.85, 1.15)
    result = bgr.astype(np.float32) * gain
    # Fluorescent tubes drift green slightly.
    result[:, :, 1] *= np.random.normal(1, strength * 0.03)
    return np.clip(result, 0, 255).astype(np.uint8)
def add_party_atmosphere(bgr, strength=0.3):
    """Brighten globally and warm up skin-toned regions for a party look."""
    if strength <= 0:
        return bgr
    result = bgr.astype(np.float32)
    result *= 1 + strength * 0.15
    # Rough skin-tone detection by hue/sat/value range in HSV.
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    lower_skin = np.array([0, 25, 50])
    upper_skin = np.array([25, 255, 255])
    skin_mask = cv2.inRange(hsv, lower_skin, upper_skin).astype(np.float32) / 255.0
    # Extra red/green on skin makes faces read warmer under party light.
    result[:, :, 2] += skin_mask * strength * 15
    result[:, :, 1] += skin_mask * strength * 8
    return np.clip(result, 0, 255).astype(np.uint8)
def apply_scene_preset(bgr, scene="none", intensity=1.0):
    """Apply a named scene preset by chaining the appropriate effect helpers.

    Unknown scene names (and "none") return the input unchanged.
    """
    if scene == "none":
        return bgr
    result = bgr.copy()
    if scene == "kitchen_party":
        result = authentic_russian_film_stocks(result, "svema", intensity * 0.6)
        result = add_tungsten_indoor_warmth(result, intensity * 0.4)
        result = add_party_atmosphere(result, intensity * 0.5)
    elif scene == "winter_street":
        result = authentic_russian_film_stocks(result, "orwo", intensity * 0.7)
        # Cool the frame by lifting the blue channel.
        result = result.astype(np.float32)
        result[:, :, 0] += intensity * 8
        result = np.clip(result, 0, 255).astype(np.uint8)
    elif scene == "apartment_interior":
        result = authentic_russian_film_stocks(result, "tasma", intensity * 0.5)
        result = add_tungsten_indoor_warmth(result, intensity * 0.3)
        result = add_fluorescent_flicker(result, intensity * 0.2)
    elif scene == "dacha_summer":
        result = authentic_russian_film_stocks(result, "svema", intensity * 0.4)
        # Boost saturation of green hues (foliage) only.
        hsv = cv2.cvtColor(result, cv2.COLOR_BGR2HSV).astype(np.float32)
        green_mask = ((hsv[:, :, 0] > 40) & (hsv[:, :, 0] < 80)).astype(np.float32)
        hsv[:, :, 1] += green_mask * intensity * 15
        hsv = np.clip(hsv, 0, 255)
        result = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
    return result
| # Video/TV effects | |
def radial_chromatic_aberration(bgr, pixels=1.0):
    """Shift red outward and blue inward radially, growing toward the edges."""
    if pixels <= 0:
        return bgr
    h, w = bgr.shape[:2]
    y, x = np.indices((h, w), dtype=np.float32)
    cx, cy = np.float32(w / 2.0), np.float32(h / 2.0)
    dx, dy = x - cx, y - cy
    r = np.sqrt(dx * dx + dy * dy) + 1e-6
    # Shift magnitude grows linearly with normalised radius.
    shift = np.float32(pixels) * (r / np.sqrt(cx * cx + cy * cy))
    ux, uy = dx / r, dy / r
    # Red samples outward, blue samples inward; green stays put.
    map_x_out = np.ascontiguousarray((x + ux * shift).astype(np.float32))
    map_y_out = np.ascontiguousarray((y + uy * shift).astype(np.float32))
    map_x_in = np.ascontiguousarray((x - ux * shift).astype(np.float32))
    map_y_in = np.ascontiguousarray((y - uy * shift).astype(np.float32))
    b, g, rch = cv2.split(bgr)
    rch = cv2.remap(rch, map_x_out, map_y_out, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    b = cv2.remap(b, map_x_in, map_y_in, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    return cv2.merge([b, g, rch])
def composite_chroma_bleed(bgr, amount=0.3, offset_px=1):
    """Blur and offset chroma horizontally, like a composite-video signal."""
    if amount <= 0:
        return bgr
    ycrcb = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCrCb).astype(np.float32)
    y, cr, cb = cv2.split(ycrcb)
    # Horizontal-only box blur: chroma bandwidth loss smears sideways.
    k = max(1, int(3 + amount * 12))
    cr_soft = cv2.blur(cr, (k, 1))
    cb_soft = cv2.blur(cb, (k, 1))
    if offset_px != 0:
        # Slide the chroma planes sideways relative to luma.
        M = np.float32([[1, 0, offset_px], [0, 1, 0]])
        cr_soft = cv2.warpAffine(cr_soft, M, (cr.shape[1], cr.shape[0]), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
        cb_soft = cv2.warpAffine(cb_soft, M, (cb.shape[1], cb.shape[0]), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    merged = np.stack([y, cr_soft, cb_soft], axis=-1).astype(np.uint8)
    return cv2.cvtColor(merged, cv2.COLOR_YCrCb2BGR)
def add_interlace_combing(bgr, amount=0.3, horiz_px=2):
    """Shift every other scanline sideways and darken it (interlace combing)."""
    if amount <= 0:
        return bgr
    h, w = bgr.shape[:2]
    out = bgr.copy()
    # Even rows slide horizontally, like mismatched interlaced fields.
    delta = int(max(1, horiz_px * amount * 5))
    out[::2] = np.roll(out[::2], shift=delta, axis=1)
    # The shifted field is also slightly darker.
    lines = np.ones((h, 1, 1), np.float32)
    lines[::2] *= 1.0 - 0.15 * amount
    return clamp_u8(out.astype(np.float32) * lines)
def add_tv_scanlines(bgr, strength=0.02):
    """Darken every other row to fake CRT scanlines (darkening capped at 35%)."""
    if strength <= 0:
        return bgr
    h, w = bgr.shape[:2]
    lines = np.ones((h, 1, 1), np.float32)
    darken = np.clip(strength, 0.0, 0.35)
    lines[::2] *= 1.0 - darken
    return clamp_u8(bgr.astype(np.float32) * lines)
def add_low_bitrate_artifacts(bgr, strength=0.3, block_size=16, ringing=0.3):
    """Fake low-bitrate video: blockiness, edge ringing, then JPEG round-trip."""
    if strength <= 0:
        return bgr
    h, w = bgr.shape[:2]
    # Down/up-scale with nearest-neighbour to create hard macroblocks.
    factor = max(1, int(block_size * (0.8 + 1.7 * strength)))
    small = cv2.resize(bgr, (max(1, w // factor), max(1, h // factor)), interpolation=cv2.INTER_LINEAR)
    up = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)
    if ringing > 0:
        # Unsharp-style overshoot around edges imitates DCT ringing.
        blur = cv2.GaussianBlur(up, (0, 0), 0.8 + 1.6 * ringing)
        up = cv2.addWeighted(up, 1 + 0.9 * ringing, blur, -0.9 * ringing, 0)
    # Finish with a genuine lossy JPEG pass at a strength-scaled quality.
    pil = to_pil(up)
    q = int(np.clip(48 - 28 * strength, 8, 60))
    pil = authentic_jpeg_compression(pil, quality=q, add_artifacts=True)
    return to_np(pil)
def add_print_border(pil_img: Image.Image, enable=False, width_rel=0.04, color=(245, 245, 245)):
    """Paste the photo onto a light card, like a 10x15 minilab print.

    The bottom margin is deliberately taller than the others (2.2x vs 2x
    total), matching how prints leave extra paper below the frame.
    """
    if not enable or width_rel <= 0:
        return pil_img
    w, h = pil_img.size
    margin = int(min(w, h) * width_rel)
    card = Image.new("RGB", (w + margin * 2, h + int(margin * 2.2)), color)
    card.paste(pil_img, (margin, margin))
    return card
def lab_color_cast(bgr, preset="none", amount=0.3):
    """Apply a luminance-dependent minilab color cast, then desaturate slightly.

    Args:
        bgr: uint8 BGR image.
        preset: "none", "fuji_warm_magenta_shadows", "kodak_cool_mids",
            or "minilab_greenish".
        amount: 0..1 cast strength; 0 or preset "none" is a no-op.

    Returns:
        uint8 BGR image with the cast and mild desaturation applied.
    """
    if amount <= 0 or preset == "none":
        return bgr
    # Normalized luma decides where (shadows/mids/highlights) the cast lands.
    y = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCrCb)[:, :, 0].astype(np.float32) / 255.0
    r, g, b = bgr[:, :, 2].astype(np.float32), bgr[:, :, 1].astype(np.float32), bgr[:, :, 0].astype(np.float32)
    if preset == "fuji_warm_magenta_shadows":
        t_high = smoothstep(y, 0.55, 0.95)
        t_shad = 1.0 - smoothstep(y, 0.15, 0.45)
        r += amount * (22.0 * t_high + 12.0 * t_shad)
        g += amount * (14.0 * t_high - 8.0 * t_shad)
        b += amount * (0.0 * t_high + 10.0 * t_shad)
    elif preset == "kodak_cool_mids":
        # Gaussian weight centered on the midtones.
        t_mid = np.exp(-((y - 0.55) ** 2) / (2 * 0.12 ** 2))
        r -= amount * (12.0 * t_mid)
        g += amount * (6.0 * t_mid)
        b += amount * (16.0 * t_mid)
    elif preset == "minilab_greenish":
        t_all = smoothstep(y, 0.2, 0.9)
        g += amount * (18.0 * t_all)
        r -= amount * (6.0 * (1 - t_all))
    # BUG FIX: the previous version re-read the channels from the untouched
    # input after the saturation pass, discarding every preset adjustment
    # above. Bake the cast into the image first, then desaturate that result.
    cast = np.stack([clamp_u8(b), clamp_u8(g), clamp_u8(r)], axis=-1)
    hsv = cv2.cvtColor(cast, cv2.COLOR_BGR2HSV).astype(np.float32)
    hsv[:, :, 1] *= (1 - 0.06 * amount)
    return cv2.cvtColor(clamp_u8(hsv), cv2.COLOR_HSV2BGR)
def add_scan_dust_hairs(pil_img: Image.Image, density=0.25, strength=0.6, hair_prob=0.25, size_factor=1.0):
    """Overlay random dust specks and hair-like scratches, as on a flatbed scan.

    Draws dark and bright artifacts onto two grayscale masks, Gaussian-blurs
    them, then subtracts/adds them from the image. Uses the global `random`
    RNG, so output varies between calls unless the RNG is seeded externally.

    Args:
        pil_img: source RGB image (not modified; a new image is returned).
        density: artifact count scale, normalized by image area.
        strength: controls both mask blur radius and artifact opacity.
        hair_prob: probability a given artifact is a hair instead of a speck.
        size_factor: multiplier for hair length and speck radius.
    """
    if density <= 0 or strength <= 0:
        return pil_img
    w, h = pil_img.size
    area = w * h
    # Roughly one artifact per 55k pixels at density == 1.
    n = int(max(1, (area / 55000.0) * float(density)))
    # Separate masks: "dark" will darken the image, "bright" will lighten it.
    dark = Image.new("L", (w, h), 0)
    bright = Image.new("L", (w, h), 0)
    ddraw = ImageDraw.Draw(dark)
    bdraw = ImageDraw.Draw(bright)
    for _ in range(n):
        if random.random() < hair_prob:
            # Hair: thin line at a random angle, dark 60% of the time.
            x0 = random.randint(0, w - 1)
            y0 = random.randint(0, h - 1)
            length = int(random.uniform(30, 120) * size_factor)
            angle = random.uniform(0, math.pi)
            x1 = int(np.clip(x0 + length * math.cos(angle), 0, w - 1))
            y1 = int(np.clip(y0 + length * math.sin(angle), 0, h - 1))
            width = random.choice([1, 1, 2])
            if random.random() < 0.6:
                ddraw.line((x0, y0, x1, y1), fill=random.randint(160, 255), width=width)
            else:
                bdraw.line((x0, y0, x1, y1), fill=random.randint(140, 220), width=width)
        else:
            # Dust speck: small ellipse, dark or bright with equal chance.
            cx = random.randint(0, w - 1)
            cy = random.randint(0, h - 1)
            r = int(random.uniform(1, 3.5) * size_factor)
            bbox = (cx - r, cy - r, cx + r, cy + r)
            if random.random() < 0.5:
                ddraw.ellipse(bbox, fill=random.randint(160, 255))
            else:
                bdraw.ellipse(bbox, fill=random.randint(140, 220))
    # Soften the masks so artifacts blend like scanned defects, not hard pixels.
    dark = dark.filter(ImageFilter.GaussianBlur(radius=0.8 + 1.2 * strength))
    bright = bright.filter(ImageFilter.GaussianBlur(radius=0.8 + 1.2 * strength))
    base = np.array(pil_img).astype(np.float32)
    d = np.array(dark).astype(np.float32) / 255.0
    b = np.array(bright).astype(np.float32) / 255.0
    # Opacity scale; bright artifacts are slightly weaker than dark ones.
    amt = 28.0 * float(strength)
    base -= d[..., None] * amt
    base += b[..., None] * (amt * 0.9)
    base = np.clip(base, 0, 255).astype(np.uint8)
    return Image.fromarray(base)
def apply_chaos(bgr, amount=0.2):
    """Add hand-held "chaos": sub-pixel jitter, row wobble, and hot pixels.

    Non-deterministic (interleaves np.random and the global `random` RNG);
    `amount` in 0..1 scales every sub-effect. Returns a new BGR array.
    """
    if amount <= 0:
        return bgr
    h, w = bgr.shape[:2]
    out = bgr.copy()
    # 1) Global sub-pixel translation (camera-shake jitter).
    max_shift = 2.0 * amount
    tx = np.random.uniform(-max_shift, max_shift)
    ty = np.random.uniform(-max_shift, max_shift)
    M = np.float32([[1, 0, tx], [0, 1, ty]])
    out = cv2.warpAffine(out, M, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    # 2) Sinusoidal per-row horizontal wobble (tape/scan instability).
    amp = 2.0 * amount
    freq = np.random.uniform(1.0, 3.0)
    phase = np.random.uniform(0, 2*np.pi)
    shifts = (amp * np.sin(phase + (np.arange(h) / max(h,1)) * 2*np.pi*freq)).astype(np.int32)
    for y in range(h):
        if shifts[y] != 0:
            # out[y] is a (w, 3) row, so axis=0 rolls along the width.
            out[y] = np.roll(out[y], shifts[y], axis=0)
    # 3) Sparse warm/white hot pixels, then a slight blur so they bleed
    #    into neighbors like real sensor defects.
    n_hot = int(amount * w * h * 0.00005)
    for _ in range(n_hot):
        y = random.randint(0, h - 1)
        x = random.randint(0, w - 1)
        color = random.choice([(255, 255, 255), (255, 240, 220), (255, 255, 200)])
        out[y, x] = color
    if n_hot > 0:
        out = cv2.GaussianBlur(out, (0, 0), 0.25 + 0.6 * amount)
    return out
def add_russian_timestamp_styles(pil_img: Image.Image, date_text: str, style="russian"):
    """Draw a Cyrillic "DD MON YYYY" camera timestamp in the bottom-right.

    `date_text` is expected as "dd.mm.yyyy"; on any parse failure the raw
    text is drawn unchanged. Draws in place on `pil_img` and returns it.
    """
    months = ["ЯНВ","ФЕВ","МАР","АПР","МАЙ","ИЮН","ИЮЛ","АВГ","СЕН","ОКТ","НОЯ","ДЕК"]
    try:
        d, m, y = date_text.split(".")
        m_i = int(m)
        rus = f"{int(d):02d} {months[m_i-1]} {int(y)}"
    except Exception:
        # Unparseable date -> draw the caller's text as-is.
        rus = date_text
    draw = ImageDraw.Draw(pil_img)
    w, h = pil_img.size
    font_size = max(12, min(w, h) // 40)
    try:
        font = ImageFont.truetype("DejaVuSansMono.ttf", font_size)
    except Exception:
        # Font not installed -> bitmap fallback. (Was a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.)
        font = ImageFont.load_default()
    x_pos, y_pos = w - 10, h - 10
    # 1px black outline in all 8 directions keeps the amber text readable.
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx or dy:
                draw.text((x_pos + dx, y_pos + dy), rus, anchor="rd", fill=(0, 0, 0), font=font)
    draw.text((x_pos, y_pos), rus, anchor="rd", fill=(255, 200, 0), font=font)
    return pil_img
def add_authentic_timestamp(pil_img: Image.Image, date_text: str, style="digital"):
    """Draw a period-style timestamp on the image (in place) and return it.

    style == "digital": amber text with a black outline, bottom-right,
    like a consumer digicam. Any other style: plain white text in the
    bottom-left, like a film-lab print marking.
    """
    draw = ImageDraw.Draw(pil_img)
    w, h = pil_img.size
    font_size = max(12, min(w, h) // 40)
    try:
        font = ImageFont.truetype("DejaVuSansMono.ttf", font_size)
    except Exception:
        # Font not installed -> bitmap fallback. (Was a bare `except:`.)
        font = ImageFont.load_default()
    if style == "digital":
        x_pos, y_pos = w - 10, h - 10
        # 1px black outline in all 8 directions for readability.
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                if dx or dy:
                    draw.text((x_pos + dx, y_pos + dy), date_text, anchor="rd", fill=(0, 0, 0), font=font)
        draw.text((x_pos, y_pos), date_text, anchor="rd", fill=(255, 200, 0), font=font)
    else:
        try:
            small_font = ImageFont.truetype("DejaVuSansMono.ttf", max(8, font_size - 4))
        except Exception:
            # Reuse the main font if the smaller size can't be loaded.
            small_font = font
        draw.text((10, h - 10), date_text, anchor="ld", fill=(255, 255, 255), font=small_font)
    return pil_img
def add_motion_blur(pil_img: Image.Image, strength=0.8):
    """Apply a horizontal box motion blur; strength scales the kernel width."""
    if strength <= 0:
        return pil_img
    size = max(3, int(3 + strength * 6))
    # 1-D horizontal kernel embedded in a square: only the middle row is set.
    kernel = np.zeros((size, size), np.float32)
    kernel[size // 2, :] = 1.0 / size
    return Image.fromarray(cv2.filter2D(np.array(pil_img), -1, kernel))
def add_cheap_flash_effect(bgr, strength=0.08):
    """Simulate a harsh on-camera flash: global lift, cool tint, raised shadows."""
    if strength <= 0:
        return bgr
    boosted = bgr.astype(np.float32) * (1.0 + strength * 0.3)
    # Cheap flashes skew cool: nudge blue more than green, leave red alone.
    boosted[:, :, 0] += 12 * strength
    boosted[:, :, 1] += 8 * strength
    boosted = np.clip(boosted, 0, 255).astype(np.uint8)
    # Shadow-lifting LUT: full lift at value 0, tapering to none at 255.
    ramp = np.arange(256, dtype=np.float32)
    lut = np.clip(ramp + (30 * strength) * (1 - ramp / 255.0), 0, 255).astype(np.uint8)
    for channel in range(3):
        boosted[:, :, channel] = cv2.LUT(boosted[:, :, channel], lut)
    return boosted
def map_intensity(intensity_0_10: float):
    """Map the 0-10 UI intensity slider to internal (s, boost) factors.

    Returns:
        s: 0..1 eased base strength — rises quickly and saturates at slider 3.
        boost: 1..3.8 extra multiplier that only engages above slider 3.
    """
    normalized = float(np.clip(intensity_0_10 / 3.0, 0.0, 1.0))
    eased = 1.0 - (1.0 - normalized) ** 3
    overdrive = float(np.clip((intensity_0_10 - 3.0) / 7.0, 0.0, 1.0))
    return eased, 1.0 + 2.8 * overdrive ** 1.2
| # ---------------------- | |
| # Main processing pipeline with style transfer | |
| # ---------------------- | |
def process_image(
    image,
    intensity,
    wb_preset,
    add_date,
    date_style,
    custom_date,
    grain_amount,
    compression_level,
    flash_effect,
    motion_blur_strength,
    # Reference style transfer
    reference_images,
    style_strength,
    style_method,
    enable_style_transfer,
    # Scene and film controls
    scene_preset,
    film_stock,
    lighting_condition,
    # Video controls
    macroblock_strength,
    block_size,
    ringing_strength,
    interlace_amount,
    chroma_bleed_amount,
    scanlines_amount,
    # Optics/print
    chrom_ab_px,
    print_border_enable,
    print_border_width,
    # Lab & scan
    lab_preset,
    lab_amount,
    dust_enable,
    dust_density,
    dust_strength,
    hair_prob,
    speck_size,
    # Chaos
    chaos_amount,
    # Options
    keep_ratio,
    timestamp_layer,
    russian_style
):
    """Run the full 2000s-photo pipeline on a PIL image and return the result.

    Stage order is deliberate — later stages degrade earlier ones, as a real
    camera -> lab -> scan chain would: reference style transfer, optional
    baked timestamp, motion blur / sharpness, white balance, scene/film/
    lighting, lab cast, tone curve, flash, optics, vignette, grain, video
    artifacts, macroblocking, JPEG, blend with original, chaos, top-layer
    timestamp, print border, scan dust.

    `reference_images` must be a list of already-loaded PIL images (see
    process_with_files). Returns None when no input image is given.
    """
    if image is None:
        return None
    # Master scaling
    # s is the 0..1 eased base strength, boost the >slider-3 multiplier.
    s, boost = map_intensity(float(intensity))
    # Working image
    original = image.convert("RGB")
    pil = original.copy() if keep_ratio else crop_4_3(original)
    # STEP 1: Apply reference style transfer FIRST (if enabled)
    if enable_style_transfer and reference_images:
        print(f"DEBUG: Style transfer enabled with {len(reference_images)} reference images")
        ref_db = create_reference_database(reference_images)
        print(f"DEBUG: Reference database created: {ref_db is not None}")
        if ref_db:
            print(f"DEBUG: Database contains {len(ref_db.get('color_stats', []))} color stats")
            print(f"DEBUG: Applying style transfer with method={style_method}, strength={style_strength}")
            # Store original for comparison
            original_array = np.array(pil)
            # Apply style transfer
            pil = apply_reference_style_transfer(pil, ref_db, style_strength, style_method)
            # Check if anything changed
            new_array = np.array(pil)
            difference = np.mean(np.abs(original_array.astype(float) - new_array.astype(float)))
            print(f"DEBUG: Style transfer difference: {difference} (should be > 0 if working)")
            if difference < 1.0:
                print("WARNING: Style transfer made very little difference!")
        else:
            print("DEBUG: Failed to create reference database!")
    else:
        if not enable_style_transfer:
            print("DEBUG: Style transfer disabled")
        if not reference_images:
            print("DEBUG: No reference images provided")
        else:
            print("DEBUG: Style transfer skipped")
    # Optional: bake timestamp BEFORE effects (so it gets degraded too)
    if add_date and timestamp_layer == "baked":
        if not custom_date:
            # No date given: invent a plausible one from the period.
            year = random.choice([1998, 1999, 2000, 2001, 2002])
            month = random.randint(1, 12)
            day = random.randint(1, 28)
            date_text = f"{day:02d}.{month:02d}.{year}"
        else:
            date_text = custom_date.strip()
        if russian_style and date_style == "digital":
            pil = add_russian_timestamp_styles(pil, date_text, style="russian")
        else:
            pil = add_authentic_timestamp(pil, date_text, style=date_style)
    # Pre-effects
    mb = min(3.0, float(motion_blur_strength) * 0.25 * s * boost)
    if mb > 0.01:
        pil = add_motion_blur(pil, strength=mb)
    pil = enhanced_center_sharpness(pil, strength=min(0.45, 0.15 * s * boost))
    bgr = to_np(pil)
    # White balance
    bgr = early_digital_wb(bgr, wb_preset)
    # Scene preset
    bgr = apply_scene_preset(bgr, scene_preset, intensity=s)
    # Film stock (if not handled by scene preset)
    if scene_preset == "none" and film_stock != "none":
        bgr = authentic_russian_film_stocks(bgr, film_stock, strength=0.6 * s)
    # Lighting conditions
    if lighting_condition == "tungsten_warmth":
        bgr = add_tungsten_indoor_warmth(bgr, strength=0.4 * s)
    elif lighting_condition == "fluorescent_flicker":
        bgr = add_fluorescent_flicker(bgr, strength=0.3 * s)
    # Lab cast
    bgr = lab_color_cast(bgr, preset=lab_preset, amount=float(lab_amount) * (0.6 + 0.6 * s))
    # Tone curve
    bgr = authentic_2000s_tone_curve(bgr, amount=min(1.0, 0.4 * s * (0.9 + 0.5 * (boost - 1))))
    # Flash
    if flash_effect:
        bgr = add_cheap_flash_effect(bgr, strength=min(0.25, 0.05 * s * boost))
    # Blooming
    bgr = ccd_blooming_effect(bgr, threshold=242, bloom_size=2)
    # Optics
    bgr = apply_lens_distortion(bgr, strength=min(0.03, 0.004 * s * boost))
    bgr = radial_chromatic_aberration(bgr, pixels=min(3.0, float(chrom_ab_px) * (0.7 + 0.3 * s)))
    # Vignette
    bgr = enhanced_vignette(bgr, strength=min(0.4, 0.06 * s * boost), feather=1.8)
    # Grain & chroma noise
    g_strength = min(30.0, (float(grain_amount) * 0.35 + 1.5) * s * boost)
    bgr = realistic_film_grain(bgr, grain_strength=g_strength, grain_size=1.05)
    bgr = enhanced_chroma_noise(bgr, amount=min(12.0, 1.6 * s * boost))
    # Video effects
    bgr = composite_chroma_bleed(bgr, amount=float(chroma_bleed_amount) * (0.4 + 0.8 * s), offset_px=1)
    bgr = add_interlace_combing(bgr, amount=float(interlace_amount), horiz_px=2)
    bgr = add_tv_scanlines(bgr, strength=float(scanlines_amount) * 0.25)
    # Macroblocking
    bgr = add_low_bitrate_artifacts(
        bgr,
        strength=float(macroblock_strength) * (0.5 + 0.8 * s),
        block_size=int(block_size),
        ringing=float(ringing_strength)
    )
    # JPEG compression
    pil_mid = to_pil(bgr)
    # Map the 0.3..1.5 UI compression range onto a 0..1 factor, then onto
    # a JPEG quality between 92 (light) and ~68 (heavy) before clamping.
    comp_norm = (float(compression_level) - 0.3) / (1.5 - 0.3)
    comp_norm = float(np.clip(comp_norm, 0, 1))
    q = int(92 - (92 - 68) * comp_norm * min(1.5, s * (0.8 + 0.6 * (boost - 1))))
    # Second compression pass only for heavy settings or strong intensity.
    add_2pass = (compression_level > 1.0) or (s > 0.7)
    pil_mid = authentic_jpeg_compression(pil_mid, quality=int(np.clip(q, 30, 92)), add_artifacts=add_2pass)
    # Final blend: mix the processed image back over the (aligned) original
    # so low intensities keep most of the source photo.
    orig_aligned = original if keep_ratio else crop_4_3(original)
    mix = float(np.clip(0.08 + 0.67 * s * (0.9 + 0.6 * (boost - 1)), 0.08, 0.92))
    processed = Image.blend(orig_aligned, pil_mid, alpha=mix)
    # Chaos
    if chaos_amount > 0:
        bgr_chaos = to_np(processed)
        bgr_chaos = apply_chaos(bgr_chaos, amount=float(chaos_amount))
        processed = to_pil(bgr_chaos)
    # Timestamp on top (drawn after all degradation, so it stays crisp)
    if add_date and timestamp_layer == "top":
        if not custom_date:
            year = random.choice([1998, 1999, 2000, 2001, 2002])
            month = random.randint(1, 12)
            day = random.randint(1, 28)
            date_text = f"{day:02d}.{month:02d}.{year}"
        else:
            date_text = custom_date.strip()
        if russian_style and date_style == "digital":
            processed = add_russian_timestamp_styles(processed, date_text, style="russian")
        else:
            processed = add_authentic_timestamp(processed, date_text, style=date_style)
    # Print border
    processed = add_print_border(processed, enable=bool(print_border_enable), width_rel=float(print_border_width))
    # Scan dust/hairs
    if dust_enable:
        processed = add_scan_dust_hairs(
            processed,
            density=float(dust_density),
            strength=float(dust_strength),
            hair_prob=float(hair_prob),
            size_factor=float(speck_size)
        )
    return processed
| # Processing function to handle file inputs | |
def process_with_files(
    input_image,
    reference_images,
    style_strength,
    style_method,
    enable_style_transfer,
    intensity,
    wb_preset,
    add_date,
    date_style,
    custom_date,
    grain_amount,
    compression_level,
    flash_effect,
    motion_blur_strength,
    scene_preset,
    film_stock,
    lighting_condition,
    macroblock_strength,
    block_size,
    ringing_strength,
    interlace_amount,
    chroma_bleed_amount,
    scanlines_amount,
    chrom_ab_px,
    print_border_enable,
    print_border_width,
    lab_preset,
    lab_amount,
    dust_enable,
    dust_density,
    dust_strength,
    hair_prob,
    speck_size,
    chaos_amount,
    keep_ratio,
    timestamp_layer,
    russian_style
):
    """Gradio entry point: load the uploaded reference files as PIL images,
    then delegate to process_image() with explicitly named arguments."""
    # DEBUG: Print what we received
    print(f"DEBUG: enable_style_transfer = {enable_style_transfer}")
    print(f"DEBUG: reference_images type = {type(reference_images)}")
    print(f"DEBUG: style_strength = {style_strength}")
    print(f"DEBUG: style_method = {style_method}")
    # Convert file inputs to PIL Images; unreadable files are skipped.
    ref_images = []
    if reference_images:
        print(f"DEBUG: Processing {len(reference_images)} reference files")
        for i, file in enumerate(reference_images):
            try:
                print(f"DEBUG: Processing file {i}: {file.name}")
                loaded = Image.open(file.name).convert("RGB")
                ref_images.append(loaded)
                print(f"DEBUG: Successfully loaded image {i}, size: {loaded.size}")
            except Exception as e:
                print(f"DEBUG: Failed to load file {i}: {e}")
    else:
        print("DEBUG: No reference images provided")
    print(f"DEBUG: Successfully loaded {len(ref_images)} reference images")
    # Keyword arguments make the long, reordered parameter list of
    # process_image() explicit and immune to positional mix-ups.
    return process_image(
        input_image,
        intensity=intensity,
        wb_preset=wb_preset,
        add_date=add_date,
        date_style=date_style,
        custom_date=custom_date,
        grain_amount=grain_amount,
        compression_level=compression_level,
        flash_effect=flash_effect,
        motion_blur_strength=motion_blur_strength,
        reference_images=ref_images,  # the already-loaded PIL images
        style_strength=style_strength,
        style_method=style_method,
        enable_style_transfer=enable_style_transfer,
        scene_preset=scene_preset,
        film_stock=film_stock,
        lighting_condition=lighting_condition,
        macroblock_strength=macroblock_strength,
        block_size=block_size,
        ringing_strength=ringing_strength,
        interlace_amount=interlace_amount,
        chroma_bleed_amount=chroma_bleed_amount,
        scanlines_amount=scanlines_amount,
        chrom_ab_px=chrom_ab_px,
        print_border_enable=print_border_enable,
        print_border_width=print_border_width,
        lab_preset=lab_preset,
        lab_amount=lab_amount,
        dust_enable=dust_enable,
        dust_density=dust_density,
        dust_strength=dust_strength,
        hair_prob=hair_prob,
        speck_size=speck_size,
        chaos_amount=chaos_amount,
        keep_ratio=keep_ratio,
        timestamp_layer=timestamp_layer,
        russian_style=russian_style,
    )
| # ---------------------- | |
| # Enhanced UI with Style Transfer | |
| # ---------------------- | |
# Build the Gradio UI. Layout: input/output images on top, the main button,
# then all settings grouped into accordions, then event wiring and help text.
with gr.Blocks(title="Russian 2000s Filter with Reference Style Transfer", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
# 📷 Complete Russian 2000s Filter with Reference Style Transfer
Transform your photos using authentic Russian film stocks, period effects, AND reference-based style transfer from real 2000s photos.
""")
    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(type="pil", label="📸 Upload Your Photo")
        with gr.Column(scale=1):
            output_image = gr.Image(type="pil", label="✨ Processed Photo", interactive=False)
    # Main processing button right under the photos
    with gr.Row():
        process_btn = gr.Button("🎬 Apply Complete Russian Filter with Style Transfer", variant="primary", size="lg")
    with gr.Row():
        with gr.Column(scale=1):
            # All settings moved down here
            with gr.Accordion("🎨 Reference Style Transfer (Enhanced!)", open=True):
                gr.Markdown("""
**Upload 1-5 authentic Russian 2000s photos as style references**
- Works on free tier (CPU processing)
- **NEW**: Analyzes amateur photography characteristics:
- Point-and-click exposure patterns (center-weighted metering)
- Flash falloff and harsh lighting
- Foreground overexposure vs background shadows
- Amateur focus characteristics
- Limited dynamic range simulation
- Processing time: 10-15 seconds
- **Debug mode enabled**: Check console for style transfer status
""")
                with gr.Accordion("🔧 Debug & Testing", open=False):
                    gr.Markdown("**Quick test to verify style transfer is working**")
                    # Add test buttons for debugging
                    test_style_transfer = gr.Button("🔍 Test Style Transfer (Debug)", variant="secondary")
                    test_simple = gr.Button("🎯 Simple Style Test (No Files)", variant="secondary")
                    debug_output = gr.Textbox(label="Debug Output", lines=4, interactive=False)
                reference_images = gr.File(
                    file_count="multiple",
                    file_types=["image"],
                    label="Reference Photos (Upload 1-5 authentic Russian 2000s images)"
                )
                enable_style_transfer = gr.Checkbox(
                    label="Enable Enhanced Reference Style Transfer",
                    value=False
                )
                style_strength = gr.Slider(
                    0, 1, value=0.65, step=0.05,
                    label="Style Transfer Strength"
                )
                style_method = gr.Radio(
                    choices=["color_matching", "texture_matching", "enhanced_amateur"],
                    value="enhanced_amateur",
                    label="Style Transfer Method",
                    info="Enhanced Amateur = Full point-and-click camera emulation"
                )
            with gr.Accordion("🎛️ Basic Settings", open=True):
                intensity = gr.Slider(0, 10, value=3.5, step=0.1, label="Overall Effect Intensity (0–10)")
                wb_preset = gr.Dropdown(
                    choices=["auto", "daylight", "cloudy", "tungsten", "fluorescent"],
                    value="tungsten",
                    label="White Balance Preset"
                )
                grain_amount = gr.Slider(2, 15, value=7, step=1, label="Film Grain Amount")
                compression_level = gr.Slider(0.3, 1.5, value=0.9, step=0.1, label="JPEG Compression Level")
                keep_ratio = gr.Checkbox(value=False, label="Keep Original Aspect Ratio (disable for authentic 4:3 crop)")
            with gr.Accordion("🇷🇺 Russian/Eastern European Features", open=True):
                scene_preset = gr.Dropdown(
                    choices=["none", "kitchen_party", "winter_street", "apartment_interior", "dacha_summer"],
                    value="none",
                    label="Scene Preset"
                )
                film_stock = gr.Dropdown(
                    choices=["none", "svema", "orwo", "tasma"],
                    value="svema",
                    label="Russian/Soviet Film Stock"
                )
                lighting_condition = gr.Dropdown(
                    choices=["none", "tungsten_warmth", "fluorescent_flicker"],
                    value="none",
                    label="Period Lighting Conditions"
                )
                russian_style = gr.Checkbox(label="Russian Date Format (Cyrillic months)", value=False)
                flash_effect = gr.Checkbox(label="Cheap Camera Flash", value=True)
                motion_blur_strength = gr.Slider(0, 3, value=1, step=0.5, label="Motion Blur")
            with gr.Accordion("📼 Video / TV Artifacts", open=False):
                macroblock_strength = gr.Slider(0, 1, value=0.4, step=0.05, label="Macroblocking Strength")
                # NOTE(review): the help text at the bottom of the page says the
                # block-size default is "Now 8px (was 16px)", but this slider
                # still defaults to 16 — confirm which is intended.
                block_size = gr.Slider(1, 32, value=16, step=1, label="Block Size (px)")
                ringing_strength = gr.Slider(0, 1, value=0.35, step=0.05, label="Ringing / Edge Halos")
                interlace_amount = gr.Slider(0, 1, value=0.15, step=0.05, label="Interlace Combing")
                chroma_bleed_amount = gr.Slider(0, 1, value=0.2, step=0.05, label="Chroma Bleed")
                scanlines_amount = gr.Slider(0, 1, value=0.15, step=0.05, label="CRT Scanlines")
            with gr.Accordion("🔧 Optics & Print", open=False):
                chrom_ab_px = gr.Slider(0, 2.0, value=0.6, step=0.1, label="Chromatic Aberration (px)")
                print_border_enable = gr.Checkbox(label="Add 10×15 Minilab Border", value=False)
                print_border_width = gr.Slider(0.02, 0.08, value=0.04, step=0.005, label="Border Width")
            with gr.Accordion("🧪 Lab & Scan Look", open=False):
                lab_preset = gr.Dropdown(
                    choices=["none", "fuji_warm_magenta_shadows", "kodak_cool_mids", "minilab_greenish"],
                    value="none",
                    label="Lab Color Cast Preset"
                )
                lab_amount = gr.Slider(0, 1, value=0.3, step=0.05, label="Lab Cast Amount")
                dust_enable = gr.Checkbox(label="Add Scan Dust & Hairs", value=False)
                dust_density = gr.Slider(0, 1, value=0.25, step=0.05, label="Dust/Hair Density")
                dust_strength = gr.Slider(0, 1, value=0.6, step=0.05, label="Dust/Hair Contrast")
                hair_prob = gr.Slider(0, 1, value=0.25, step=0.05, label="Hair Probability")
                speck_size = gr.Slider(0.8, 2.5, value=1.0, step=0.1, label="Speck Size Factor")
            with gr.Accordion("🎲 Chaos", open=False):
                chaos_amount = gr.Slider(0, 1, value=0.2, step=0.05, label="Micro Jitter, Wobble & Hot Pixels")
            with gr.Accordion("📅 Timestamp Options", open=False):
                add_date = gr.Checkbox(label="Add Date Timestamp", value=True)
                date_style = gr.Radio(choices=["digital", "film_lab"], value="digital", label="Timestamp Style")
                custom_date = gr.Textbox(
                    label="Custom Date (dd.mm.yyyy)",
                    placeholder="14.08.2000",
                    info="Leave empty for random date from 1998–2002"
                )
                timestamp_layer = gr.Radio(
                    choices=["top", "baked"],
                    value="top",
                    label="Timestamp Layer"
                )
    # Connect test buttons
    test_simple.click(
        fn=simple_style_test,
        inputs=[input_image],
        outputs=[debug_output]
    )
    test_style_transfer.click(
        fn=test_style_transfer_debug,
        inputs=[input_image, reference_images],
        outputs=[debug_output]
    )
    # Connect main processing button
    # NOTE: the input order here must match process_with_files' signature.
    process_btn.click(
        fn=process_with_files,
        inputs=[
            input_image, reference_images, style_strength, style_method, enable_style_transfer,
            intensity, wb_preset, add_date, date_style, custom_date,
            grain_amount, compression_level, flash_effect, motion_blur_strength,
            scene_preset, film_stock, lighting_condition,
            macroblock_strength, block_size, ringing_strength, interlace_amount,
            chroma_bleed_amount, scanlines_amount,
            chrom_ab_px, print_border_enable, print_border_width,
            lab_preset, lab_amount, dust_enable, dust_density, dust_strength, hair_prob, speck_size,
            chaos_amount,
            keep_ratio, timestamp_layer, russian_style
        ],
        outputs=[output_image]
    )
    gr.Markdown("""
### 🎯 ENHANCED: Reference Style Transfer Features:
- **Upload Reference Photos**: 1-5 authentic Russian 2000s photos for style matching
- **Color Matching**: Matches lighting, color grading, and atmosphere
- **Texture Matching**: Adjusts contrast and visual texture based on references
- **Enhanced Amateur Mode**: Full point-and-click camera emulation with:
- **Center-weighted metering** simulation (subjects properly exposed, backgrounds over/under)
- **Flash characteristics** (harsh falloff, foreground overexposure, cool color cast)
- **Depth-based focus** (amateur focus patterns, background blur)
- **Limited dynamic range** (shadow crushing, highlight clipping)
- **Exposure patterns** typical of 2000s compact cameras
### 💡 Enhanced Style Transfer Tips:
- **Best references**: Family photos with flash, indoor gatherings, amateur compositions
- **Enhanced Amateur mode**: Gives most authentic point-and-click camera results
- **Flash photos work best**: References with visible flash create realistic amateur lighting
- **Center-composed photos**: Works best with typically amateur-style center composition
### 📸 What Enhanced Mode Emulates:
- **Point-and-click cameras**: Canon PowerShot, Nikon Coolpix, Sony Mavica
- **Center-weighted metering**: Subjects in center properly exposed, backgrounds blown/dark
- **On-camera flash**: Harsh, direct flash with realistic falloff and color temperature
- **Amateur focus patterns**: Everything in focus OR poorly focused backgrounds
- **Cheap optics**: Limited dynamic range, highlight clipping, shadow crushing
### 🎬 Recommended Workflow:
1. Upload your modern photo
2. Upload 3-5 flash photos from Russian family gatherings (1998-2002)
3. Enable "Enhanced Amateur" style transfer (strength 0.6-0.7)
4. Choose "Kitchen Party" or "Apartment Interior" scene preset
5. Use tungsten white balance + compression 0.9 for authentic look
6. Process and get genuine point-and-click camera results!
### 🔧 Updated Default Settings:
- **Block Size**: Now 8px (was 16px) for finer, more realistic artifacts
- **White Balance**: Tungsten default (most common Russian indoor lighting)
- **Compression**: 0.9 default (typical of 2000s digital cameras)
- **4:3 Crop**: Now default ON (authentic camera aspect ratio)
- **Intensity**: 3.5 default (slightly more effect for amateur camera look)
""")
# Launch the Gradio server when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()