| | import torch |
| | from diffusers import StableDiffusionControlNetPipeline, ControlNetModel |
| | from controlnet_aux import OpenposeDetector |
| | from PIL import Image, ImageFilter, ImageEnhance |
| | import random |
| | import cv2 |
| | import numpy as np |
| | import gradio as gr |
| | import torch.nn.functional as F |
| | from transformers import Sam2Model, Sam2Processor |
| | from scipy import ndimage |
| | from skimage import measure, morphology |
| |
|
| | |
class ControlNetProgressCallback:
    """Diffusers step-end callback that relays denoising progress to a UI.

    An instance is handed to the pipeline (``callback_on_step_end`` style);
    each invocation updates an optional Gradio-like ``progress`` callable and
    echoes the step fraction to stdout.
    """

    def __init__(self, progress, total_steps):
        # `progress` may be None (headless) or a callable accepting
        # (fraction, desc=...) such as gradio.Progress.
        self.progress = progress
        self.total_steps = total_steps
        self.current_step = 0

    def __call__(self, pipe, step_index, timestep, callback_kwargs):
        """Record 0-based `step_index` and report it; return kwargs unchanged."""
        self.current_step = step_index + 1
        fraction_done = self.current_step / self.total_steps

        if self.progress is not None:
            self.progress(fraction_done, desc=f"ControlNet: Schritt {self.current_step}/{self.total_steps}")

        print(f"ControlNet Fortschritt: {self.current_step}/{self.total_steps} ({fraction_done:.1%})")
        return callback_kwargs
| |
|
| |
|
| | class ControlNetProcessor: |
| | def __init__(self, device="cuda", torch_dtype=torch.float32): |
| | self.device = device |
| | self.torch_dtype = torch_dtype |
| | self.pose_detector = None |
| | self.midas_model = None |
| | self.midas_transform = None |
| | self.sam_processor = None |
| | self.sam_model = None |
| | self.sam_initialized = False |
| |
|
| | def _lazy_load_sam(self): |
| | """Lazy Loading von SAM 2 über 🤗 Transformers API""" |
| | if self.sam_initialized: |
| | return True |
| |
|
| | try: |
| | print("#" * 80) |
| | print("# 🔄 LADE SAM 2 (Segment Anything Model 2)") |
| | print("#" * 80) |
| | model_id = "facebook/sam2-hiera-tiny" |
| | |
| | print(f"📥 Modell-ID: {model_id}") |
| | print(f"📥 Lade Processor...") |
| | self.sam_processor = Sam2Processor.from_pretrained(model_id) |
| | print(f"📥 Lade Modell...") |
| | self.sam_model = Sam2Model.from_pretrained(model_id, torch_dtype=torch.float32).to(self.device) |
| | self.sam_model.eval() |
| | |
| | self.sam_initialized = True |
| | print("✅ SAM 2 erfolgreich geladen (via Transformers)") |
| | return True |
| |
|
| | except Exception as e: |
| | print(f"❌ FEHLER beim Laden von SAM 2: {str(e)[:200]}") |
| | self.sam_initialized = True |
| | return False |
| |
|
| | def _validate_bbox(self, image, bbox_coords): |
| | """Validiert und korrigiert BBox-Koordinaten""" |
| | width, height = image.size |
| |
|
| | if isinstance(bbox_coords, (list, tuple)) and len(bbox_coords) == 4: |
| | x1, y1, x2, y2 = bbox_coords |
| | else: |
| | x1, y1, x2, y2 = bbox_coords |
| |
|
| | x1, x2 = min(x1, x2), max(x1, x2) |
| | y1, y2 = min(y1, y2), max(y1, y2) |
| |
|
| | x1 = max(0, min(x1, width - 1)) |
| | y1 = max(0, min(y1, height - 1)) |
| | x2 = max(0, min(x2, width - 1)) |
| | y2 = max(0, min(y2, height - 1)) |
| |
|
| | if x2 - x1 < 10 or y2 - y1 < 10: |
| | size = min(width, height) * 0.3 |
| | x1 = max(0, width/2 - size/2) |
| | y1 = max(0, height/2 - size/2) |
| | x2 = min(width, width/2 + size/2) |
| | y2 = min(height, height/2 + size/2) |
| |
|
| | return int(x1), int(y1), int(x2), int(y2) |
| | |
| |
|
| | def create_sam_mask(self, image, bbox_coords, mode, is_front_face=False, is_back_head=False): |
| | """ |
| | ERWEITERTE Funktion: Erstellt präzise Maske mit SAM 2 |
| | """ |
| | try: |
| | print("#" * 80) |
| | print("# 🎯 STARTE SAM 2 SEGMENTIERUNG") |
| | print("#" * 80) |
| | print(f"📐 Eingabebild-Größe: {image.size}") |
| | print(f"🎛️ Ausgewählter Modus: {mode}") |
| | |
| | |
| | |
| | |
| | original_image = image |
| | |
| | |
| | if not self.sam_initialized: |
| | print("📥 SAM 2 ist noch nicht geladen, starte Lazy Loading...") |
| | self._lazy_load_sam() |
| |
|
| | if self.sam_model is None or self.sam_processor is None: |
| | print("⚠️ SAM 2 Model nicht verfügbar, verwende Fallback") |
| | return self._create_rectangular_mask(image, bbox_coords, mode) |
| |
|
| | |
| | x1, y1, x2, y2 = self._validate_bbox(image, bbox_coords) |
| | original_bbox = (x1, y1, x2, y2) |
| | print(f"📏 Original-BBox Größe: {x2-x1} × {y2-y1} px") |
| | |
| | |
| | |
| | |
| | if mode == "environment_change": |
| | print("-" * 60) |
| | print("🌳 MODUS: ENVIRONMENT_CHANGE") |
| | print("-" * 60) |
| | |
| |
|
| | |
| | image_np = np.array(image.convert("RGB")) |
| |
|
| | |
| | input_boxes = [[[x1, y1, x2, y2]]] |
| |
|
| | |
| | |
| | inputs = self.sam_processor( |
| | image_np, |
| | input_boxes=input_boxes, |
| | return_tensors="pt" |
| | ).to(self.device) |
| |
|
| | print(f" - 'input_boxes' Shape: {inputs['input_boxes'].shape}") |
| |
|
| | |
| | print("-" * 60) |
| | print("🧠 SAM 2 INFERENZ (Vorhersage)") |
| | with torch.no_grad(): |
| | print(" Führe Vorhersage durch...") |
| | outputs = self.sam_model(**inputs) |
| | print(f"✅ Vorhersage abgeschlossen") |
| | print(f" Anzahl der Vorhersagemasken: {outputs.pred_masks.shape[2]}") |
| |
|
| | num_masks = outputs.pred_masks.shape[2] |
| | print(f" SAM lieferte {num_masks} verschiedene Masken") |
| |
|
| | |
| | all_masks = [] |
| |
|
| | for i in range(num_masks): |
| | single_mask = outputs.pred_masks[:, :, i, :, :] |
| | resized_mask = F.interpolate( |
| | single_mask, |
| | size=(image.height, image.width), |
| | mode='bilinear', |
| | align_corners=False |
| | ).squeeze() |
| | |
| | mask_np = resized_mask.sigmoid().cpu().numpy() |
| | all_masks.append(mask_np) |
| |
|
| |
|
| | bbox_center = ((x1 + x2) // 2, (y1 + y2) // 2) |
| | bbox_area = (x2 - x1) * (y2 - y1) |
| | print(f" Erwartetes BBox-Zentrum: {bbox_center}") |
| | print(f" Erwartete BBox-Fläche: {bbox_area:,} Pixel") |
| | |
| | print("🤔 HEURISTIK: Beste Maske auswählen") |
| | best_mask_idx = 0 |
| | best_score = -1 |
| | |
| | |
| | for i in range(num_masks): |
| | mask_np_temp = all_masks[i] |
| | |
| | |
| | mask_max = mask_np_temp.max() |
| | if mask_max < 0.3: |
| | continue |
| | |
| | adaptive_threshold = max(0.3, mask_max * 0.7) |
| | mask_binary = (mask_np_temp > adaptive_threshold).astype(np.uint8) |
| |
|
| | |
| | if np.sum(mask_binary) == 0: |
| | print(f" ❌ Maske {i+1}: Keine Pixel nach adaptive_threshold {adaptive_threshold:.3f}") |
| | continue |
| |
|
| | |
| | mask_area_pixels = np.sum(mask_binary) |
| |
|
| | |
| | bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8) |
| | bbox_mask[y1:y2, x1:x2] = 1 |
| | |
| | overlap = np.sum(mask_binary & bbox_mask) |
| | bbox_overlap_ratio = overlap / np.sum(bbox_mask) if np.sum(bbox_mask) > 0 else 0 |
| | |
| | |
| | y_coords, x_coords = np.where(mask_binary > 0) |
| | if len(y_coords) > 0: |
| | centroid_y = np.mean(y_coords) |
| | centroid_x = np.mean(x_coords) |
| | centroid_distance = np.sqrt((centroid_x - bbox_center[0])**2 + (centroid_y - bbox_center[1])**2) |
| | normalized_distance = centroid_distance / max(image.width, image.height) |
| | else: |
| | normalized_distance = 1.0 |
| | |
| | |
| | area_ratio = mask_area_pixels / bbox_area |
| | area_score = 1.0 - min(abs(area_ratio - 1.0), 1.0) |
| | |
| | |
| | confidence_score = mask_max |
| | |
| | |
| | score = ( |
| | bbox_overlap_ratio * 0.4 + |
| | (1.0 - normalized_distance) * 0.25 + |
| | area_score * 0.25 + |
| | confidence_score * 0.1 |
| | ) |
| | |
| | print(f" 📊 STANDARD-SCORES für Maske {i+1}:") |
| | print(f" • BBox-Überlappung: {bbox_overlap_ratio:.3f}") |
| | print(f" • Zentrums-Distanz: {centroid_distance if 'centroid_distance' in locals() else 'N/A'}") |
| | print(f" • Flächen-Ratio: {area_ratio:.3f}") |
| | print(f" • GESAMTSCORE: {score:.3f}") |
| | |
| | if score > best_score: |
| | best_score = score |
| | best_mask_idx = i |
| | print(f" 🏆 Neue beste Maske: Nr. {i+1} mit Score {score:.3f}") |
| | |
| | print(f"✅ Beste Maske ausgewählt: Nr. {best_mask_idx+1} mit Score {best_score:.3f}") |
| | |
| | |
| | mask_np = all_masks[best_mask_idx] |
| |
|
| | max_val = mask_np.max() |
| | print(f" 🔍 Maximaler SAM-Konfidenzwert der besten Maske: {max_val:.3f}") |
| |
|
| | if max_val < 0.6: |
| | dynamic_threshold = 0.3 |
| | print(f" ⚠️ SAM ist unsicher (max_val={max_val:.3f} < 0.6)") |
| | else: |
| | dynamic_threshold = max_val * 0.85 |
| | print(f" ✅ SAM ist sicher (max_val={max_val:.3f} >= 0.6)") |
| |
|
| | |
| | mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255 |
| |
|
| | |
| | if mask_array.max() == 0: |
| | print(" ⚠️ Maske leer, erstelle rechteckige Fallback-Maske") |
| | mask_array = np.zeros((512, 512), dtype=np.uint8) * 255 |
| | |
| | |
| | scale_x = 512 / image.width |
| | scale_y = 512 / image.height |
| | fb_x1 = int(x1 * scale_x) |
| | fb_y1 = int(y1 * scale_y) |
| | fb_x2 = int(x2 * scale_x) |
| | fb_y2 = int(y2 * scale_y) |
| |
|
| | |
| | cv2.rectangle(mask_array, (fb_x1, fb_y1), (fb_x2, fb_y2), 0, -1) |
| |
|
| | |
| | raw_mask_array = mask_array.copy() |
| |
|
| | |
| | |
| | |
| | print("🌳 ENVIRONMENT-CHANGE POSTPROCESSING") |
| | |
| | |
| | |
| | if image.size != original_image.size: |
| | print(f" ⚠️ Bildgröße angepasst: {image.size} → {original_image.size}") |
| | temp_mask = Image.fromarray(mask_array).convert("L") |
| | temp_mask = temp_mask.resize(original_image.size, Image.Resampling.NEAREST) |
| | mask_array = np.array(temp_mask) |
| | print(f" ✅ Maske auf Originalgröße skaliert: {mask_array.shape}") |
| |
|
| | |
| | working_mask = mask_array.copy() |
| | print(f"working_mask shape: {working_mask.shape}") |
| |
|
| |
|
| | |
| | kernel_dilate = np.ones((5, 5), np.uint8) |
| | working_mask = cv2.dilate(working_mask, kernel_dilate, iterations=1) |
| | print(f" ✅ Dilate (5x5) - Person leicht erweitert") |
| | |
| |
|
| | |
| | |
| | kernel_close_small = np.ones((3, 3), np.uint8) |
| | working_mask = cv2.morphologyEx(working_mask, cv2.MORPH_CLOSE, kernel_close_small, iterations=1) |
| | print(f" ✅ MORPH_CLOSE (3x3) - Feine Löcher im Hintergrund geschlossen") |
| |
|
| | |
| | |
| | |
| | contours, _ = cv2.findContours(working_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) |
| | |
| | if len(contours) > 0: |
| | |
| | largest_contour = max(contours, key=cv2.contourArea) |
| | |
| | |
| | clean_mask = np.zeros_like(working_mask) |
| | cv2.drawContours(clean_mask, [largest_contour], -1, 255, -1) |
| | |
| | |
| | |
| | temp_inverted = 255 - clean_mask |
| | hole_contours, _ = cv2.findContours(temp_inverted, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) |
| | |
| | for hole in hole_contours: |
| | area = cv2.contourArea(hole) |
| | if area < 100: |
| | cv2.drawContours(clean_mask, [hole], -1, 255, -1) |
| | |
| | working_mask = clean_mask |
| | print(f" ✅ Konturenfilter - Größte Kontur behalten, {len(contours)-1} kleine entfernt") |
| |
|
| |
|
| | |
| | |
| | inpaint_binary_mask = working_mask.copy() |
| | |
| | inpaint_binary_mask = 255 - inpaint_binary_mask |
| | |
| |
|
| | |
| | |
| | working_mask = cv2.GaussianBlur(working_mask, (7, 7), 1.5) |
| | print(f" ✅ Gaussian Blur (7x7 , sigma=1.5) für weiche Kanten") |
| |
|
| | |
| |
|
| | |
| | working_mask_float = working_mask.astype(np.float32) / 255.0 |
| | working_mask_float = np.clip(working_mask_float, 0.0, 1.0) |
| | working_mask_float = working_mask_float ** 0.85 |
| | working_mask = (working_mask_float * 255).astype(np.uint8) |
| | print(f" ✅ Gamma-Korrektur (0.85) gegen milchige Ränder") |
| |
|
| |
|
| |
|
| | |
| | binary_mask = (working_mask > 128).astype(np.uint8) * 255 |
| | final_mask = 255 - binary_mask |
| | print(f" ✅ Finale Invertierung für environment_change") |
| |
|
| |
|
| | |
| | white_pixels = np.sum(final_mask > 127) |
| | black_pixels = np.sum(final_mask <= 127) |
| | total_pixels = final_mask.size |
| | |
| | print(f" 📊 FINALE MASKE:") |
| | print(f" • Weiße Pixel (Hintergrund): {white_pixels:,} ({white_pixels/total_pixels*100:.1f}%)") |
| | print(f" • Schwarze Pixel (Person): {black_pixels:,} ({black_pixels/total_pixels*100:.1f}%)") |
| | |
| | |
| | |
| | mask = Image.fromarray(final_mask).convert("L") |
| | raw_mask = Image.fromarray(raw_mask_array).convert("L") |
| | inpaint_binary_pil = Image.fromarray(inpaint_binary_mask).convert("L") |
| |
|
| | print("#" * 80) |
| | print(f"✅ SAM 2 SEGMENTIERUNG ABGESCHLOSSEN") |
| | print(f"📐 Finale Maskengröße: {mask.size}") |
| | print(f"🎛️ Verwendeter Modus: {mode}") |
| | print("#" * 80) |
| | |
| | return mask, raw_mask, inpaint_binary_pil |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | elif mode == "focus_change": |
| | print("-" * 60) |
| | print("🎯 MODUS: FOCUS_CHANGE (OPTIMIERT)") |
| | print("-" * 60) |
| |
|
| | |
| | |
| | |
| | |
| | |
| | image_np = np.array(image.convert("RGB")) |
| | |
| | |
| | input_boxes = [[[x1, y1, x2, y2]]] |
| | |
| | |
| | center_x = (x1 + x2) // 2 |
| | center_y = (y1 + y2) // 2 |
| | input_points = [[[[center_x, center_y]]]] |
| | input_labels = [[[1]]] |
| | |
| | print(f" 🎯 SAM-Prompt: BBox [{x1},{y1},{x2},{y2}]") |
| | print(f" 👁️ Punkt: Nur Mitte ({center_x},{center_y})") |
| | |
| | |
| | inputs = self.sam_processor( |
| | image_np, |
| | input_boxes=input_boxes, |
| | input_points=input_points, |
| | input_labels=input_labels, |
| | return_tensors="pt" |
| | ).to(self.device) |
| | |
| | |
| | print("🧠 SAM 2 INFERENZ (3 Masken-Varianten)") |
| | with torch.no_grad(): |
| | print(" Führe Vorhersage durch...") |
| | outputs = self.sam_model(**inputs) |
| | print(f"✅ Vorhersage abgeschlossen") |
| | print(f" Anzahl der Vorhersagemasken: {outputs.pred_masks.shape[2]}") |
| |
|
| | num_masks = outputs.pred_masks.shape[2] |
| | |
| |
|
| | |
| | all_masks = [] |
| |
|
| | for i in range(num_masks): |
| | single_mask = outputs.pred_masks[:, :, i, :, :] |
| | |
| | resized_mask = F.interpolate( |
| | single_mask, |
| | size=(image.height, image.width), |
| | mode='bilinear', |
| | align_corners=False |
| | ).squeeze() |
| | |
| | mask_np = resized_mask.sigmoid().cpu().numpy() |
| | all_masks.append(mask_np) |
| | |
| |
|
| | |
| | bbox_center = ((x1 + x2) // 2, (y1 + y2) // 2) |
| | bbox_area = (x2 - x1) * (y2 - y1) |
| | |
| | print("🤔 HEURISTIK: Beste Maske auswählen") |
| | best_mask_idx = 0 |
| | best_score = -1 |
| | |
| | |
| | for i in range(num_masks): |
| | |
| |
|
| | mask_np_temp = all_masks[i] |
| | |
| | |
| | mask_max = mask_np_temp.max() |
| | if mask_max < 0.3: |
| | continue |
| | |
| | adaptive_threshold = max(0.3, mask_max * 0.7) |
| | mask_binary = (mask_np_temp > adaptive_threshold).astype(np.uint8) |
| |
|
| | |
| | if np.sum(mask_binary) == 0: |
| | continue |
| | |
| | |
| | mask_area_pixels = np.sum(mask_binary) |
| | |
| | |
| | bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8) |
| | bbox_mask[y1:y2, x1:x2] = 1 |
| | overlap = np.sum(mask_binary & bbox_mask) |
| | bbox_overlap_ratio = overlap / np.sum(bbox_mask) if np.sum(bbox_mask) > 0 else 0 |
| | |
| | |
| | y_coords, x_coords = np.where(mask_binary > 0) |
| | if len(y_coords) > 0: |
| | centroid_y = np.mean(y_coords) |
| | centroid_x = np.mean(x_coords) |
| | centroid_distance = np.sqrt((centroid_x - bbox_center[0])**2 + |
| | (centroid_y - bbox_center[1])**2) |
| | normalized_distance = centroid_distance / max(image.width, image.height) |
| | else: |
| | normalized_distance = 1.0 |
| | |
| | |
| | area_ratio = mask_area_pixels / bbox_area |
| | area_score = 1.0 - min(abs(area_ratio - 1.0), 1.0) |
| | |
| | |
| | score = ( |
| | bbox_overlap_ratio * 0.4 + |
| | (1.0 - normalized_distance) * 0.25 + |
| | area_score * 0.25 + |
| | mask_max * 0.1 |
| | ) |
| | |
| | print(f" Maske {i+1}: Score={score:.3f}, " |
| | f"Überlappung={bbox_overlap_ratio:.3f}, " |
| | f"Fläche={mask_area_pixels:,}px") |
| | |
| | if score > best_score: |
| | best_score = score |
| | best_mask_idx = i |
| | |
| | print(f"✅ Beste Maske: Nr. {best_mask_idx+1} mit Score {best_score:.3f}") |
| | |
| | best_mask_original = all_masks[best_mask_idx] |
| | |
| | mask_np = best_mask_original |
| | print(f" ✅ Beste Maske in Originalgröße: {image.width}×{image.height}") |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | mask_max = mask_np.max() |
| | if best_score < 0.6: |
| | dynamic_threshold = 0.15 |
| | print(f" ⚠️ Masken-Score niedrig ({best_score:.3f}). " |
| | f"Threshold=0.15 für bessere Präzision") |
| | elif best_score < 0.8: |
| | dynamic_threshold = max(0.25, mask_max * 0.5) |
| | print(f" ℹ️ Mittlere Maskenqualität. Threshold={dynamic_threshold:.3f}") |
| | else: |
| | dynamic_threshold = max(0.35, mask_max * 0.7) |
| | print(f" ✅ Excellente Maske. Threshold={dynamic_threshold:.3f}") |
| | |
| | |
| | mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255 |
| |
|
| | |
| | if mask_array.max() == 0: |
| | print(" ⚠️ Maske leer, erstelle rechteckige Fallback-Maske") |
| | mask_array = np.zeros((image.height, image.width), dtype=np.uint8) |
| | |
| | cv2.rectangle(mask_array, (x1, y1), (x2, y2), 255, -1) |
| | |
| |
|
| | |
| | raw_mask_array = mask_array.copy() |
| |
|
| | print(f"🔧 FOCUS_CHANGE POSTPROCESSING (auf {image.width}×{image.height})") |
| |
|
| | |
| | |
| | |
| | print("🔧 FOCUS_CHANGE POSTPROCESSING (Originalgröße)") |
| | print(f" mask_array - Min/Max: {mask_array.min()}/{mask_array.max()}") |
| | print(f" mask_array - Weiße Pixel: {np.sum(mask_array > 0)}") |
| | print(f" mask_array - Shape: {mask_array.shape}") |
| | print(f" mask_array - dtype: {mask_array.dtype}") |
| | |
| | |
| | labeled_array, num_features = ndimage.label(mask_array) |
| | if num_features > 1: |
| | sizes = ndimage.sum(mask_array, labeled_array, range(1, num_features + 1)) |
| | largest_component = np.argmax(sizes) + 1 |
| | mask_array = np.where(labeled_array == largest_component, mask_array, 0) |
| | print(f" ✅ Größte Komponente behalten ({num_features}→1)") |
| | |
| | |
| | kernel_close = np.ones((5, 5), np.uint8) |
| | mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close, iterations=2) |
| | |
| | |
| | clean_mask = mask_array.copy() |
| |
|
| | |
| | inpaint_binary_mask = cv2.dilate(clean_mask, np.ones((3,3), np.uint8), 1) |
| |
|
| | print(f"🔳 [FOCUS] Inpainting-Maske gespeichert: {np.unique(inpaint_binary_mask)}") |
| |
|
| |
|
| | |
| | kernel_dilate = np.ones((15, 15), np.uint8) |
| | mask_array = cv2.dilate(mask_array, kernel_dilate, iterations=1) |
| | |
| | |
| | |
| | mask_array = cv2.GaussianBlur(mask_array, (9, 9), 2.0) |
| | |
| | |
| | mask_array_float = mask_array.astype(np.float32) / 255.0 |
| | mask_array_float = np.clip(mask_array_float, 0.0, 1.0) |
| | mask_array_float = mask_array_float ** 0.85 |
| | mask_array = (mask_array_float * 255).astype(np.uint8) |
| | |
| | |
| | mask_original = Image.fromarray(mask_array).convert("L") |
| | raw_mask = Image.fromarray(raw_mask_array).convert("L") |
| | inpaint_binary_pil = Image.fromarray(inpaint_binary_mask).convert("L") |
| | |
| | |
| | mask = mask_original |
| | |
| | print(f"✅ FOCUS_CHANGE Maske erstellt: {mask.size}") |
| | return mask, raw_mask, inpaint_binary_pil |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | elif mode == "face_only_change": |
| | print("-" * 60) |
| | print("👤 SPEZIALMODUS: NUR GESICHT - ROBUSTER WORKFLOW") |
| | print("-" * 60) |
| | |
| | |
| | |
| | |
| | |
| | |
| | original_image = image |
| | print(f"💾 Originalbild gesichert: {original_image.size}") |
| | original_bbox = (x1, y1, x2, y2) |
| | print(f"💾 Original-BBox gespeichert: {original_bbox}") |
| |
|
| |
|
| | |
| | use_crop_strategy = True |
| |
|
| | if original_image.width <= 512 and original_image.height <= 512: |
| | print("Bild ist 512x512 oder kleiner, verwende Originalgröße für SAM") |
| | print(f" Originalgröße: {original_image.width}×{original_image.height}") |
| | |
| | use_crop_strategy = False |
| | else: |
| | print(f"📏 Bild ist größer als 512x512 ({original_image.size}) → Verwende Crop-Strategie") |
| | use_crop_strategy = True |
| | |
| | |
| | if use_crop_strategy: |
| | |
| | |
| | |
| | |
| | print("✂️ SCHRITT 2: ERSTELLE QUADRATISCHEN AUSSCHNITT (BBox × 2.5)") |
| | |
| | |
| | bbox_center_x = (x1 + x2) // 2 |
| | bbox_center_y = (y1 + y2) // 2 |
| | print(f" 📍 BBox-Zentrum: ({bbox_center_x}, {bbox_center_y})") |
| | |
| | |
| | bbox_width = x2 - x1 |
| | bbox_height = y2 - y1 |
| | bbox_max_dim = max(bbox_width, bbox_height) |
| | print(f" 📏 BBox Dimensionen: {bbox_width} × {bbox_height} px") |
| | print(f" 📐 Maximale BBox-Dimension: {bbox_max_dim} px") |
| | |
| | |
| | crop_size = int(bbox_max_dim * 2.5) |
| | print(f" 🎯 Ziel-Crop-Größe: {crop_size} × {crop_size} px (BBox × 2.5)") |
| | |
| | |
| | crop_x1 = bbox_center_x - crop_size // 2 |
| | crop_y1 = bbox_center_y - crop_size // 2 |
| | crop_x2 = crop_x1 + crop_size |
| | crop_y2 = crop_y1 + crop_size |
| | |
| | |
| | crop_x1 = max(0, crop_x1) |
| | crop_y1 = max(0, crop_y1) |
| | crop_x2 = min(original_image.width, crop_x2) |
| | crop_y2 = min(original_image.height, crop_y2) |
| |
|
| |
|
| | |
| | max_iterations = 3 |
| | print(f" 🔄 Iterative Crop-Anpassung (max. {max_iterations} Versuche)") |
| |
|
| | for iteration in range(max_iterations): |
| | actual_crop_width = crop_x2 - crop_x1 |
| | actual_crop_height = crop_y2 - crop_y1 |
| | |
| | |
| | if actual_crop_width >= crop_size and actual_crop_height >= crop_size: |
| | print(f" ✅ Crop-Größe OK nach {iteration} Iteration(en): {actual_crop_width}×{actual_crop_height} px") |
| | break |
| | |
| | print(f" 🔄 Iteration {iteration+1}: Crop zu klein ({actual_crop_width}×{actual_crop_height})") |
| | |
| | |
| | if actual_crop_width < crop_size: |
| | if crop_x1 == 0: |
| | crop_x2 = min(original_image.width, crop_x1 + crop_size) |
| | print(f" ← Breite angepasst (linker Rand): crop_x2 = {crop_x2}") |
| | elif crop_x2 == original_image.width: |
| | crop_x1 = max(0, crop_x2 - crop_size) |
| | print(f" → Breite angepasst (rechter Rand): crop_x1 = {crop_x1}") |
| | else: |
| | |
| | missing_width = crop_size - actual_crop_width |
| | expand_left = missing_width // 2 |
| | expand_right = missing_width - expand_left |
| | |
| | crop_x1 = max(0, crop_x1 - expand_left) |
| | crop_x2 = min(original_image.width, crop_x2 + expand_right) |
| | print(f" ↔ Zentriert erweitert um {missing_width}px") |
| | |
| | |
| | if actual_crop_height < crop_size: |
| | if crop_y1 == 0: |
| | crop_y2 = min(original_image.height, crop_y1 + crop_size) |
| | print(f" ↑ Höhe angepasst (oberer Rand): crop_y2 = {crop_y2}") |
| | elif crop_y2 == original_image.height: |
| | crop_y1 = max(0, crop_y2 - crop_size) |
| | print(f" ↓ Höhe angepasst (unterer Rand): crop_y1 = {crop_y1}") |
| | else: |
| | |
| | missing_height = crop_size - actual_crop_height |
| | expand_top = missing_height // 2 |
| | expand_bottom = missing_height - expand_top |
| | |
| | crop_y1 = max(0, crop_y1 - expand_top) |
| | crop_y2 = min(original_image.height, crop_y2 + expand_bottom) |
| | print(f" ↕ Zentriert erweitert um {missing_height}px") |
| | |
| | |
| | crop_x1 = max(0, crop_x1) |
| | crop_y1 = max(0, crop_y1) |
| | crop_x2 = min(original_image.width, crop_x2) |
| | crop_y2 = min(original_image.height, crop_y2) |
| | |
| | |
| | if iteration == max_iterations - 1: |
| | actual_crop_width = crop_x2 - crop_x1 |
| | actual_crop_height = crop_y2 - crop_y1 |
| | print(f" ⚠️ Max. Iterationen erreicht. Finaler Crop: {actual_crop_width}×{actual_crop_height} px") |
| | |
| | |
| | if actual_crop_width < crop_size or actual_crop_height < crop_size: |
| | min_acceptable = int(bbox_max_dim * 1.8) |
| | if actual_crop_width < min_acceptable or actual_crop_height < min_acceptable: |
| | print(f" 🚨 KRITISCH: Crop immer noch zu klein ({actual_crop_width}×{actual_crop_height})") |
| | print(f" 🚨 SAM könnte Probleme haben!") |
| |
|
| | print(f" 🔲 Finaler Crop-Bereich: [{crop_x1}, {crop_y1}, {crop_x2}, {crop_y2}]") |
| | print(f" 📏 Finale Crop-Größe: {crop_x2-crop_x1} × {crop_y2-crop_y1} px") |
| |
|
| | |
| | |
| | cropped_image = original_image.crop((crop_x1, crop_y1, crop_x2, crop_y2)) |
| | print(f" ✅ Quadratischer Ausschnitt erstellt: {cropped_image.size}") |
| | |
| | |
| | |
| | |
| | print("📐 SCHRITT 3: BBox-KOORDINATEN TRANSFORMIEREN") |
| | rel_x1 = x1 - crop_x1 |
| | rel_y1 = y1 - crop_y1 |
| | rel_x2 = x2 - crop_x1 |
| | rel_y2 = y2 - crop_y1 |
| | |
| | |
| | rel_x1 = max(0, rel_x1) |
| | rel_y1 = max(0, rel_y1) |
| | rel_x2 = min(cropped_image.width, rel_x2) |
| | rel_y2 = min(cropped_image.height, rel_y2) |
| | |
| | print(f" 🎯 Relative BBox im Crop: [{rel_x1}, {rel_y1}, {rel_x2}, {rel_y2}]") |
| | print(f" 📏 Relative BBox Größe: {rel_x2-rel_x1} × {rel_y2-rel_y1} px") |
| | |
| | |
| | |
| | |
| | print("🔍 SCHRITT 4: ERWEITERTE BILDAUFBEREITUNG FÜR GESICHTSERKENNUNG") |
| | |
| | |
| | contrast_enhancer = ImageEnhance.Contrast(cropped_image) |
| | enhanced_image = contrast_enhancer.enhance(1.3) |
| | |
| | |
| | sharpness_enhancer = ImageEnhance.Sharpness(enhanced_image) |
| | enhanced_image = sharpness_enhancer.enhance (1.4) |
| | |
| | |
| | brightness_enhancer = ImageEnhance.Brightness(enhanced_image) |
| | enhanced_image = brightness_enhancer.enhance(1.05) |
| | |
| | print(f" ✅ Erweiterte Bildaufbereitung abgeschlossen") |
| | print(f" • Kontrast: +30%") |
| | print(f" • Schärfe: +40%") |
| | print(f" • Helligkeit: +5%") |
| | |
| | |
| | image = enhanced_image |
| | x1, y1, x2, y2 = rel_x1, rel_y1, rel_x2, rel_y2 |
| | |
| | print(" 🔄 SAM wird auf aufbereitetem Ausschnitt ausgeführt") |
| | print(f" 📊 SAM-Eingabegröße: {image.size}") |
| |
|
| |
|
| | else: |
| | print("🎯 SCHRITT 2: VERWENDE ORIGINALBILD UND ORIGINAL-BBOX FÜR SAM") |
| | print(f" Originalbild-Größe: {original_image.size}") |
| | print(f" Original-BBox: [{x1}, {y1}, {x2}, {y2}]") |
| | |
| | |
| | image = original_image |
| | |
| |
|
| | |
| | |
| | |
| |
|
| | if use_crop_strategy: |
| | |
| | bbox_x1, bbox_y1, bbox_x2, bbox_y2 = rel_x1, rel_y1, rel_x2, rel_y2 |
| | |
| | print(f" 🎯 Heuristik-BBox im Crop: [{bbox_x1}, {bbox_y1}, {bbox_x2}, {bbox_y2}]") |
| | |
| | else: |
| | |
| | bbox_x1, bbox_y1, bbox_x2, bbox_y2 = original_bbox |
| |
|
| | |
| | |
| | |
| | bbox_width = bbox_x2 - bbox_x1 |
| | bbox_height = bbox_y2 - bbox_y1 |
| | bbox_area = bbox_width * bbox_height |
| | bbox_center = ((bbox_x1 + bbox_x2) // 2, (bbox_y1 + bbox_y2) // 2) |
| | |
| | print(f"📏 HEURISTIK-BBOX: [{bbox_x1}, {bbox_y1}, {bbox_x2}, {bbox_y2}]") |
| | print(f"📐 HEURISTIK-BBOX-Größe: {bbox_width}×{bbox_height}px = {bbox_area:,}px²") |
| | print(f"📍 HEURISTIK-BBOX-Zentrum: {bbox_center}") |
| | print(f" ✅ SAM verwendet Original-BBox-Koordinaten") |
| | |
| |
|
| | |
| | |
| | |
| | |
| |
|
| | print("-" * 60) |
| | print(f"📦 BOUNDING BOX DETAILS FÜR SAM:") |
| | |
| | print("-" * 60) |
| | print("🖼️ BILDAUFBEREITUNG FÜR SAM 2") |
| | |
| | image_np = np.array(image.convert("RGB")) |
| | |
| | |
| | input_boxes = [[[x1, y1, x2, y2]]] |
| |
|
| | |
| | center_x = (x1 + x2) // 2 |
| | center_y = (y1 + y2) // 2 |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| |
|
| | if is_back_head: |
| | |
| | back_offset = int(bbox_height * 0.4) |
| | back_x = center_x |
| | back_y = center_y + back_offset |
| | back_y = min(back_y, y2 - 10) |
| | |
| | input_points = [[[[center_x,center_y], [back_x, back_y]]]] |
| | input_labels = [[[1, 0]]] |
| | |
| | print(f" 🎯 HINTERKOPF-SAM: Positiv ({center_x},{center_y}), Negativ ({back_x},{back_y})") |
| | else: |
| | |
| | input_points = [[[[center_x, center_y]]]] |
| | input_labels = [[[1]]] |
| | |
| | print(f" 🎯 GESICHTS-SAM: Punkt ({center_x},{center_y})") |
| | |
| | |
| | |
| | |
| | inputs = self.sam_processor( |
| | image_np, |
| | input_boxes=input_boxes, |
| | input_points=input_points, |
| | input_labels=input_labels, |
| | return_tensors="pt" |
| | ).to(self.device) |
| |
|
| | print(f"✅ Processor-Ausgabe: Dictionary mit {len(inputs)} Schlüsseln: {list(inputs.keys())}") |
| | print(f" - 'pixel_values' Shape: {inputs['pixel_values'].shape}") |
| | print(f" - 'input_boxes' Shape: {inputs['input_boxes'].shape}") |
| | if 'input_points' in inputs: |
| | print(f" - 'input_points' Shape: {inputs['input_points'].shape}") |
| |
|
| | |
| | print("-" * 60) |
| | print("🧠 SAM 2 INFERENZ (Vorhersage)") |
| | with torch.no_grad(): |
| | print(" Führe Vorhersage durch...") |
| | outputs = self.sam_model(**inputs) |
| | print(f"✅ Vorhersage abgeschlossen") |
| | print(f" Anzahl der Vorhersagemasken: {outputs.pred_masks.shape[2]}") |
| | |
| | |
| | print("📏 SCHRITT 6: MASKE EXTRAHIEREN") |
| | |
| | num_masks = outputs.pred_masks.shape[2] |
| | print(f" SAM lieferte {num_masks} verschiedene Masken") |
| | |
| | |
| | all_masks_crop = [] |
| |
|
| | |
| | for i in range(num_masks): |
| | single_mask = outputs.pred_masks[:, :, i, :, :] |
| | |
| | resized_mask_crop = F.interpolate( |
| | single_mask, |
| | size=(image.height, image.width), |
| | mode='bilinear', |
| | align_corners=False |
| | ).squeeze() |
| | |
| | mask_np = resized_mask_crop.sigmoid().cpu().numpy() |
| | all_masks_crop.append(mask_np) |
| |
|
| |
|
| | |
| | mask_binary = (mask_np > 0.5).astype(np.uint8) |
| | print(f" Maske {i+1}: {np.sum(mask_binary):,}px (Crop-Größe)") |
| | |
| | |
| | |
| | |
| | |
| | |
| | print("🤔 HEURISTIK BERECHNEN") |
| |
|
| | |
| | |
| | |
| | |
| | best_mask_idx = 0 |
| | best_score = -1 |
| | |
| | for i, mask_np in enumerate(all_masks_crop): |
| | mask_max = mask_np.max() |
| | |
| | |
| | if mask_max < 0.3: |
| | print(f" ❌ Maske {i+1}: Zu niedrige Konfidenz ({mask_max:.3f}), überspringe") |
| | continue |
| | |
| | |
| | adaptive_threshold = max(0.3, mask_max * 0.7) |
| | mask_binary = (mask_np > adaptive_threshold).astype(np.uint8) |
| | |
| | if np.sum(mask_binary) == 0: |
| | print(f" ❌ Maske {i+1}: Keine Pixel nach Threshold {adaptive_threshold:.3f}") |
| | continue |
| | |
| | |
| | mask_area_pixels = np.sum(mask_binary) |
| |
|
| | |
| | |
| | |
| |
|
| | print(f" 🔍 Analysiere Maske {i+1} auf {'Crop-' if use_crop_strategy else 'Original-'}Größe") |
| | |
| | |
| | area_ratio = mask_area_pixels / bbox_area if bbox_area > 0 else 0 |
| | print(f" 📐 Flächen-Ratio: {area_ratio:.3f} ({mask_area_pixels:,} / {bbox_area:,} Pixel)") |
| |
|
| | |
| | optimal_min, optimal_max = 0.8, 1.2 |
| | if optimal_min <= area_ratio <= optimal_max: |
| | area_score = 1.0 |
| | elif area_ratio < optimal_min: |
| | area_score = area_ratio / optimal_min |
| | else: |
| | area_score = optimal_max / area_ratio |
| |
|
| |
|
| | |
| | bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8) |
| | bbox_mask[bbox_y1:bbox_y2, bbox_x1:bbox_x2] = 1 |
| | overlap = np.sum(mask_binary & bbox_mask) |
| | bbox_overlap_ratio = overlap / bbox_area if bbox_area > 0 else 0 |
| | |
| | |
| | |
| | |
| | labeled_mask = measure.label(mask_binary) |
| | regions = measure.regionprops(labeled_mask) |
| | |
| | if len(regions) == 0: |
| | compactness_score = 0.1 |
| | print(f" ❌ Keine zusammenhängenden Regionen gefunden") |
| | else: |
| | |
| | largest_region = max(regions, key=lambda r: r.area) |
| | |
| | |
| | solidity = largest_region.solidity if hasattr(largest_region, 'solidity') else 0.7 |
| | |
| | |
| | eccentricity = largest_region.eccentricity if hasattr(largest_region, 'eccentricity') else 0.5 |
| | |
| | |
| | |
| | if 0.4 <= eccentricity <= 0.9: |
| | eccentricity_score = 1.0 - abs(eccentricity - 0.65) * 2 |
| | else: |
| | eccentricity_score = 0.2 |
| | |
| | compactness_score = (solidity * 0.6 + eccentricity_score * 0.4) |
| | print(f" 🎯 Kompaktheits-Analyse:") |
| | print(f" • Solidität (Fläche/Konvex): {solidity:.3f}") |
| | print(f" • Exzentrizität (Form): {eccentricity:.3f}") |
| | print(f" • Kompaktheits-Score: {compactness_score:.3f}") |
| |
|
| | |
| | |
| | confidence_score = mask_max |
| | |
| | score = ( |
| | bbox_overlap_ratio * 0.65 + |
| | compactness_score * 0.20 + |
| | area_score * 0.10 + |
| | confidence_score * 0.05 |
| | ) |
| | |
| | print(f" 📊 GESICHTS-SCORES für Maske {i+1}:") |
| | print(f" • BBox-Überlappung: {bbox_overlap_ratio:.1%}") |
| | print(f" • Flächen-Score: {area_score:.3f}") |
| | print(f" • Kompaktheits-Score: {compactness_score:.3f}") |
| | print(f" • Konfidenz-Score: {confidence_score:.3f}") |
| | print(f" • GESAMTSCORE: {score:.3f}") |
| |
|
| | if score > best_score: |
| | best_score = score |
| | best_mask_idx = i |
| | print(f" 🏆 Neue beste Maske: Nr. {i+1} mit Score {score:.3f}") |
| |
|
| | print(f"✅ Beste Maske ausgewählt: Nr. {best_mask_idx+1} mit Score {best_score:.3f}") |
| |
|
| | |
| | mask_np = all_masks_crop[best_mask_idx] |
| | max_val = mask_np.max() |
| | print(f"🔍 Maximaler SAM-Konfidenzwert der besten Maske: {max_val:.3f}") |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | if max_val < 0.5: |
| | dynamic_threshold = 0.15 |
| | print(f" ⚠️ SAM ist unsicher für Gesicht (max_val={max_val:.3f} < 0.5)") |
| | elif max_val < 0.8: |
| | dynamic_threshold = max_val * 0.45 |
| | print(f" ℹ️ SAM ist mäßig sicher für Gesicht (max_val={max_val:.3f})") |
| | else: |
| | |
| | dynamic_threshold = max(0.2, max_val * 0.4) |
| | print(f" ✅ SAM ist sicher für Gesicht (max_val={max_val:.3f} >= 0.8)") |
| | |
| | print(f" 🎯 Gesichts-Threshold: {dynamic_threshold:.3f}") |
| |
|
| | |
| | print("🐛 DEBUG THRESHOLD:") |
| | print(f" mask_np Min/Max: {mask_np.min():.3f}/{mask_np.max():.3f}") |
| | print(f" dynamic_threshold: {dynamic_threshold:.3f}") |
| |
|
| | mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255 |
| |
|
| | print(f"🚨 DEBUG BINÄRMASKE:") |
| | print(f" mask_array Min/Max: {mask_array.min()}/{mask_array.max()}") |
| | print(f" Weiße Pixel in mask_array: {np.sum(mask_array > 0)}") |
| | print(f" Anteil weiße Pixel: {np.sum(mask_array > 0) / mask_array.size:.1%}") |
| |
|
| | |
| | if mask_array.max() == 0: |
| | print("⚠️ KRITISCH: Binärmaske ist leer! Erzwinge Testmaske (BBox).") |
| | print(f" 🚨 BBox für Fallback: x1={x1}, y1={y1}, x2={x2}, y2={y2}") |
| | |
| | test_mask = np.zeros((image.height, image.width), dtype=np.uint8) |
| | cv2.rectangle(test_mask, (x1, y1), (x2, y2), 255, -1) |
| | |
| | mask_array = test_mask |
| | print(f"🐛 DEBUG ERZWUNGENE MASKE: Weiße Pixel: {np.sum(mask_array > 0)}") |
| |
|
| | |
| | raw_mask_array = mask_array.copy() |
| |
|
| | |
| | |
| | |
| |
|
| | labeled_array, num_features = ndimage.label(mask_array) |
| | |
| | if num_features > 0: |
| | |
| | regions = measure.regionprops(labeled_array) |
| | valid_regions = [] |
| | |
| | for region in regions: |
| | centroid_y, centroid_x = region.centroid |
| | bbox_distance = np.sqrt((centroid_x - bbox_center[0])**2 + |
| | (centroid_y - bbox_center[1])**2) |
| | |
| | |
| | max_distance = np.sqrt(bbox_width**2 + bbox_height**2) * 0.5 |
| | |
| | if bbox_distance <= max_distance: |
| | valid_regions.append(region) |
| | |
| | if valid_regions: |
| | |
| | largest_region = max(valid_regions, key=lambda r: r.area) |
| | mask_array = np.where(labeled_array == largest_region.label, mask_array, 0) |
| | print(f" ✅ {len(valid_regions)}/{num_features} Regionen in/nah BBox, größte behalten") |
| | else: |
| | print(f" ⚠️ Keine Region in/nah BBox gefunden, alle behalten") |
| | |
| | |
| | inpaint_binary_mask = mask_array.copy() |
| | |
| | if use_crop_strategy: |
| | print("👤 POSTPROCESSING AUF CROP-GRÖSSE") |
| | |
| | |
| | |
| | print(" ⚙️ Morphologische Operationen für sauberen Kopf") |
| | |
| | |
| | |
| | kernel_close = np.ones((7, 7), np.uint8) |
| | mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close, iterations=1) |
| | print(" • MORPH_CLOSE (7x7) - Löcher im Kopf füllen") |
| | |
| | |
| | kernel_open = np.ones((5, 5), np.uint8) |
| | mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_OPEN, kernel_open, iterations=1) |
| | print(" • MORPH_OPEN (5x5) - Rauschen entfernen") |
| |
|
| | |
| | |
| | print(" 🔲 Leichter Dilate für natürliche Abdeckung") |
| | kernel_dilate = np.ones((5, 5), np.uint8) |
| | mask_array = cv2.dilate(mask_array, kernel_dilate, iterations=1) |
| |
|
| |
|
| | |
| | inpaint_binary_mask = mask_array.copy() |
| | print(f"🔳 [FACE] Inpainting-Maske gespeichert: {np.unique(inpaint_binary_mask)}") |
| | |
| | |
| | |
| | |
| | print(" 🔷 Gaussian Blur für weiche Hautübergänge (3x3, sigma=0.5)") |
| | mask_array = cv2.GaussianBlur(mask_array, (3, 3), 0.5) |
| |
|
| | |
| | mask_array = np.where(mask_array > 128, 255, 0).astype(np.uint8) |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| |
|
| | |
| | print(" 📏 Prüfe Maskendichte...") |
| | white_pixels = np.sum(mask_array > 128) |
| | bbox_area = (x2 - x1) * (y2 - y1) |
| | coverage_ratio = white_pixels / bbox_area if bbox_area > 0 else 0 |
| |
|
| | print(f" 📊 Aktuelle Abdeckung: {white_pixels:,}px / {bbox_area:,}px = {coverage_ratio:.1%}") |
| |
|
| | |
| | if coverage_ratio < 0.9: |
| | print(f" ⚠️ Maske zu dünn für Gesicht (<90%)") |
| | print(f" 📈 Zusätzlicher Dilate...") |
| | kernel_extra = np.ones((3, 3), np.uint8) |
| | mask_array = cv2.dilate(mask_array, kernel_extra, iterations=1) |
| | |
| | |
| | |
| |
|
| | |
| | print(f"✅ Finale Maske Werte: {np.unique(mask_array)}") |
| |
|
| | |
| | |
| | |
| | print("🔄 MASKE AUF ORIGINALGRÖSSE TRANSFORMIEREN") |
| | |
| | |
| | mask_crop_pil = Image.fromarray(mask_array).convert("L") |
| | |
| | |
| | mask_original = Image.new("L", original_image.size, 0) |
| | |
| | |
| | |
| | mask_original.paste(mask_crop_pil, (crop_x1, crop_y1)) |
| | |
| | |
| | raw_mask_crop_pil = Image.fromarray(raw_mask_array).convert("L") |
| | raw_mask_original = Image.new("L", original_image.size, 0) |
| | raw_mask_original.paste(raw_mask_crop_pil, (crop_x1, crop_y1)) |
| |
|
| | |
| | |
| | inpaint_binary_crop_pil = Image.fromarray(inpaint_binary_mask).convert("L") |
| | |
| | inpaint_binary_original = Image.new("L", original_image.size, 0) |
| | |
| | inpaint_binary_original.paste(inpaint_binary_crop_pil, (crop_x1, crop_y1)) |
| |
|
| | else: |
| | print("👤 POSTPROCESSING AUF ORIGINALGRÖSSE (≤512px)") |
| | |
| | |
| | kernel_close = np.ones((5, 5), np.uint8) |
| | mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close, iterations=1) |
| | print(" • MORPH_CLOSE (5x5) - Löcher im Kopf füllen") |
| | |
| | kernel_open = np.ones((3, 3), np.uint8) |
| | mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_OPEN, kernel_open, iterations=1) |
| | print(" • MORPH_OPEN (3x3) - Rauschen entfernen") |
| |
|
| | |
| | kernel_dilate = np.ones((9, 9), np.uint8) |
| | mask_array = cv2.dilate(mask_array, kernel_dilate, iterations=1) |
| | print(" • DILATE (3x3) - Natürliche Abdeckung") |
| |
|
| | |
| | inpaint_binary_mask = mask_array.copy() |
| | |
| | |
| | mask_array = cv2.GaussianBlur(mask_array, (11, 11), 2.0) |
| | print(" • GAUSSIAN BLUR (11x11, sigma=2.0) - Weiche Übergänge") |
| |
|
| | |
| | mask_array_float = mask_array.astype(np.float32) / 255.0 |
| | mask_array_float = np.clip(mask_array_float, 0.0, 1.0) |
| | mask_array_float = mask_array_float ** 0.8 |
| | mask_array = (mask_array_float * 255).astype(np.uint8) |
| | print(" • GAMMA (0.8) - Glatte Übergänge") |
| |
|
| | |
| | mask_array = cv2.GaussianBlur(mask_array, (7, 7), 1.0) |
| | print(" • FINALER BLUR (7x7, sigma=1.0)") |
| |
|
| | |
| | white_pixels = np.sum(mask_array > 128) |
| | coverage_ratio = white_pixels / bbox_area if bbox_area > 0 else 0 |
| | print(f" 📊 Aktuelle Abdeckung: {white_pixels:,}px / {bbox_area:,}px = {coverage_ratio:.1%}") |
| |
|
| | if coverage_ratio < 0.9: |
| | print(f" ⚠️ Maske zu dünn für Gesicht (<90%)") |
| | kernel_extra = np.ones((5, 5), np.uint8) |
| | mask_array = cv2.dilate(mask_array, kernel_extra, iterations=1) |
| | mask_array = cv2.GaussianBlur(mask_array, (7, 7), 1.5) |
| |
|
| | |
| | mask_original = Image.fromarray(mask_array).convert("L") |
| | raw_mask_original = Image.fromarray(raw_mask_array).convert("L") |
| | inpaint_binary_original = Image.fromarray(inpaint_binary_mask).convert("L") |
| | |
| | |
| | |
| | |
| | |
| | print("📊 FINALE MASKEN-STATISTIK FÜR FEHLERANALYSE") |
| |
|
| | |
| | final_white = np.sum(mask_array > 128) |
| | final_coverage = final_white / bbox_area if bbox_area > 0 else 0 |
| | final_array = np.array(mask_original) |
| | white_pixels = np.sum(final_array > 0) |
| | total_pixels = final_array.size |
| | white_ratio = white_pixels / total_pixels * 100 if total_pixels > 0 else 0 |
| |
|
| | original_bbox_width = original_bbox[2] - original_bbox[0] |
| | original_bbox_height = original_bbox[3] - original_bbox[1] |
| | original_face_area = original_bbox_width * original_bbox_height |
| | coverage_ratio = white_pixels / original_face_area if original_face_area > 0 else 0 |
| |
|
| | |
| | if not use_crop_strategy: |
| | crop_size = "N/A" |
| | crop_x1 = crop_y1 = crop_x2 = crop_y2 = "N/A" |
| |
|
| | |
| | print(f" 🖼️ Bildgröße: {original_image.size[0]}×{original_image.size[1]} | " |
| | f"Crop-Strategie: {'JA' if use_crop_strategy else 'NEIN'}") |
| | print(f" 📦 Original-BBox: {original_bbox} | " |
| | f"Fläche: {original_face_area:,} px² ({original_bbox_width}×{original_bbox_height})") |
| | print(f" 🎯 SAM-Ergebnis: Beste Maske #{best_mask_idx+1} | Score: {best_score:.3f} | " |
| | f"Konfidenz: {max_val:.3f} | Verfügbare Masken: {num_masks}") |
| | print(f" 👤 Gesichtsabdeckung: {coverage_ratio:.1%} der BBox " |
| | f"({'OPTIMAL' if 0.8 <= coverage_ratio <= 1.2 else 'UNTER' if coverage_ratio < 0.7 else 'ÜBER'})") |
| | print(f" 📐 Maskenpixel: {white_pixels:,} weiß ({white_ratio:.1f}%) | " |
| | f"{total_pixels-white_pixels:,} schwarz ({100-white_ratio:.1f}%)") |
| | print(f" ⚙️ Postprocessing: {num_features} Komponenten → 1 behalten | " |
| | f"Operationen: MORPH_CLOSE, MORPH_OPEN, DILATE, GAUSSIAN_BLUR, GAMMA") |
| | if use_crop_strategy: |
| | print(f" 🔄 Crop-Größe: {crop_size}×{crop_size}px | " |
| | f"Position: [{crop_x1},{crop_y1},{crop_x2},{crop_y2}]") |
| | print(f" 📏 Crop-interne Abdeckung: {final_coverage:.1%} (technisch)") |
| | else: |
| | print(f" 🔄 Direktverarbeitung auf Originalgröße") |
| | print(f" 🎛️ Modus: {mode} | Threshold: {dynamic_threshold:.3f}") |
| |
|
| | |
| | if coverage_ratio < 0.7: |
| | print(f" ⚠️ WARNUNG: Geringe Gesichtsabdeckung ({coverage_ratio:.1%})") |
| | elif coverage_ratio > 1.3: |
| | print(f" ⚠️ WARNUNG: Sehr hohe Gesichtsabdeckung ({coverage_ratio:.1%})") |
| | elif 0.8 <= coverage_ratio <= 1.2: |
| | print(f" ✅ OPTIMALE Gesichtsabdeckung ({coverage_ratio:.1%})") |
| |
|
| | print("#" * 80) |
| | print(f"✅ SAM 2 SEGMENTIERUNG ABGESCHLOSSEN") |
| | print(f"📐 Finale Maskengröße: {mask_original.size}") |
| | print(f"🎛️ Verwendeter Modus: {mode}") |
| |
|
| | if use_crop_strategy: |
| | print(f"👤 Crop={crop_size}×{crop_size}px, Heuristik-Score={best_score:.3f}") |
| | else: |
| | print(f"👤 Direktverarbeitung, Heuristik-Score={best_score:.3f}") |
| | |
| | print(f"👤 Kopfabdeckung: {coverage_ratio:.1%} der BBox") |
| | print("#" * 80) |
| |
|
| | |
| | return mask_original, raw_mask_original, inpaint_binary_original |
| |
|
| | |
| | |
| | |
| | |
| | |
| | else: |
| | print(f"❌ Unbekannter Modus: {mode}") |
| | return self._create_rectangular_mask(image, bbox_coords, "focus_change") |
| | |
| | except Exception as e: |
| | print("❌" * 40) |
| | print("❌ FEHLER IN SAM 2 SEGMENTIERUNG") |
| | print(f"Fehler: {str(e)[:200]}") |
| | print("❌" * 40) |
| | import traceback |
| | traceback.print_exc() |
| | |
| | |
| | fallback_mask = self._create_rectangular_mask(original_image, original_bbox, mode) |
| | if fallback_mask.size != original_image.size: |
| | print(f" ⚠️ Fallback-Maske angepasst: {fallback_mask.size} → {original_image.size}") |
| | fallback_mask = fallback_mask.resize(original_image.size, Image.Resampling.NEAREST) |
| | |
| | return fallback_mask, fallback_mask |
| |
|
| |
|
| | def _create_rectangular_mask(self, image, bbox_coords, mode): |
| | """Fallback: Erstellt rechteckige Maske""" |
| | print("#" * 80) |
| | print("# ⚠️ FALLBACK: ERSTELLE RECHTECKIGE MASKE") |
| | print("#" * 80) |
| | |
| | from PIL import ImageDraw |
| |
|
| | mask = Image.new("L", image.size, 0) |
| | print(f"📐 Erstelle leere Maske: {mask.size}") |
| |
|
| | if bbox_coords and all(coord is not None for coord in bbox_coords): |
| | x1, y1, x2, y2 = self._validate_bbox(image, bbox_coords) |
| | draw = ImageDraw.Draw(mask) |
| |
|
| | if mode == "environment_change": |
| | draw.rectangle([0, 0, image.size[0], image.size[1]], fill=255) |
| | draw.rectangle([x1, y1, x2, y2], fill=0) |
| | print(f" Modus: Umgebung ändern - BBox geschützt: [{x1}, {y1}, {x2}, {y2}]") |
| | else: |
| | draw.rectangle([x1, y1, x2, y2], fill=255) |
| | print(f" Modus: Focus/Gesicht ändern - BBox verändert: [{x1}, {y1}, {x2}, {y2}]") |
| |
|
| | print("✅ Rechteckige Maske erstellt") |
| | return mask |
| |
|
| | def load_pose_detector(self): |
| | """Lädt nur den Pose-Detector""" |
| | if self.pose_detector is None: |
| | print("#" * 80) |
| | print("# 📥 LADE POSE DETECTOR") |
| | print("#" * 80) |
| | try: |
| | self.pose_detector = OpenposeDetector.from_pretrained("lllyasviel/ControlNet") |
| | print("✅ Pose-Detector geladen") |
| | except Exception as e: |
| | print(f"⚠️ Pose-Detector konnte nicht geladen werden: {e}") |
| | return self.pose_detector |
| |
|
| | def load_midas_model(self): |
| | """Lädt MiDaS Model für Depth Maps""" |
| | if self.midas_model is None: |
| | print("#" * 80) |
| | print("# 📥 LADE MIDAS MODELL FÜR DEPTH MAPS") |
| | print("#" * 80) |
| | try: |
| | import torchvision.transforms as T |
| |
|
| | self.midas_model = torch.hub.load( |
| | "intel-isl/MiDaS", |
| | "DPT_Hybrid", |
| | trust_repo=True |
| | ) |
| |
|
| | self.midas_model.to(self.device) |
| | self.midas_model.eval() |
| |
|
| | self.midas_transform = T.Compose([ |
| | T.Resize(384), |
| | T.ToTensor(), |
| | T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), |
| | ]) |
| |
|
| | print("✅ MiDaS Modell erfolgreich geladen") |
| | except Exception as e: |
| | print(f"❌ MiDaS konnte nicht geladen werden: {e}") |
| | print("ℹ️ Verwende Fallback-Methode") |
| | self.midas_model = None |
| |
|
| | return self.midas_model |
| |
|
| | def extract_pose_simple(self, image): |
| | """Einfache Pose-Extraktion ohne komplexe Abhängigkeiten""" |
| | print("#" * 80) |
| | print("# ⚠️ ERSTELLE EINFACHE POSE-MAP (FALLBACK)") |
| | print("#" * 80) |
| | try: |
| | img_array = np.array(image.convert("RGB")) |
| | edges = cv2.Canny(img_array, 100, 200) |
| | pose_image = Image.fromarray(edges).convert("RGB") |
| | print("⚠️ Verwende Kanten-basierte Pose-Approximation") |
| | return pose_image |
| | except Exception as e: |
| | print(f"Fehler bei einfacher Pose-Extraktion: {e}") |
| | return image.convert("RGB").resize((512, 512)) |
| |
|
| | def extract_pose(self, image): |
| | """Extrahiert Pose-Map aus Bild mit Fallback""" |
| | print("#" * 80) |
| | print("# 🕺 ERSTELLE POSE-MAP") |
| | print("#" * 80) |
| | try: |
| | detector = self.load_pose_detector() |
| | if detector is None: |
| | print("⚠️ Kein Pose-Detector verfügbar, verwende Fallback") |
| | return self.extract_pose_simple(image) |
| |
|
| | print(" Extrahiere Pose mit OpenPose und allen Gelenkpunkten") |
| | |
| | pose_image = detector( |
| | image, |
| | include_body=True, |
| | include_hand=True, |
| | include_face=True, |
| | hand_and_face=True, |
| | include_foot=True, |
| | detect_resolution=896, |
| | image_resolution=512, |
| | return_pil=True |
| | ) |
| | print("✅ Detaillierte Pose-Map erstellt") |
| | print(f" 🔥 137 Gelenkpunkte (statt nur 25)") |
| | print(f" 🔥 Enthält: Körper (25) + Hände (42) + Gesicht (70)") |
| | print(f" 🔥 Detektionsauflösung: 768px für mehr Details") |
| |
|
| | return pose_image |
| | except Exception as e: |
| | print(f"Fehler bei Pose-Extraktion: {e}") |
| | return self.extract_pose_simple(image) |
| |
|
| | def extract_canny_edges(self, image): |
| | """Extrahiert Canny Edges für Umgebungserhaltung""" |
| | print("#" * 80) |
| | print("# 🎨 ERSTELLE CANNY EDGE MAP") |
| | print("#" * 80) |
| | try: |
| | img_array = np.array(image.convert("RGB")) |
| |
|
| | gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY) |
| | edges = cv2.Canny(gray, 100, 200) |
| |
|
| | edges_rgb = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB) |
| | edges_image = Image.fromarray(edges_rgb) |
| |
|
| | print("✅ Canny Edge Map erstellt") |
| | return edges_image |
| | except Exception as e: |
| | print(f"Fehler bei Canny Edge Extraction: {e}") |
| | return image.convert("RGB").resize((512, 512)) |
| |
|
| | def extract_depth_map(self, image): |
| | """ |
| | Extrahiert Depth Map mit MiDaS (Fallback auf Filter) |
| | """ |
| | print("#" * 80) |
| | print("# 🏔️ ERSTELLE DEPTH MAP") |
| | print("#" * 80) |
| | try: |
| | midas = self.load_midas_model() |
| | if midas is not None: |
| | print("🎯 Verwende MiDaS für Depth Map...") |
| |
|
| | import torchvision.transforms as T |
| |
|
| | img_transformed = self.midas_transform(image).unsqueeze(0).to(self.device) |
| |
|
| | with torch.no_grad(): |
| | print(" Führe MiDaS Inferenz durch...") |
| | prediction = midas(img_transformed) |
| | prediction = torch.nn.functional.interpolate( |
| | prediction.unsqueeze(1), |
| | size=image.size[::-1], |
| | mode="bicubic", |
| | align_corners=False, |
| | ).squeeze() |
| |
|
| | depth_np = prediction.cpu().numpy() |
| | depth_min, depth_max = depth_np.min(), depth_np.max() |
| | print(f" Tiefenwerte: Min={depth_min:.3f}, Max={depth_max:.3f}") |
| |
|
| | if depth_max > depth_min: |
| | depth_np = (depth_np - depth_min) / (depth_max - depth_min) |
| |
|
| | depth_np = (depth_np * 255).astype(np.uint8) |
| | depth_image = Image.fromarray(depth_np).convert("RGB") |
| |
|
| | print("✅ MiDaS Depth Map erfolgreich erstellt") |
| | return depth_image |
| |
|
| | else: |
| | raise Exception("MiDaS nicht geladen") |
| |
|
| | except Exception as e: |
| | print(f"⚠️ MiDaS Fehler: {e}. Verwende Fallback...") |
| | try: |
| | img_array = np.array(image.convert("RGB")) |
| | gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY) |
| |
|
| | depth_map = cv2.GaussianBlur(gray, (5, 5), 0) |
| | depth_rgb = cv2.cvtColor(depth_map, cv2.COLOR_GRAY2RGB) |
| | depth_image = Image.fromarray(depth_rgb) |
| |
|
| | print("✅ Fallback Depth Map erstellt") |
| | return depth_image |
| | except Exception as fallback_error: |
| | print(f"❌ Auch Fallback fehlgeschlagen: {fallback_error}") |
| | return image.convert("RGB").resize((512, 512)) |
| |
|
| | def prepare_controlnet_maps(self, image, keep_environment=False): |
| | """ |
| | ERSTELLT NUR CONDITIONING-MAPS, generiert KEIN Bild. |
| | """ |
| | print("#" * 80) |
| | print("# 🎯 STARTE CONTROLNET CONDITIONING-MAP ERSTELLUNG") |
| | print("#" * 80) |
| | print(f"📐 Eingabebild-Größe: {image.size}") |
| | print(f"🎛️ Modus: {'Depth + Canny' if keep_environment else 'OpenPose + Canny'}") |
| |
|
| | if keep_environment: |
| | print(" Modus: Depth + Canny") |
| | print(" Schritt 1/2: Extrahiere Depth Map...") |
| | depth_map = self.extract_depth_map(image) |
| | print(" Schritt 2/2: Extrahiere Canny Edges...") |
| | canny_map = self.extract_canny_edges(image) |
| | conditioning_images = [depth_map, canny_map] |
| | extra_maps = {"depth": depth_map, "canny": canny_map} |
| | else: |
| | print(" Modus: OpenPose + Canny") |
| | print(" Schritt 1/2: Extrahiere Pose Map...") |
| | pose_map = self.extract_pose(image) |
| | print(" Schritt 2/2: Extrahiere Canny Edges...") |
| | canny_map = self.extract_canny_edges(image) |
| | conditioning_images = [pose_map, canny_map] |
| | extra_maps = {"pose": pose_map, "canny": canny_map} |
| |
|
| | print("-" * 60) |
| | print(f"✅ {len(conditioning_images)} CONDITIONING-MAPS ERSTELLT") |
| | for i, img in enumerate(conditioning_images): |
| | print(f" Map {i+1}: {img.size}, Modus: {img.mode}") |
| | print("#" * 80) |
| | |
| | return conditioning_images, extra_maps |
| |
|
| |
|
| | |
# Module-level singleton: pick compute device and matching dtype once at import.
# fp16 on CUDA (speed / VRAM), fp32 on CPU (half precision is slow/unsupported there).
if torch.cuda.is_available():
    device, torch_dtype = "cuda", torch.float16
else:
    device, torch_dtype = "cpu", torch.float32
controlnet_processor = ControlNetProcessor(device=device, torch_dtype=torch_dtype)