import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from controlnet_aux import OpenposeDetector
from PIL import Image, ImageFilter, ImageEnhance
import random
import cv2
import numpy as np
import gradio as gr
import torch.nn.functional as F
from transformers import Sam2Model, Sam2Processor
from scipy import ndimage
from skimage import measure, morphology

class ControlNetProgressCallback:

    def __init__(self, progress, total_steps):
        self.progress = progress
        self.total_steps = total_steps
        self.current_step = 0

    def __call__(self, pipe, step_index, timestep, callback_kwargs):
        self.current_step = step_index + 1
        progress_percentage = self.current_step / self.total_steps

        if self.progress is not None:
            self.progress(progress_percentage, desc=f"ControlNet: step {self.current_step}/{self.total_steps}")

        print(f"ControlNet progress: {self.current_step}/{self.total_steps} ({progress_percentage:.1%})")
        return callback_kwargs

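# Usage sketch (assumptions, not verified here: a diffusers pipeline exposing
# the `callback_on_step_end` hook, and a Gradio event handler that receives
# `progress=gr.Progress()` as a default argument):
#
#   def on_generate(prompt, progress=gr.Progress()):
#       callback = ControlNetProgressCallback(progress, total_steps=30)
#       return pipe(prompt, num_inference_steps=30,
#                   callback_on_step_end=callback).images[0]
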

class ControlNetProcessor:

    def __init__(self, device="cuda", torch_dtype=torch.float32):
        self.device = device
        self.torch_dtype = torch_dtype
        self.pose_detector = None
        self.midas_model = None
        self.midas_transform = None
        self.sam_processor = None
        self.sam_model = None
        self.sam_initialized = False

    def _lazy_load_sam(self):
        """Lazy-loads SAM 2 via the 🤗 Transformers API."""
        if self.sam_initialized:
            return True

        try:
            print("#" * 80)
            print("# 🔄 LOADING SAM 2 (Segment Anything Model 2)")
            print("#" * 80)
            model_id = "facebook/sam2-hiera-tiny"

            print(f"📥 Model ID: {model_id}")
            print("📥 Loading processor...")
            self.sam_processor = Sam2Processor.from_pretrained(model_id)
            print("📥 Loading model...")
            self.sam_model = Sam2Model.from_pretrained(model_id, torch_dtype=torch.float32).to(self.device)
            self.sam_model.eval()

            self.sam_initialized = True
            print("✅ SAM 2 loaded successfully (via Transformers)")
            return True

        except Exception as e:
            print(f"❌ ERROR loading SAM 2: {str(e)[:200]}")
            # Mark as initialized anyway so the load is not retried on every call.
            self.sam_initialized = True
            return False

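    # Minimal sketch of the box-prompted SAM 2 flow used in create_sam_mask()
    # below (assumed API, mirroring the Transformers SAM convention; boxes are
    # nested [image][box][x1, y1, x2, y2]):
    #
    #   inputs = self.sam_processor(np.array(pil_image),
    #                               input_boxes=[[[x1, y1, x2, y2]]],
    #                               return_tensors="pt").to(self.device)
    #   with torch.no_grad():
    #       logits = self.sam_model(**inputs).pred_masks  # (batch, prompts, masks, H, W)
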
    def _validate_bbox(self, image, bbox_coords):
        """Validates and corrects bbox coordinates."""
        width, height = image.size

        x1, y1, x2, y2 = bbox_coords

        # Order the corners and clamp them to the image bounds.
        x1, x2 = min(x1, x2), max(x1, x2)
        y1, y2 = min(y1, y2), max(y1, y2)

        x1 = max(0, min(x1, width - 1))
        y1 = max(0, min(y1, height - 1))
        x2 = max(0, min(x2, width - 1))
        y2 = max(0, min(y2, height - 1))

        # Degenerate box: fall back to a centered box covering 30% of the short side.
        if x2 - x1 < 10 or y2 - y1 < 10:
            size = min(width, height) * 0.3
            x1 = max(0, width / 2 - size / 2)
            y1 = max(0, height / 2 - size / 2)
            x2 = min(width, width / 2 + size / 2)
            y2 = min(height, height / 2 + size / 2)

        return int(x1), int(y1), int(x2), int(y2)

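    # Example (hypothetical values): on a 512×512 image, a flipped, partly
    # out-of-range box (600, -20, 100, 300) comes back ordered and clamped
    # as (100, 0, 511, 300).
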
    def _smooth_mask(self, mask_array, blur_radius=3):
        """Smooths the mask for softer transitions."""
        try:
            if blur_radius > 0:
                mask_array = cv2.medianBlur(mask_array, blur_radius * 2 + 1)
            return mask_array
        except Exception as e:
            print(f"⚠️ Error while smoothing the mask: {e}")
            return mask_array

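    # Example: blur_radius=3 yields a 7×7 median filter (kernel = 2*radius + 1,
    # which keeps the kernel size odd as cv2.medianBlur requires).
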
    def create_sam_mask(self, image, bbox_coords, mode):
        """
        EXTENDED function: creates a precise mask with SAM 2.
        Special handling for face_only_change: operates on an image crop.
        """
        try:
            print("#" * 80)
            print("# 🎯 STARTING SAM 2 SEGMENTATION")
            print("#" * 80)
            print(f"📐 Input image size: {image.size}")
            print(f"🎛️ Selected mode: {mode}")

            # State for the face_only_change crop workflow.
            crop_size = None
            crop_x1 = crop_y1 = crop_x2 = crop_y2 = None
            original_image = image
            best_score = 0.0

            if not self.sam_initialized:
                print("📥 SAM 2 not loaded yet, starting lazy loading...")
                self._lazy_load_sam()

            if self.sam_model is None or self.sam_processor is None:
                print("⚠️ SAM 2 model not available, using fallback")
                return self._create_rectangular_mask(image, bbox_coords, mode)
            else:
                print("✅ SAM 2 model is loaded and ready")

            x1, y1, x2, y2 = self._validate_bbox(image, bbox_coords)
            original_bbox = (x1, y1, x2, y2)
            original_bbox_size = (x2 - x1, y2 - y1)
            print(f"📏 Original bbox size: {original_bbox_size[0]} × {original_bbox_size[1]} px")

if mode == "face_only_change": |
|
|
print("-" * 60) |
|
|
print("👤 SPEZIALMODUS: NUR GESICHT - ROBUSTER WORKFLOW") |
|
|
print("-" * 60) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
original_image = image |
|
|
print(f"💾 Originalbild gesichert: {original_image.size}") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print("✂️ SCHRITT 2: ERSTELLE QUADRATISCHEN AUSSCHNITT (BBox × 2.5)") |
|
|
|
|
|
|
|
|
bbox_center_x = (x1 + x2) // 2 |
|
|
bbox_center_y = (y1 + y2) // 2 |
|
|
print(f" 📍 BBox-Zentrum: ({bbox_center_x}, {bbox_center_y})") |
|
|
|
|
|
|
|
|
bbox_width = x2 - x1 |
|
|
bbox_height = y2 - y1 |
|
|
bbox_max_dim = max(bbox_width, bbox_height) |
|
|
print(f" 📏 BBox Dimensionen: {bbox_width} × {bbox_height} px") |
|
|
print(f" 📐 Maximale BBox-Dimension: {bbox_max_dim} px") |
|
|
|
|
|
|
|
|
crop_size = int(bbox_max_dim * 2.5) |
|
|
print(f" 🎯 Ziel-Crop-Größe: {crop_size} × {crop_size} px (BBox × 2.5)") |
|
|
|
|
|
|
|
|
crop_x1 = bbox_center_x - crop_size // 2 |
|
|
crop_y1 = bbox_center_y - crop_size // 2 |
|
|
crop_x2 = crop_x1 + crop_size |
|
|
crop_y2 = crop_y1 + crop_size |
|
|
|
|
|
|
|
|
crop_x1 = max(0, crop_x1) |
|
|
crop_y1 = max(0, crop_y1) |
|
|
crop_x2 = min(original_image.width, crop_x2) |
|
|
crop_y2 = min(original_image.height, crop_y2) |
|
|
|
|
|
|
|
|
actual_crop_width = crop_x2 - crop_x1 |
|
|
actual_crop_height = crop_y2 - crop_y1 |
|
|
|
|
|
if actual_crop_width < crop_size or actual_crop_height < crop_size: |
|
|
|
|
|
if crop_x1 == 0: |
|
|
crop_x2 = min(original_image.width, crop_size) |
|
|
elif crop_x2 == original_image.width: |
|
|
crop_x1 = max(0, original_image.width - crop_size) |
|
|
|
|
|
if crop_y1 == 0: |
|
|
crop_y2 = min(original_image.height, crop_size) |
|
|
elif crop_y2 == original_image.height: |
|
|
crop_y1 = max(0, original_image.height - crop_size) |
|
|
|
|
|
print(f" 🔲 Crop-Bereich: [{crop_x1}, {crop_y1}, {crop_x2}, {crop_y2}]") |
|
|
print(f" 📏 Tatsächliche Crop-Größe: {crop_x2-crop_x1} × {crop_y2-crop_y1} px") |
|
|
|
|
|
|
|
|
cropped_image = original_image.crop((crop_x1, crop_y1, crop_x2, crop_y2)) |
|
|
print(f" ✅ Quadratischer Ausschnitt erstellt: {cropped_image.size}") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
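                # Example (hypothetical numbers): a 100×120 px face box centered
                # at (400, 300) gives crop_size = int(120 * 2.5) = 300, i.e. the
                # square [250, 150, 550, 450] before the clamping/shifting above.
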
print("📐 SCHRITT 3: BBox-KOORDINATEN TRANSFORMIEREN") |
|
|
rel_x1 = x1 - crop_x1 |
|
|
rel_y1 = y1 - crop_y1 |
|
|
rel_x2 = x2 - crop_x1 |
|
|
rel_y2 = y2 - crop_y1 |
|
|
|
|
|
|
|
|
rel_x1 = max(0, rel_x1) |
|
|
rel_y1 = max(0, rel_y1) |
|
|
rel_x2 = min(cropped_image.width, rel_x2) |
|
|
rel_y2 = min(cropped_image.height, rel_y2) |
|
|
|
|
|
print(f" 🎯 Relative BBox im Crop: [{rel_x1}, {rel_y1}, {rel_x2}, {rel_y2}]") |
|
|
print(f" 📏 Relative BBox Größe: {rel_x2-rel_x1} × {rel_y2-rel_y1} px") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print("🔍 SCHRITT 4: ERWEITERTE BILDAUFBEREITUNG FÜR GESICHTSERKENNUNG") |
|
|
|
|
|
|
|
|
contrast_enhancer = ImageEnhance.Contrast(cropped_image) |
|
|
enhanced_image = contrast_enhancer.enhance(1.8) |
|
|
|
|
|
|
|
|
sharpness_enhancer = ImageEnhance.Sharpness(enhanced_image) |
|
|
enhanced_image = sharpness_enhancer.enhance(2.0) |
|
|
|
|
|
|
|
|
brightness_enhancer = ImageEnhance.Brightness(enhanced_image) |
|
|
enhanced_image = brightness_enhancer.enhance(1.1) |
|
|
|
|
|
print(f" ✅ Erweiterte Bildaufbereitung abgeschlossen") |
|
|
print(f" • Kontrast: +80%") |
|
|
print(f" • Schärfe: +100%") |
|
|
print(f" • Helligkeit: +10%") |
|
|
|
|
|
|
|
|
image = enhanced_image |
|
|
x1, y1, x2, y2 = rel_x1, rel_y1, rel_x2, rel_y2 |
|
|
|
|
|
print(" 🔄 SAM wird auf aufbereitetem Ausschnitt ausgeführt") |
|
|
print(f" 📊 SAM-Eingabegröße: {image.size}") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print("-" * 60) |
|
|
print(f"📦 BOUNDING BOX DETAILS FÜR SAM:") |
|
|
print(f" Bild-Größe für SAM: {image.size}") |
|
|
print(f" BBox Koordinaten: [{x1}, {y1}, {x2}, {y2}]") |
|
|
print(f" BBox Dimensionen: {x2-x1}px × {y2-y1}px") |
|
|
|
|
|
|
|
|
print("-" * 60) |
|
|
print("🖼️ BILDAUFBEREITUNG FÜR SAM 2") |
|
|
image_np = np.array(image.convert("RGB")) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print("🎯 SCHRITT 4-5: ERWEITERTE SAM-PROMPTING") |
|
|
|
|
|
bbox_width = x2 - x1 |
|
|
bbox_height = y2 - y1 |
|
|
|
|
|
|
|
|
if mode == "face_only_change": |
|
|
|
|
|
input_boxes = [[[x1, y1, x2, y2]]] |
|
|
|
|
|
|
|
|
expand_factor = 0.15 |
|
|
expanded_x1 = max(0, int(x1 - bbox_width * expand_factor)) |
|
|
expanded_y1 = max(0, int(y1 - bbox_height * expand_factor)) |
|
|
expanded_x2 = min(image.width, int(x2 + bbox_width * expand_factor)) |
|
|
expanded_y2 = min(image.height, int(y2 + bbox_height * expand_factor)) |
|
|
|
|
|
input_boxes.append([[expanded_x1, expanded_y1, expanded_x2, expanded_y2]]) |
|
|
|
|
|
print(f" Haupt-BBox: [{x1}, {y1}, {x2}, {y2}]") |
|
|
print(f" Erweiterte BBox: [{expanded_x1}, {expanded_y1}, {expanded_x2}, {expanded_y2}]") |
|
|
print(f" Anzahl BBox-Prompts: {len(input_boxes)}") |
|
|
else: |
|
|
|
|
|
input_boxes = [[[x1, y1, x2, y2]]] |
|
|
print(f" Standard-BBox: [{x1}, {y1}, {x2}, {y2}]") |
|
|
|
|
|
print(" Verarbeite Bild mit SAM 2 Processor...") |
|
|
inputs = self.sam_processor( |
|
|
image_np, |
|
|
input_boxes=input_boxes, |
|
|
return_tensors="pt" |
|
|
).to(self.device) |
|
|
print(f"✅ Processor-Ausgabe: {len(inputs)} Elemente") |
|
|
|
|
|
|
|
|
print("-" * 60) |
|
|
print("🧠 SAM 2 INFERENZ (Vorhersage)") |
|
|
with torch.no_grad(): |
|
|
print(" Führe Vorhersage durch...") |
|
|
outputs = self.sam_model(**inputs) |
|
|
print(f"✅ Vorhersage abgeschlossen") |
|
|
print(f" Anzahl der Vorhersagemasken: {outputs.pred_masks.shape[2]}") |
|
|
|
|
|
|
|
|
print("📏 SCHRITT 6: MASKE EXTRAHIEREN") |
|
|
|
|
|
num_masks = outputs.pred_masks.shape[2] |
|
|
print(f" SAM lieferte {num_masks} verschiedene Masken") |
|
|
|
|
|
|
|
|
all_masks = [] |
|
|
|
|
|
for i in range(num_masks): |
|
|
single_mask = outputs.pred_masks[:, :, i, :, :] |
|
|
resized_mask = F.interpolate( |
|
|
single_mask, |
|
|
size=(image.height, image.width), |
|
|
mode='bilinear', |
|
|
align_corners=False |
|
|
).squeeze() |
|
|
|
|
|
mask_np = resized_mask.sigmoid().cpu().numpy() |
|
|
all_masks.append(mask_np) |
|
|
|
|
|
|
|
|
mask_binary = (mask_np > 0.5).astype(np.uint8) |
|
|
mask_area = np.sum(mask_binary) |
|
|
print(f" Maske {i+1}: Größe={mask_area:,} Pixel, Max-Konfidenz={mask_np.max():.3f}") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
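            # Shape example (hypothetical): with 2 box prompts and 3 masks per
            # prompt, pred_masks (1, 2, 3, H, W) flattens to (1, 6, H, W),
            # i.e. 6 scoring candidates below.
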
print("🤔 SCHRITT 6: MASKENAUSWAHL MIT MODUS-SPEZIFISCHER HEURISTIK") |
|
|
|
|
|
bbox_center = ((x1 + x2) // 2, (y1 + y2) // 2) |
|
|
bbox_area = (x2 - x1) * (y2 - y1) |
|
|
print(f" Erwartetes BBox-Zentrum: {bbox_center}") |
|
|
print(f" Erwartete BBox-Fläche: {bbox_area:,} Pixel") |
|
|
|
|
|
best_mask_idx = 0 |
|
|
best_score = -1 |
|
|
|
|
|
            for i, mask_np in enumerate(all_masks):
                mask_max = mask_np.max()

                # Gate out low-confidence candidates early.
                if mask_max < 0.3:
                    print(f"   ❌ Mask {i + 1}: confidence too low ({mask_max:.3f}), skipping")
                    continue

                # Adaptive binarization threshold relative to the peak confidence.
                adaptive_threshold = max(0.3, mask_max * 0.7)
                mask_binary = (mask_np > adaptive_threshold).astype(np.uint8)

                if np.sum(mask_binary) == 0:
                    print(f"   ❌ Mask {i + 1}: no pixels left after threshold {adaptive_threshold:.3f}")
                    continue

                mask_area_pixels = np.sum(mask_binary)

if mode == "face_only_change": |
|
|
print(f" 🔍 Analysiere Maske {i+1} mit GESICHTS-HEURISTIK") |
|
|
|
|
|
|
|
|
area_ratio = mask_area_pixels / bbox_area |
|
|
print(f" 📐 Flächen-Ratio: {area_ratio:.3f} ({mask_area_pixels:,} / {bbox_area:,} Pixel)") |
|
|
|
|
|
|
|
|
if area_ratio < 0.6: |
|
|
print(f" ⚠️ Fläche zu klein für Kopf (<60% der BBox)") |
|
|
area_score = area_ratio * 0.5 |
|
|
elif area_ratio > 1.5: |
|
|
print(f" ⚠️ Fläche zu groß für Kopf (>150% der BBox)") |
|
|
area_score = 2.0 - area_ratio |
|
|
elif 0.8 <= area_ratio <= 1.2: |
|
|
area_score = 1.0 |
|
|
print(f" ✅ Perfekte Kopfgröße (80-120% der BBox)") |
|
|
else: |
|
|
|
|
|
area_score = 1.0 - abs(area_ratio - 1.0) * 0.5 |
|
|
|
|
|
|
|
|
                    # Compactness analysis of the largest connected region.
                    labeled_mask = measure.label(mask_binary)
                    regions = measure.regionprops(labeled_mask)

                    if len(regions) == 0:
                        compactness_score = 0.1
                        print("      ❌ No connected regions found")
                    else:
                        largest_region = max(regions, key=lambda r: r.area)

                        # Solidity: region area over its convex hull area.
                        solidity = largest_region.solidity if hasattr(largest_region, 'solidity') else 0.7

                        # Eccentricity: 0 = circle, 1 = line segment.
                        eccentricity = largest_region.eccentricity if hasattr(largest_region, 'eccentricity') else 0.5

                        # Heads are roughly elliptical, so mid-range eccentricity scores best.
                        if 0.4 <= eccentricity <= 0.9:
                            eccentricity_score = 1.0 - abs(eccentricity - 0.65) * 2
                        else:
                            eccentricity_score = 0.2

                        compactness_score = (solidity * 0.6 + eccentricity_score * 0.4)
                        print("      🎯 Compactness analysis:")
                        print(f"         • Solidity (area/convex): {solidity:.3f}")
                        print(f"         • Eccentricity (shape): {eccentricity:.3f}")
                        print(f"         • Compactness score: {compactness_score:.3f}")

                    # Overlap between the mask and the prompt bbox.
                    bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8)
                    bbox_mask[y1:y2, x1:x2] = 1
                    overlap = np.sum(mask_binary & bbox_mask)
                    bbox_overlap_ratio = overlap / mask_area_pixels if mask_area_pixels > 0 else 0

                    if bbox_overlap_ratio >= 0.7:
                        bbox_score = 1.0
                        print(f"      ✅ High bbox overlap: {bbox_overlap_ratio:.3f} ({overlap:,} px)")
                    elif bbox_overlap_ratio >= 0.5:
                        bbox_score = bbox_overlap_ratio * 1.2
                        print(f"      ⚠️ Medium bbox overlap: {bbox_overlap_ratio:.3f}")
                    else:
                        bbox_score = bbox_overlap_ratio * 0.8
                        print(f"      ❌ Low bbox overlap: {bbox_overlap_ratio:.3f}")

                    confidence_score = mask_max

                    # Weighted total score for the face heuristic.
                    score = (
                        area_score * 0.4 +
                        compactness_score * 0.3 +
                        bbox_score * 0.2 +
                        confidence_score * 0.1
                    )

                    print(f"   📊 FACE SCORES for mask {i + 1}:")
                    print(f"      • Area score: {area_score:.3f}")
                    print(f"      • Compactness score: {compactness_score:.3f}")
                    print(f"      • BBox overlap score: {bbox_score:.3f}")
                    print(f"      • Confidence score: {confidence_score:.3f}")
                    print(f"      • TOTAL SCORE: {score:.3f}")

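                    # Worked example (hypothetical values): area_score 0.9,
                    # compactness 0.8, bbox overlap score 1.0, confidence 0.95 ->
                    # 0.9*0.4 + 0.8*0.3 + 1.0*0.2 + 0.95*0.1 = 0.895.
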
                else:
                    # Standard heuristic (focus_change / environment_change).
                    bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8)
                    bbox_mask[y1:y2, x1:x2] = 1

                    overlap = np.sum(mask_binary & bbox_mask)
                    bbox_overlap_ratio = overlap / np.sum(bbox_mask) if np.sum(bbox_mask) > 0 else 0

                    # Distance of the mask centroid from the bbox center.
                    y_coords, x_coords = np.where(mask_binary > 0)
                    if len(y_coords) > 0:
                        centroid_y = np.mean(y_coords)
                        centroid_x = np.mean(x_coords)
                        centroid_distance = np.sqrt((centroid_x - bbox_center[0]) ** 2 + (centroid_y - bbox_center[1]) ** 2)
                        normalized_distance = centroid_distance / max(image.width, image.height)
                    else:
                        centroid_distance = None
                        normalized_distance = 1.0

                    # How closely the mask area matches the bbox area.
                    area_ratio = mask_area_pixels / bbox_area
                    area_score = 1.0 - min(abs(area_ratio - 1.0), 1.0)

                    confidence_score = mask_max

                    # Weighted total score.
                    score = (
                        bbox_overlap_ratio * 0.4 +
                        (1.0 - normalized_distance) * 0.25 +
                        area_score * 0.25 +
                        confidence_score * 0.1
                    )

                    print(f"   📊 STANDARD SCORES for mask {i + 1}:")
                    print(f"      • BBox overlap: {bbox_overlap_ratio:.3f}")
                    print(f"      • Center distance: {centroid_distance if centroid_distance is not None else 'N/A'}")
                    print(f"      • Area ratio: {area_ratio:.3f}")
                    print(f"      • TOTAL SCORE: {score:.3f}")

                if score > best_score:
                    best_score = score
                    best_mask_idx = i
                    print(f"   🏆 New best mask: no. {i + 1} with score {score:.3f}")

            print(f"✅ Best mask selected: no. {best_mask_idx + 1} with score {best_score:.3f}")

            mask_np = all_masks[best_mask_idx]

            # Dynamic binarization threshold based on SAM's peak confidence.
            max_val = mask_np.max()
            print(f"   🔍 Peak SAM confidence of the best mask: {max_val:.3f}")

            if mode == "face_only_change":
                if max_val < 0.5:
                    dynamic_threshold = 0.25
                    print(f"   ⚠️ SAM is uncertain about the face (max_val={max_val:.3f} < 0.5)")
                elif max_val < 0.8:
                    dynamic_threshold = max_val * 0.65
                    print(f"   ℹ️ SAM is moderately confident about the face (max_val={max_val:.3f})")
                else:
                    dynamic_threshold = max_val * 0.75
                    print(f"   ✅ SAM is confident about the face (max_val={max_val:.3f} >= 0.8)")

                print(f"   🎯 Face threshold: {dynamic_threshold:.3f}")
            else:
                if max_val < 0.6:
                    dynamic_threshold = 0.3
                    print(f"   ⚠️ SAM is uncertain (max_val={max_val:.3f} < 0.6)")
                else:
                    dynamic_threshold = max_val * 0.8
                    print(f"   ✅ SAM is confident (max_val={max_val:.3f} >= 0.6)")

                print(f"   🎯 Standard threshold: {dynamic_threshold:.3f}")

            mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255

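            # Worked example (hypothetical value): in face mode a peak
            # confidence of 0.9 falls into the >= 0.8 branch, so the threshold
            # is 0.9 * 0.75 = 0.675.
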
print("🔧 SCHRITT 7: MODUS-SPEZIFISCHES POSTPROCESSING") |
|
|
|
|
|
if mode == "face_only_change": |
|
|
print("👤 GESICHTS-SPEZIFISCHES POSTPROCESSING") |
|
|
|
|
|
|
|
|
labeled_array, num_features = ndimage.label(mask_array) |
|
|
|
|
|
if num_features > 0: |
|
|
print(f" 🔍 Gefundene Komponenten: {num_features}") |
|
|
|
|
|
sizes = ndimage.sum(mask_array, labeled_array, range(1, num_features + 1)) |
|
|
largest_component_idx = np.argmax(sizes) + 1 |
|
|
|
|
|
print(f" 👑 Größte Komponente: Nr. {largest_component_idx} mit {sizes[largest_component_idx-1]:,} Pixel") |
|
|
|
|
|
|
|
|
mask_array = np.where(labeled_array == largest_component_idx, mask_array, 0) |
|
|
|
|
|
|
|
|
print(" 🎯 Formbasierte Optimierung für Kopf") |
|
|
|
|
|
|
|
|
labeled_single = np.where(labeled_array == largest_component_idx, 1, 0).astype(np.uint8) |
|
|
regions = measure.regionprops(labeled_single) |
|
|
|
|
|
if regions: |
|
|
region = regions[0] |
|
|
|
|
|
|
|
|
minr, minc, maxr, maxc = region.bbox |
|
|
head_bbox_height = maxr - minr |
|
|
head_bbox_width = maxc - minc |
|
|
|
|
|
|
|
|
aspect_ratio = head_bbox_height / head_bbox_width if head_bbox_width > 0 else 1.0 |
|
|
|
|
|
print(f" 📏 Kopf-BBox: {head_bbox_width}×{head_bbox_height} (Ratio: {aspect_ratio:.2f})") |
|
|
|
|
|
|
|
|
if aspect_ratio < 1.0 and head_bbox_height < bbox_height * 0.8: |
|
|
print(f" ⬇️ Kopf zu flach, vertikal erweitern") |
|
|
expand_y = int((bbox_height * 0.8 - head_bbox_height) / 2) |
|
|
minr = max(0, minr - expand_y) |
|
|
maxr = min(mask_array.shape[0], maxr + expand_y) |
|
|
|
|
|
|
|
|
mask_array[minr:maxr, minc:maxc] = 255 |
|
|
|
|
|
|
|
|
                # Morphological cleanup of the head mask.
                print("   ⚙️ Morphological operations for a clean head mask")

                kernel_close = np.ones((7, 7), np.uint8)
                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close, iterations=1)
                print("      • MORPH_CLOSE (7x7) - fill holes in the head")

                kernel_open = np.ones((5, 5), np.uint8)
                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_OPEN, kernel_open, iterations=1)
                print("      • MORPH_OPEN (5x5) - remove noise")

                mask_array = cv2.GaussianBlur(mask_array, (5, 5), 1.0)
                mask_array = (mask_array > 127).astype(np.uint8) * 255
                print("      • GaussianBlur + re-threshold - smooth edges")

print("-" * 60) |
|
|
print("🔄 MASKE VOM AUSSCHNITT ZURÜCK AUF ORIGINALGRÖSSE") |
|
|
|
|
|
temp_mask = Image.fromarray(mask_array).convert("L") |
|
|
print(f" Maskengröße auf Ausschnitt: {temp_mask.size}") |
|
|
|
|
|
final_mask = Image.new("L", original_image.size, 0) |
|
|
print(f" Leere Maske in Originalgröße: {final_mask.size}") |
|
|
|
|
|
final_mask.paste(temp_mask, (crop_x1, crop_y1)) |
|
|
print(f" Maskenposition im Original: ({crop_x1}, {crop_y1})") |
|
|
|
|
|
mask_array = np.array(final_mask) |
|
|
print(f" ✅ Maske zurück auf Originalgröße skaliert: {mask_array.shape}") |
|
|
|
|
|
image = original_image |
|
|
print(f" 🔄 Bild-Referenz wieder auf Original gesetzt: {image.size}") |
|
|
|
|
|
elif mode == "focus_change": |
|
|
print("🎯 FOCUS-CHANGE POSTPROCESSING") |
|
|
mask_array = mask_array.copy() |
|
|
|
|
|
|
|
|
labeled_array, num_features = ndimage.label(mask_array) |
|
|
if num_features > 1: |
|
|
sizes = ndimage.sum(mask_array, labeled_array, range(1, num_features + 1)) |
|
|
largest_component = np.argmax(sizes) + 1 |
|
|
mask_array = np.where(labeled_array == largest_component, mask_array, 0) |
|
|
print(f" ✅ Behalte größte Person-Komponente ({num_features} → 1 Komponente)") |
|
|
|
|
|
|
|
|
kernel = np.ones((3,3), np.uint8) |
|
|
mask_array = cv2.dilate(mask_array, kernel, iterations=1) |
|
|
print(" ✅ Dilation für bessere Personenabdeckung") |
|
|
|
|
|
elif mode == "environment_change": |
|
|
print("🌳 ENVIRONMENT-CHANGE POSTPROCESSING") |
|
|
mask_array = 255 - mask_array |
|
|
print(" ✅ Maske invertiert (Person schwarz, Hintergrund weiß)") |
|
|
|
|
|
|
|
|
kernel = np.ones((5,5), np.uint8) |
|
|
mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel) |
|
|
print(" ✅ MORPH_CLOSE für zusammenhängende Umgebung") |
|
|
|
|
|
|
|
|
            white_pixels = np.sum(mask_array > 127)
            total_pixels = mask_array.size
            white_ratio = white_pixels / total_pixels * 100

            print("-" * 60)
            print("📊 MASK STATISTICS (FINAL)")
            print(f"   White pixels (region to change): {white_pixels:,} ({white_ratio:.1f}%)")
            print(f"   Black pixels (region to keep): {total_pixels - white_pixels:,} ({100 - white_ratio:.1f}%)")
            print(f"   Total pixels: {total_pixels:,}")

            if mode == "face_only_change":
                original_face_area = original_bbox_size[0] * original_bbox_size[1]
                coverage_ratio = white_pixels / original_face_area if original_face_area > 0 else 0
                print(f"   👤 FACE COVERAGE: {coverage_ratio:.1%} of the original bbox")

                if coverage_ratio < 0.7:
                    print(f"   ⚠️ WARNING: low face coverage ({coverage_ratio:.1%})")
                    print("   💡 Tip: the bbox may be too large, or SAM does not detect the whole face")
                elif coverage_ratio > 1.3:
                    print(f"   ⚠️ WARNING: very high face coverage ({coverage_ratio:.1%})")
                    print("   💡 Tip: the mask may contain too much background")
                elif 0.8 <= coverage_ratio <= 1.2:
                    print(f"   ✅ OPTIMAL face coverage ({coverage_ratio:.1%})")

            mask = Image.fromarray(mask_array).convert("L")

            print("#" * 80)
            print("✅ SAM 2 SEGMENTATION FINISHED")
            print(f"📐 Final mask size: {mask.size}")
            print(f"🎛️ Mode used: {mode}")

            if mode == "face_only_change" and crop_size is not None:
                print(f"👤 face_only_change: crop={crop_size}×{crop_size}px, heuristic score={best_score:.3f}")
                print(f"👤 Head coverage: {coverage_ratio:.1%} of the bbox")

            print("#" * 80)
            return mask

        except Exception as e:
            print("❌" * 40)
            print("❌ ERROR IN SAM 2 SEGMENTATION")
            print("❌" * 40)
            print(f"Error: {str(e)[:200]}")
            import traceback
            traceback.print_exc()
            print("ℹ️ Falling back to a rectangular mask")
            return self._create_rectangular_mask(image, bbox_coords, mode)

    def _create_rectangular_mask(self, image, bbox_coords, mode):
        """Fallback: creates a rectangular mask."""
        print("#" * 80)
        print("# ⚠️ FALLBACK: CREATING RECTANGULAR MASK")
        print("#" * 80)

        from PIL import ImageDraw

        mask = Image.new("L", image.size, 0)
        print(f"📐 Creating empty mask: {mask.size}")

        if bbox_coords and all(coord is not None for coord in bbox_coords):
            x1, y1, x2, y2 = self._validate_bbox(image, bbox_coords)
            draw = ImageDraw.Draw(mask)

            if mode == "environment_change":
                # White everywhere except the protected bbox.
                draw.rectangle([0, 0, image.size[0], image.size[1]], fill=255)
                draw.rectangle([x1, y1, x2, y2], fill=0)
                print(f"   Mode: change environment - bbox protected: [{x1}, {y1}, {x2}, {y2}]")
            else:
                draw.rectangle([x1, y1, x2, y2], fill=255)
                print(f"   Mode: change focus/face - bbox changed: [{x1}, {y1}, {x2}, {y2}]")

        print("✅ Rectangular mask created")
        return mask

    def load_pose_detector(self):
        """Loads only the pose detector."""
        if self.pose_detector is None:
            print("#" * 80)
            print("# 📥 LOADING POSE DETECTOR")
            print("#" * 80)
            try:
                self.pose_detector = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
                print("✅ Pose detector loaded")
            except Exception as e:
                print(f"⚠️ Pose detector could not be loaded: {e}")
        return self.pose_detector

    def load_midas_model(self):
        """Loads the MiDaS model for depth maps."""
        if self.midas_model is None:
            print("#" * 80)
            print("# 📥 LOADING MIDAS MODEL FOR DEPTH MAPS")
            print("#" * 80)
            try:
                import torchvision.transforms as T

                self.midas_model = torch.hub.load(
                    "intel-isl/MiDaS",
                    "DPT_Hybrid",
                    trust_repo=True
                )

                self.midas_model.to(self.device)
                self.midas_model.eval()

                self.midas_transform = T.Compose([
                    T.Resize(384),
                    T.ToTensor(),
                    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                ])

                print("✅ MiDaS model loaded successfully")
            except Exception as e:
                print(f"❌ MiDaS could not be loaded: {e}")
                print("ℹ️ Using fallback method")
                self.midas_model = None

        return self.midas_model

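    # Caveat (assumption, not verified here): the official MiDaS transforms
    # resize to multiples of 32, which DPT expects; the simplified T.Resize(384)
    # above may fail on unusual aspect ratios, in which case extract_depth_map()
    # falls back to the blur-based depth approximation.
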
    def extract_pose_simple(self, image):
        """Simple pose extraction without complex dependencies."""
        print("#" * 80)
        print("# ⚠️ CREATING SIMPLE POSE MAP (FALLBACK)")
        print("#" * 80)
        try:
            img_array = np.array(image.convert("RGB"))
            gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
            edges = cv2.Canny(gray, 100, 200)
            pose_image = Image.fromarray(edges).convert("RGB")
            print("⚠️ Using edge-based pose approximation")
            return pose_image
        except Exception as e:
            print(f"Error in simple pose extraction: {e}")
            return image.convert("RGB").resize((512, 512))

    def extract_pose(self, image):
        """Extracts a pose map from the image, with fallback."""
        print("#" * 80)
        print("# 🕺 CREATING POSE MAP")
        print("#" * 80)
        try:
            detector = self.load_pose_detector()
            if detector is None:
                print("⚠️ No pose detector available, using fallback")
                return self.extract_pose_simple(image)

            print("   Extracting pose with OpenPose...")
            pose_image = detector(image, hand_and_face=True)
            print("✅ Pose map created successfully")
            return pose_image
        except Exception as e:
            print(f"Error in pose extraction: {e}")
            return self.extract_pose_simple(image)

    def extract_canny_edges(self, image):
        """Extracts Canny edges to preserve the environment."""
        print("#" * 80)
        print("# 🎨 CREATING CANNY EDGE MAP")
        print("#" * 80)
        try:
            img_array = np.array(image.convert("RGB"))

            gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
            # 100/200 are the lower/upper hysteresis thresholds.
            edges = cv2.Canny(gray, 100, 200)

            edges_rgb = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
            edges_image = Image.fromarray(edges_rgb)

            print("✅ Canny edge map created")
            return edges_image
        except Exception as e:
            print(f"Error in Canny edge extraction: {e}")
            return image.convert("RGB").resize((512, 512))

    def extract_depth_map(self, image):
        """
        Extracts a depth map with MiDaS (falls back to a filter-based map).
        """
        print("#" * 80)
        print("# 🏔️ CREATING DEPTH MAP")
        print("#" * 80)
        try:
            midas = self.load_midas_model()
            if midas is not None:
                print("🎯 Using MiDaS for the depth map...")

                img_transformed = self.midas_transform(image).unsqueeze(0).to(self.device)

                with torch.no_grad():
                    print("   Running MiDaS inference...")
                    prediction = midas(img_transformed)
                    # Resize the prediction back to the input resolution
                    # (image.size is (W, H); interpolate expects (H, W)).
                    prediction = torch.nn.functional.interpolate(
                        prediction.unsqueeze(1),
                        size=image.size[::-1],
                        mode="bicubic",
                        align_corners=False,
                    ).squeeze()

                depth_np = prediction.cpu().numpy()
                depth_min, depth_max = depth_np.min(), depth_np.max()
                print(f"   Depth values: min={depth_min:.3f}, max={depth_max:.3f}")

                # Normalize to [0, 255].
                if depth_max > depth_min:
                    depth_np = (depth_np - depth_min) / (depth_max - depth_min)

                depth_np = (depth_np * 255).astype(np.uint8)
                depth_image = Image.fromarray(depth_np).convert("RGB")

                print("✅ MiDaS depth map created successfully")
                return depth_image

            else:
                raise Exception("MiDaS not loaded")

        except Exception as e:
            print(f"⚠️ MiDaS error: {e}. Using fallback...")
            try:
                img_array = np.array(image.convert("RGB"))
                gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)

                # Crude approximation: blurred luminance as pseudo-depth.
                depth_map = cv2.GaussianBlur(gray, (5, 5), 0)
                depth_rgb = cv2.cvtColor(depth_map, cv2.COLOR_GRAY2RGB)
                depth_image = Image.fromarray(depth_rgb)

                print("✅ Fallback depth map created")
                return depth_image
            except Exception as fallback_error:
                print(f"❌ Fallback failed as well: {fallback_error}")
                return image.convert("RGB").resize((512, 512))

    def prepare_controlnet_maps(self, image, keep_environment=False):
        """
        Creates conditioning maps ONLY; does NOT generate an image.
        """
        print("#" * 80)
        print("# 🎯 STARTING CONTROLNET CONDITIONING-MAP CREATION")
        print("#" * 80)
        print(f"📐 Input image size: {image.size}")
        print(f"🎛️ Mode: {'Depth + Canny' if keep_environment else 'OpenPose + Canny'}")

        if keep_environment:
            print("   Mode: Depth + Canny")
            print("   Step 1/2: extracting depth map...")
            depth_map = self.extract_depth_map(image)
            print("   Step 2/2: extracting Canny edges...")
            canny_map = self.extract_canny_edges(image)
            conditioning_images = [depth_map, canny_map]
        else:
            print("   Mode: OpenPose + Canny")
            print("   Step 1/2: extracting pose map...")
            pose_map = self.extract_pose(image)
            print("   Step 2/2: extracting Canny edges...")
            canny_map = self.extract_canny_edges(image)
            conditioning_images = [pose_map, canny_map]

        print("-" * 60)
        print(f"✅ {len(conditioning_images)} CONDITIONING MAPS CREATED")
        for i, img in enumerate(conditioning_images):
            print(f"   Map {i + 1}: {img.size}, mode: {img.mode}")
        print("#" * 80)

        return conditioning_images

device = "cuda" if torch.cuda.is_available() else "cpu" |
|
|
torch_dtype = torch.float16 if device == "cuda" else torch.float32 |
|
|
controlnet_processor = ControlNetProcessor(device=device, torch_dtype=torch_dtype) |
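
# Usage sketch (hypothetical wiring, not executed here): combine the processor
# with a multi-ControlNet pipeline and the progress callback defined above.
#
#   maps = controlnet_processor.prepare_controlnet_maps(input_image, keep_environment=True)
#   mask = controlnet_processor.create_sam_mask(input_image, (x1, y1, x2, y2), "face_only_change")
#   result = pipe(prompt, image=input_image, control_image=maps,
#                 callback_on_step_end=ControlNetProgressCallback(progress, 30))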