Update sam_module.py

sam_module.py  CHANGED  (+0 −1027)

@@ -1082,1033 +1082,6 @@ def create_sam_mask(self, image, bbox_coords, mode):
             print(f"❌ Unbekannter Modus: {mode}")
             return self._create_rectangular_mask(image, bbox_coords, "focus_change")
 
-        except Exception as e:
-            print("❌" * 40)
-            print("❌ FEHLER IN SAM 2 SEGMENTIERUNG")
-            print(f"Fehler: {str(e)[:200]}")
-            print("❌" * 40)
-            import traceback
-            traceback.print_exc()
-
-            # Fallback
-            fallback_mask = self._create_rectangular_mask(original_image, original_bbox, mode)
-            if fallback_mask.size != original_image.size:
-                print(f" ⚠️ Fallback-Maske angepasst: {fallback_mask.size} → {original_image.size}")
-                fallback_mask = fallback_mask.resize(original_image.size, Image.Resampling.NEAREST)
-
-            return fallback_mask, fallback_mask
-
-
-
-
-
-
-
-
-
-
-
-    def create_sam_mask(self, image, bbox_coords, mode):
-        """
-        ERWEITERTE Funktion: Erstellt präzise Maske mit SAM 2
-        """
-        try:
-            print("#" * 80)
-            print("# 🎯 STARTE SAM 2 SEGMENTIERUNG")
-            print("#" * 80)
-            print(f"📐 Eingabebild-Größe: {image.size}")
-            print(f"🎛️ Ausgewählter Modus: {mode}")
-
-            # ============================================================
-            # VORBEREITUNG FÜR ALLE MODI
-            # ============================================================
-            original_image = image
-
-            # 1. SAM2 laden
-            if not self.sam_initialized:
-                print("📥 SAM 2 ist noch nicht geladen, starte Lazy Loading...")
-                self._lazy_load_sam()
-
-            if self.sam_model is None or self.sam_processor is None:
-                print("⚠️ SAM 2 Model nicht verfügbar, verwende Fallback")
-                return self._create_rectangular_mask(image, bbox_coords, mode)
-
-            # 2. Validiere BBox
-            x1, y1, x2, y2 = self._validate_bbox(image, bbox_coords)
-            original_bbox = (x1, y1, x2, y2)
-            print(f"📏 Original-BBox Größe: {x2-x1} × {y2-y1} px")
-
-            # ============================================================
-            # BLOCK 1: ENVIRONMENT_CHANGE
-            # ============================================================
-            if mode == "environment_change":
-                print("-" * 60)
-                print("🌳 MODUS: ENVIRONMENT_CHANGE")
-                print("-" * 60)
-
-                # Der Prozessor von SAM erwartet ein NumPy-Array kein PIL
-                image_np = np.array(image.convert("RGB"))
-
-                # Packt die BBox-Koordinaten in eine 3D-Liste
-                input_boxes = [[[x1, y1, x2, y2]]]
-
-                # Aufruf des SAM-Prozessors mit Originalbild in Form NumPy-Array und BBox. Der Processor verarbeitet Bild und BBox
-                # in die für SAM erforderlichen Tensoren und speichert sie in inputs.
-                inputs = self.sam_processor(
-                    image_np,
-                    input_boxes=input_boxes,
-                    return_tensors="pt"
-                ).to(self.device)  # Ohne .to(self.device) werden die Tensoren standardmäßig im CPU-RAM erzeugt und gespeichert! Da GPU-Fehler!
-
-                print(f" - 'input_boxes' Shape: {inputs['input_boxes'].shape}")
-
-                # SAM2 Vorhersage
-                print("-" * 60)
-                print("🧠 SAM 2 INFERENZ (Vorhersage)")
-                with torch.no_grad():
-                    print(" Führe Vorhersage durch...")
-                    outputs = self.sam_model(**inputs)  # führt die Segmentierung mit SAM aus
-                print(f"✅ Vorhersage abgeschlossen")
-                print(f" Anzahl der Vorhersagemasken: {outputs.pred_masks.shape[2]}")
-
-                num_masks = outputs.pred_masks.shape[2]
-                print(f" SAM lieferte {num_masks} verschiedene Masken")
-
-                # Sammlung aller Masken in all_masks
-                all_masks = []
-
-                for i in range(num_masks):
-                    single_mask = outputs.pred_masks[:, :, i, :, :]
-                    resized_mask = F.interpolate(
-                        single_mask,
-                        size=(image.height, image.width),
-                        mode='bilinear',
-                        align_corners=False
-                    ).squeeze()
-
-                    mask_np = resized_mask.sigmoid().cpu().numpy()  # wandelt Modellausgaben in Wahrscheinlichkeiten und bewegt Daten von GPU nach CPU
-                    all_masks.append(mask_np)  # fügt die aktuelle Maske der Liste all_masks hinzu
-
-
-                bbox_center = ((x1 + x2) // 2, (y1 + y2) // 2)
-                bbox_area = (x2 - x1) * (y2 - y1)
-                print(f" Erwartetes BBox-Zentrum: {bbox_center}")
-                print(f" Erwartete BBox-Fläche: {bbox_area:,} Pixel")
-
-                print("🤔 HEURISTIK: Beste Maske auswählen")
-                best_mask_idx = 0
-                best_score = -1
-
-                # Alle 3 Masken analysieren (OHNE sie alle zu skalieren!)
-                for i in range(num_masks):
-                    mask_np_temp = all_masks[i]  # verwende Maske auf Original-Bildgröße
-
-                    # Adaptive Vor-Filterung (prüft ob Maske überhaupt gültig ist)
-                    mask_max = mask_np_temp.max()
-                    if mask_max < 0.3:
-                        continue  # Maske überspringen
-
-                    adaptive_threshold = max(0.3, mask_max * 0.7)
-                    mask_binary = (mask_np_temp > adaptive_threshold).astype(np.uint8)
-
-                    # wenn nur schwarze Pixel (keine Segmentierung) nimm die nächste Maske
-                    if np.sum(mask_binary) == 0:
-                        print(f" ❌ Maske {i+1}: Keine Pixel nach adaptive_threshold {adaptive_threshold:.3f}")
-                        continue
-
-                    # Heuristik-Berechnung
-                    mask_area_pixels = np.sum(mask_binary)
-
-                    # Berechnung von Überlappung SAM-Maske und ursprünglicher BBox
-                    bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8)
-                    bbox_mask[y1:y2, x1:x2] = 1
-
-                    overlap = np.sum(mask_binary & bbox_mask)
-                    bbox_overlap_ratio = overlap / np.sum(bbox_mask) if np.sum(bbox_mask) > 0 else 0
-
-                    # Schwerpunkt berechnen
-                    y_coords, x_coords = np.where(mask_binary > 0)
-                    if len(y_coords) > 0:
-                        centroid_y = np.mean(y_coords)
-                        centroid_x = np.mean(x_coords)
-                        centroid_distance = np.sqrt((centroid_x - bbox_center[0])**2 + (centroid_y - bbox_center[1])**2)
-                        normalized_distance = centroid_distance / max(image.width, image.height)
-                    else:
-                        normalized_distance = 1.0
-
-                    # Flächen-Ratio
-                    area_ratio = mask_area_pixels / bbox_area
-                    area_score = 1.0 - min(abs(area_ratio - 1.0), 1.0)
-
-                    # Konfidenz
-                    confidence_score = mask_max
-
-                    # Standard-Score
-                    score = (
-                        bbox_overlap_ratio * 0.4 +
-                        (1.0 - normalized_distance) * 0.25 +
-                        area_score * 0.25 +
-                        confidence_score * 0.1
-                    )
-
-                    print(f" 📊 STANDARD-SCORES für Maske {i+1}:")
-                    print(f" • BBox-Überlappung: {bbox_overlap_ratio:.3f}")
-                    print(f" • Zentrums-Distanz: {centroid_distance if 'centroid_distance' in locals() else 'N/A'}")
-                    print(f" • Flächen-Ratio: {area_ratio:.3f}")
-                    print(f" • GESAMTSCORE: {score:.3f}")
-
-                    if score > best_score:
-                        best_score = score
-                        best_mask_idx = i
-                        print(f" 🏆 Neue beste Maske: Nr. {i+1} mit Score {score:.3f}")
-
-                print(f"✅ Beste Maske ausgewählt: Nr. {best_mask_idx+1} mit Score {best_score:.3f}")
-
-                # Beste Maske verwenden - mask_np beste Maske
-                mask_np = all_masks[best_mask_idx]
-
-                max_val = mask_np.max()
-                print(f" 🔍 Maximaler SAM-Konfidenzwert der besten Maske: {max_val:.3f}")
-
-                if max_val < 0.6:
-                    dynamic_threshold = 0.3
-                    print(f" ⚠️ SAM ist unsicher (max_val={max_val:.3f} < 0.6)")
-                else:
-                    dynamic_threshold = max_val * 0.8
-                    print(f" ✅ SAM ist sicher (max_val={max_val:.3f} >= 0.6)")
-
-                # Binärmaske erstellen (256x256)
-                mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255
-
-                # Fallback bei leerer Maske, der höchste Wert ist 0 also schwarz
-                if mask_array.max() == 0:
-                    print(" ⚠️ Maske leer, erstelle rechteckige Fallback-Maske")
-                    mask_array = np.zeros((512, 512), dtype=np.uint8) * 255  # weiße 512x512-Maske
-
-                    # Skaliere BBox auf 512x512
-                    scale_x = 512 / image.width
-                    scale_y = 512 / image.height
-                    fb_x1 = int(x1 * scale_x)
-                    fb_y1 = int(y1 * scale_y)
-                    fb_x2 = int(x2 * scale_x)
-                    fb_y2 = int(y2 * scale_y)
-
-                    # Schwarzes Rechteck für Person bzw. BBox
-                    cv2.rectangle(mask_array, (fb_x1, fb_y1), (fb_x2, fb_y2), 0, -1)
-
-                # Damit wird die Rohmaske für die UI-Anzeige gespeichert
-                raw_mask_array = mask_array.copy()
-
-                print("🌳 ENVIRONMENT-CHANGE POSTPROCESSING")
-
-                # Konvertierung zu PIL, hochskalieren auf Originalgröße (korrekte Überlagerung mit O-Bild),
-                # Konvertierung NumPy für weitere Verarbeitung da mathematisch korrekter als PIL.
-                if image.size != original_image.size:
-                    print(f" ⚠️ Bildgröße angepasst: {image.size} → {original_image.size}")
-                    temp_mask = Image.fromarray(mask_array).convert("L")
-                    temp_mask = temp_mask.resize(original_image.size, Image.Resampling.NEAREST)
-                    mask_array = np.array(temp_mask)
-                    print(f" ✅ Maske auf Originalgröße skaliert: {mask_array.shape}")
-
-                # Maske invertieren (Person wird schwarz, Hintergrund weiß)
-                mask_array = 255 - mask_array
-                print(" ✅ Maske invertiert (Person schwarz, Hintergrund weiß)")
-
-                # Weiße Punkte in der Person (schwarz) entfernen
-                print("🧹 Entferne weiße Punkte in der Person...")
-                kernel_open = np.ones((3, 3), np.uint8)
-                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_OPEN, kernel_open, iterations=3)
-                print(" ✅ MORPH_OPEN entfernt weiße Punkte in der Person")
-
-                # DEBUG nach MORPH_OPEN
-                print(f" Nach MORPH_OPEN - Weiße Pixel: {np.sum(mask_array > 127)}")
-
-                # Morphologische Operationen für saubere Umgebung - entfernt schwarze Pixel aus Umgebung
-                print("🔧 Verbessere Umgebungsmaske...")
-                kernel_close = np.ones((5, 5), np.uint8)
-                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close)
-                print(" ✅ MORPH_CLOSE für zusammenhängende Umgebung")
-
-                # DEBUG nach MORPH_CLOSE
-                print(f" Nach MORPH_CLOSE - Weiße Pixel: {np.sum(mask_array > 127)}")
-
-                # Weiche Ränder für bessere Integration der Person
-                print("🌈 Erstelle weiche Übergänge...")
-                mask_array = cv2.GaussianBlur(mask_array, (9, 9), 2.0)  # 2.0 bestimmt wie stark die Unschärfe ist
-                print(" ✅ Gaussian Blur für weiche Übergänge")
-
-                # DEBUG nach Gaussian Blur
-                print(f" Nach Gaussian Blur - Min/Max: {mask_array.min()}/{mask_array.max()}")
-                print(f" Nach Gaussian Blur - dtype: {mask_array.dtype}")
-
-                # Gamma-Korrektur für präzisere Ränder
-                print("🎛️ Wende Gamma-Korrektur an...")
-                mask_array = mask_array.astype(np.float32) / 255.0
-                print(f" Konvertiert zu Float32: Min={mask_array.min():.3f}, Max={mask_array.max():.3f}")
-
-                mask_array = np.clip(mask_array, 0.0, 1.0)  # begrenzt alle Werte auf 0 und 1
-                mask_array = mask_array ** 0.85  # Gamma-Korrektur Werte > 0.5 werden abgedunkelt, <0.5 aufgehellt - erzeugt natürliche Maskenübergänge
-                print(f" Nach Gamma 0.85: Min={mask_array.min():.3f}, Max={mask_array.max():.3f}")
-
-                mask_array = (mask_array * 255).astype(np.uint8)
-                print(" ✅ Gamma-Korrektur (0.85) gegen milchige Ränder")
-
-                # FINALE QUALITÄTSKONTROLLE
-                print("-" * 60)
-                print("📊 FINALE MASKEN-STATISTIK (ENVIRONMENT_CHANGE)")
-
-                white_pixels = np.sum(mask_array > 127)
-                black_pixels = np.sum(mask_array <= 127)
-                total_pixels = mask_array.size
-
-                white_ratio = white_pixels / total_pixels * 100
-                black_ratio = black_pixels / total_pixels * 100
-
-                print(f" Weiße Pixel (HINTERGRUND - Veränderung): {white_pixels:,} ({white_ratio:.1f}%)")
-                print(f" Schwarze Pixel (PERSON - Erhaltung): {black_pixels:,} ({black_ratio:.1f}%)")
-                print(f" Gesamtpixel: {total_pixels:,}")
-
-                # Warnungen basierend auf Verhältnis
-                if white_ratio < 30:
-                    print(f" ⚠️ WARNUNG: Sehr wenig Hintergrund ({white_ratio:.1f}%)")
-                    print(f" ℹ️ Das könnte bedeuten, dass die Person zu groß segmentiert wurde")
-                elif white_ratio > 90:
-                    print(f" ⚠️ WARNUNG: Sehr viel Hintergrund ({white_ratio:.1f}%)")
-                    print(f" ℹ️ Das könnte bedeuten, dass die Person zu klein segmentiert wurde")
-                elif 50 <= white_ratio <= 80:
-                    print(f" ✅ OPTIMALES Verhältnis ({white_ratio:.1f}%)")
-                else:
-                    print(f" ℹ️ Normales Verhältnis ({white_ratio:.1f}%)")
-
-                # Zurück zu PIL Image
-                mask = Image.fromarray(mask_array).convert("L")
-                raw_mask = Image.fromarray(raw_mask_array).convert("L")
-
-                print("#" * 80)
-                print(f"✅ SAM 2 SEGMENTIERUNG ABGESCHLOSSEN")
-                print(f"📐 Finale Maskengröße: {mask.size}")
-                print(f"🎛️ Verwendeter Modus: {mode}")
-                print("#" * 80)
-
-                return mask, raw_mask  # in mask steht die invertierte nachbearbeitete Maske, in raw_mask die Rohmaske
-
-            # ============================================================
-            # BLOCK 2: FOCUS_CHANGE
-            # ============================================================
-            elif mode == "focus_change":
-                print("-" * 60)
-                print("🎯 MODUS: FOCUS_CHANGE (OPTIMIERT)")
-                print("-" * 60)
-
-                # Konvertierung O-Bild in NumPy-Array für SAM
-                image_np = np.array(image.convert("RGB"))
-
-                # Packt die BBox-Koordinaten in eine 3D-Liste
-                input_boxes = [[[x1, y1, x2, y2]]]
-
-                # Nur Mittelpunkt als positiver Prompt
-                center_x = (x1 + x2) // 2
-                center_y = (y1 + y2) // 2
-                input_points = [[[[center_x, center_y]]]]  # NUR EIN PUNKT in 4D-Liste
-                input_labels = [[[1]]]  # Markiert Punkt als Positiver Prompt also der Bereich muß segmentiert werden
-
-                print(f" 🎯 SAM-Prompt: BBox [{x1},{y1},{x2},{y2}]")
-                print(f" 👁️ Punkt: Nur Mitte ({center_x},{center_y})")
-
-                # SAM Inputs vorbereiten
-                inputs = self.sam_processor(
-                    image_np,
-                    input_boxes=input_boxes,
-                    input_points=input_points,
-                    input_labels=input_labels,
-                    return_tensors="pt"
-                ).to(self.device)
-
-                # SAM Vorhersage (alle 3 Masken)
-                print("🧠 SAM 2 INFERENZ (3 Masken-Varianten)")
-                with torch.no_grad():
-                    print(" Führe Vorhersage durch...")
-                    outputs = self.sam_model(**inputs)
-                print(f"✅ Vorhersage abgeschlossen")
-                print(f" Anzahl der Vorhersagemasken: {outputs.pred_masks.shape[2]}")
-
-                num_masks = outputs.pred_masks.shape[2]
-
-
-                # Sammlung aller Masken in all_masks
-                all_masks = []
-
-                for i in range(num_masks):
-                    single_mask = outputs.pred_masks[:, :, i, :, :]
-                    resized_mask = F.interpolate(
-                        single_mask,
-                        size=(image.height, image.width),
-                        mode='bilinear',
-                        align_corners=False
-                    ).squeeze()
-
-                    mask_np = resized_mask.sigmoid().cpu().numpy()
-                    all_masks.append(mask_np)  # fügt die aktuelle Maske der Liste all_masks hinzu
-
-
-                # BBox-Information für Heuristik
-                bbox_center = ((x1 + x2) // 2, (y1 + y2) // 2)
-                bbox_area = (x2 - x1) * (y2 - y1)
-
-                print("🤔 HEURISTIK: Beste Maske auswählen")
-                best_mask_idx = 0
-                best_score = -1
-
-                # Alle 3 Masken analysieren
-                for i in range(num_masks):
-                    # Maske in Original-Bildgröße -vorher interpolate- analysieren
-
-                    mask_np_temp = all_masks[i]
-
-                    # Adaptive Vor-Filterung (prüft ob Maske überhaupt gültig ist)
-                    mask_max = mask_np_temp.max()
-                    if mask_max < 0.3:
-                        continue  # Maske überspringen
-
-                    adaptive_threshold = max(0.3, mask_max * 0.7)
-                    mask_binary = (mask_np_temp > adaptive_threshold).astype(np.uint8)
-
-                    # wenn nur schwarze Pixel (keine Segmentierung) nimm die nächste Maske
-                    if np.sum(mask_binary) == 0:
-                        continue
-
-                    # Heuristik-Berechnung
-                    mask_area_pixels = np.sum(mask_binary)  # zählt alle weißen Pixel in der Binärmaske
-
-                    # Berechnet wie gut die SAM-Maske mit der ursprünglichen BBox überlappt
-                    bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8)
-                    bbox_mask[y1:y2, x1:x2] = 1
-                    overlap = np.sum(mask_binary & bbox_mask)
-                    bbox_overlap_ratio = overlap / np.sum(bbox_mask) if np.sum(bbox_mask) > 0 else 0
-
-                    # Schwerpunkt
-                    y_coords, x_coords = np.where(mask_binary > 0)
-                    if len(y_coords) > 0:
-                        centroid_y = np.mean(y_coords)
-                        centroid_x = np.mean(x_coords)
-                        centroid_distance = np.sqrt((centroid_x - bbox_center[0])**2 +
-                                                    (centroid_y - bbox_center[1])**2)
-                        normalized_distance = centroid_distance / max(image.width, image.height)
-                    else:
-                        normalized_distance = 1.0
-
-                    # Flächen-Ratio
-                    area_ratio = mask_area_pixels / bbox_area
-                    area_score = 1.0 - min(abs(area_ratio - 1.0), 1.0)
-
-                    # FOCUS_CHANGE spezifischer Score
-                    score = (
-                        bbox_overlap_ratio * 0.4 +            # 40% BBox-Überlappung
-                        (1.0 - normalized_distance) * 0.25 +  # 25% Zentrumsnähe
-                        area_score * 0.25 +                   # 25% Flächenpassung
-                        mask_max * 0.1                        # 10% SAM-Konfidenz
-                    )
-
-                    print(f" Maske {i+1}: Score={score:.3f}, "
-                          f"Überlappung={bbox_overlap_ratio:.3f}, "
-                          f"Fläche={mask_area_pixels:,}px")
-
-                    if score > best_score:
-                        best_score = score
-                        best_mask_idx = i
-
-                print(f"✅ Beste Maske: Nr. {best_mask_idx+1} mit Score {best_score:.3f}")
-
-                # NUR DIE BESTE MASKE AUF 512x512 HERUNTERSKALIEREN - Für Inpaint
-                best_mask_256 = outputs.pred_masks[:, :, best_mask_idx, :, :]
-                resized_mask = F.interpolate(
-                    best_mask_256,
-                    size=(512, 512),  # DIREKT AUF CONTROLNET-ZIELGRÖßE
-                    mode='bilinear',
-                    align_corners=False
-                ).squeeze()
-
-                mask_np = resized_mask.cpu().numpy()
-                print(f" 🔄 Beste Maske skaliert auf 512×512 für ControlNet")
-
-                # ============================================================
-                # DYNAMISCHER THRESHOLD
-                # SAM gibt nur Wahrscheinlichkeiten aus!
-                # Nachdem das Modell eine Maske für eine Person vorhersagt (wo jeder Pixel einen Wert zwischen 0 und 1 hat,
-                # wie "wahrscheinlich gehört dieser Pixel zur Person"), wird diese Maske binarisiert (0 oder 1), indem alle
-                # Pixel unter 0.05 auf 0 gesetzt werden, alle darüber auf 1.
-                # ============================================================
-                mask_max = mask_np.max()  # höchster Wahrscheinlichkeitswert in SAM-Maske
-                if best_score < 0.7:  # Schlechte Maskenqualität
-                    dynamic_threshold = 0.05  # SEHR NIEDRIG für maximale Abdeckung
-                    print(f" ⚠️ Masken-Score niedrig ({best_score:.3f}). "
-                          f"Threshold=0.05 für maximale Abdeckung")
-                else:
-                    dynamic_threshold = max(0.15, mask_max * 0.3)  # Moderater Threshold
-                    print(f" ✅ Gute Maske. Threshold={dynamic_threshold:.3f}")
-
-                # Binärmaske erstellen (512x512)
-                mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255
-
-                # Fallback bei leerer Maske, der höchste Wert ist 0 also schwarz
-                if mask_array.max() == 0:
-                    print(" ⚠️ Maske leer, erstelle rechteckige Fallback-Maske")
-                    mask_array = np.zeros((512, 512), dtype=np.uint8)
-                    # BBox auf 512x512 skalieren für Fallback
-                    scale_x = 512 / image.width
-                    scale_y = 512 / image.height
-                    fb_x1 = int(x1 * scale_x)
-                    fb_y1 = int(y1 * scale_y)
-                    fb_x2 = int(x2 * scale_x)
-                    fb_y2 = int(y2 * scale_y)
-                    cv2.rectangle(mask_array, (fb_x1, fb_y1), (fb_x2, fb_y2), 255, -1)  # weiße Rechteckbox
-
-                # Damit wird die Rohmaske für die UI-Anzeige gespeichert
-                raw_mask_array = mask_array.copy()
-
-                # FOCUS_CHANGE POSTPROCESSING (angepasst für 512x512)
-                print("🔧 FOCUS_CHANGE POSTPROCESSING (auf 512×512)")
-                print(f" mask_array - Min/Max: {mask_array.min()}/{mask_array.max()}")
-                print(f" mask_array - Weiße Pixel: {np.sum(mask_array > 0)}")
-                print(f" mask_array - Shape: {mask_array.shape}")
-                print(f" mask_array - dtype: {mask_array.dtype}")
-
-                # 1. Findet und behält nur die größte zusammenhängende Komponente der Maske
-                labeled_array, num_features = ndimage.label(mask_array)
-                if num_features > 1:
-                    sizes = ndimage.sum(mask_array, labeled_array, range(1, num_features + 1))
-                    largest_component = np.argmax(sizes) + 1
-                    mask_array = np.where(labeled_array == largest_component, mask_array, 0)
-                    print(f" ✅ Größte Komponente behalten ({num_features}→1)")
-
-                # 2. Morphologische Operationen
-                kernel_close = np.ones((5, 5), np.uint8)
-                mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close, iterations=2)
-
-                kernel_dilate = np.ones((15, 15), np.uint8)
-                mask_array = cv2.dilate(mask_array, kernel_dilate, iterations=1)
-
-                # 3. Weiche Übergänge mittlerer Blur für natürliche Übergänge
-                mask_array = cv2.GaussianBlur(mask_array, (9, 9), 2.0)
-
-                # 4. Gamma-Korrektur
-                mask_array_float = mask_array.astype(np.float32) / 255.0
-                mask_array_float = np.clip(mask_array_float, 0.0, 1.0)
-                mask_array_float = mask_array_float ** 0.85
-                mask_array = (mask_array_float * 255).astype(np.uint8)
-
-                # 5. Auf Originalgröße für Rückgabe (falls benötigt)
-                mask_512 = Image.fromarray(mask_array).convert("L")
-                raw_mask = Image.fromarray(raw_mask_array).convert("L")
-
-                # Finale Maske für ControlNet ist 512x512
-                mask = mask_512
-
-                print(f"✅ FOCUS_CHANGE Maske erstellt: {mask.size}")
-                return mask, raw_mask
-
-            # ============================================================
-            # BLOCK 3: FACE_ONLY_CHANGE
-            # ============================================================
-            elif mode == "face_only_change":
-                print("-" * 60)
-                print("👤 SPEZIALMODUS: NUR GESICHT - ROBUSTER WORKFLOW")
-                print("-" * 60)
-
-                # ============================================================
-                # Originalbild sichern
-                # Andere Vorgehensweise da SAM bei kleinen Köpfen sonst keine Chance hat!
-                # Bild ausschneiden auf eine vergrößerte quadratische Box - Crops
-                # ============================================================
-                original_image = image
-                print(f"💾 Originalbild gesichert: {original_image.size}")
-
-                # ============================================================
-                # Crop = BBox × 2.5 (ERHÖHT für mehr Kontext)
-                # ============================================================
-                print("✂️ SCHRITT 2: ERSTELLE QUADRATISCHEN AUSSCHNITT (BBox × 2.5)")
-
-                # BBox-Zentrum berechnen
-                bbox_center_x = (x1 + x2) // 2
-                bbox_center_y = (y1 + y2) // 2
-                print(f" 📍 BBox-Zentrum: ({bbox_center_x}, {bbox_center_y})")
-
-                # Größte Dimension der BBox finden
-                bbox_width = x2 - x1
-                bbox_height = y2 - y1
-                bbox_max_dim = max(bbox_width, bbox_height)
-                print(f" 📏 BBox Dimensionen: {bbox_width} × {bbox_height} px")
-                print(f" 📐 Maximale BBox-Dimension: {bbox_max_dim} px")
-
-                # Crop-Größe berechnen (BBox × 2.5)
-                crop_size = int(bbox_max_dim * 2.5)
-                print(f" 🎯 Ziel-Crop-Größe: {crop_size} × {crop_size} px (BBox × 2.5)")
-
-                # Crop-Koordinaten berechnen (zentriert um BBox)
-                crop_x1 = bbox_center_x - crop_size // 2
-                crop_y1 = bbox_center_y - crop_size // 2
-                crop_x2 = crop_x1 + crop_size
-                crop_y2 = crop_y1 + crop_size
-
-                # Sicherstellen, dass Crop innerhalb der Bildgrenzen bleibt
-                crop_x1 = max(0, crop_x1)
-                crop_y1 = max(0, crop_y1)
-                crop_x2 = min(original_image.width, crop_x2)
-                crop_y2 = min(original_image.height, crop_y2)
-
-                # Falls Crop zu klein ist, anpassen
-                actual_crop_width = crop_x2 - crop_x1
-                actual_crop_height = crop_y2 - crop_y1
-
-                if actual_crop_width < crop_size or actual_crop_height < crop_size:
-                    # An Kanten anpassen
-                    if crop_x1 == 0:
-                        crop_x2 = min(original_image.width, crop_size)
-                    elif crop_x2 == original_image.width:
-                        crop_x1 = max(0, original_image.width - crop_size)
-
-                    if crop_y1 == 0:
-                        crop_y2 = min(original_image.height, crop_size)
-                    elif crop_y2 == original_image.height:
-                        crop_y1 = max(0, original_image.height - crop_size)
-
-                print(f" 🔲 Crop-Bereich: [{crop_x1}, {crop_y1}, {crop_x2}, {crop_y2}]")
-                print(f" 📏 Tatsächliche Crop-Größe: {crop_x2-crop_x1} × {crop_y2-crop_y1} px")
-
-                # Bild ausschneiden - 2,5 mal so groß und quadratisch wie BBox
-                cropped_image = original_image.crop((crop_x1, crop_y1, crop_x2, crop_y2))
-                print(f" ✅ Quadratischer Ausschnitt erstellt: {cropped_image.size}")
-
-                # ============================================================
-                # BBox-Koordinaten transformieren
-                # ============================================================
-                print("📐 SCHRITT 3: BBox-KOORDINATEN TRANSFORMIEREN")
-                rel_x1 = x1 - crop_x1
-                rel_y1 = y1 - crop_y1
-                rel_x2 = x2 - crop_x1
-                rel_y2 = y2 - crop_y1
-
-                # Sicherstellen, dass BBox innerhalb des Crops liegt
-                rel_x1 = max(0, rel_x1)
-                rel_y1 = max(0, rel_y1)
-                rel_x2 = min(cropped_image.width, rel_x2)
-                rel_y2 = min(cropped_image.height, rel_y2)
-
-                print(f" 🎯 Relative BBox im Crop: [{rel_x1}, {rel_y1}, {rel_x2}, {rel_y2}]")
-                print(f" 📏 Relative BBox Größe: {rel_x2-rel_x1} × {rel_y2-rel_y1} px")
-
-                # ============================================================
-                # INTENSIVE BILDAUFBEREITUNG FÜR GESICHTSERKENNUNG
-                # ============================================================
-                print("🔍 SCHRITT 4: ERWEITERTE BILDAUFBEREITUNG FÜR GESICHTSERKENNUNG")
-
-                # 1. Kontrast verstärken
-                contrast_enhancer = ImageEnhance.Contrast(cropped_image)
-                enhanced_image = contrast_enhancer.enhance(1.8)  # 80% mehr Kontrast
-
-                # 2. Schärfe erhöhen für bessere Kantenerkennung
-                sharpness_enhancer = ImageEnhance.Sharpness(enhanced_image)
-                enhanced_image = sharpness_enhancer.enhance(2.0)  # 100% mehr Schärfe
-
-                # 3. Helligkeit anpassen
-                brightness_enhancer = ImageEnhance.Brightness(enhanced_image)
-                enhanced_image = brightness_enhancer.enhance(1.1)  # 10% heller
-
-                print(f" ✅ Erweiterte Bildaufbereitung abgeschlossen")
-                print(f" • Kontrast: +80%")
-                print(f" • Schärfe: +100%")
-                print(f" • Helligkeit: +10%")
-
-                # Für SAM: Verwende aufbereiteten Ausschnitt
-                image = enhanced_image
-                x1, y1, x2, y2 = rel_x1, rel_y1, rel_x2, rel_y2
-
-                print(" 🔄 SAM wird auf aufbereitetem Ausschnitt ausgeführt")
-                print(f" 📊 SAM-Eingabegröße: {image.size}")
-
-                # ============================================================
-                # SAM-AUSFÜHRUNG
-                # ============================================================
-                print("-" * 60)
-                print(f"📦 BOUNDING BOX DETAILS FÜR SAM:")
-                print(f" Bild-Größe für SAM: {image.size}")
-                print(f" BBox Koordinaten: [{x1}, {y1}, {x2}, {y2}]")
-                print(f" BBox Dimensionen: {x2-x1}px × {y2-y1}px")
-
-                # Vorbereitung für SAM2 - WICHTIG: NUR EINE BBOX
-                print("-" * 60)
-                print("🖼️ BILDAUFBEREITUNG FÜR SAM 2")
-                # SAM erwartet NumPy-Array, kein PIL
-                image_np = np.array(image.convert("RGB"))
-
-                # Immer nur eine BBox verwenden (SAM 2 erwartet genau 1)
-                input_boxes = [[[x1, y1, x2, y2]]]
-
-                # Punkt in der BBox-Mitte (zur Ünterstützung von SAM damit BBox nicht zu dicht um Kopf gezogen werden muß!)
-                center_x = (x1 + x2) // 2
-                center_y = (y1 + y2) // 2
-
-                # Punkt im Gesicht (30% höher vom Mittelpunkt)(auch für größere BBox)
-                bbox_height = y2 - y1
-                face_offset = int(bbox_height * 0.3)
-                face_x = center_x
-                face_y = center_y - face_offset
-                face_y = max(y1 + 10, min(face_y, y2 - 10))  # In BBox halten
-
-                # BEIDE Punkte kombinieren
-                input_points = [[[[center_x, center_y], [face_x, face_y]]]]  # ZWEI Punkte
-                input_labels = [[[1, 1]]]  # Beide sind positive Prompts
-
-                print(f" 🎯 SAM-Prompt: BBox [{x1},{y1},{x2},{y2}]")
-                print(f" 👁️ Punkte: Mitte ({center_x},{center_y}), Gesicht ({face_x},{face_y})")
-
-                # Aufruf des SAM-Prozessors mit den Variablen. Der Processor verpackt diese Rohdaten
-                # in die für das SAM-Modell erforderlichen Tensoren und speichert sie in inputs.
-                inputs = self.sam_processor(
-                    image_np,
-                    input_boxes=input_boxes,
-                    input_points=input_points,  # ZWEI Punkte
-                    input_labels=input_labels,  # Zwei Labels
-                    return_tensors="pt"
-                ).to(self.device)  # Ohne .to(self.device) werden die Tensoren standardmäßig im CPU-RAM erzeugt und gespeichert! Da GPU-Fehler!
-
-                print(f"✅ Processor-Ausgabe: Dictionary mit {len(inputs)} Schlüsseln: {list(inputs.keys())}")
-                print(f" - 'pixel_values' Shape: {inputs['pixel_values'].shape}")
-                print(f" - 'input_boxes' Shape: {inputs['input_boxes'].shape}")
-                if 'input_points' in inputs:
-                    print(f" - 'input_points' Shape: {inputs['input_points'].shape}")
-
-                # 4. SAM2 Vorhersage
-                print("-" * 60)
-                print("🧠 SAM 2 INFERENZ (Vorhersage)")
-                with torch.no_grad():
-                    print(" Führe Vorhersage durch...")
-                    outputs = self.sam_model(**inputs)
-                print(f"✅ Vorhersage abgeschlossen")
-                print(f" Anzahl der Vorhersagemasken: {outputs.pred_masks.shape[2]}")
-
-                # 5. Maske extrahieren
-                print("📏 SCHRITT 6: MASKE EXTRAHIEREN")
-
-                num_masks = outputs.pred_masks.shape[2]
-                print(f" SAM lieferte {num_masks} verschiedene Masken")
-
-                # Extrahiere alle Masken
-                all_masks = []
-
-                for i in range(num_masks):
-                    single_mask = outputs.pred_masks[:, :, i, :, :]
-                    resized_mask = F.interpolate(
-                        single_mask,
-                        size=(image.height, image.width),
-                        mode='bilinear',
-                        align_corners=False
-                    ).squeeze()
-
-                    mask_np = resized_mask.sigmoid().cpu().numpy()
-                    all_masks.append(mask_np)
-
-                    # Basis-Statistiken für jede Maske
-                    mask_binary = (mask_np > 0.5).astype(np.uint8)
-                    mask_area = np.sum(mask_binary)
-                    print(f" Maske {i+1}: Größe={mask_area:,} Pixel, Max-Konfidenz={mask_np.max():.3f}")
-
-                # ============================================================
-                # HEURISTIK
-                # ============================================================
-                print("🤔 SCHRITT 6: MASKENAUSWAHL MIT MODUS-SPEZIFISCHER HEURISTIK")
-
-                bbox_center = ((x1 + x2) // 2, (y1 + y2) // 2)
-                bbox_area = (x2 - x1) * (y2 - y1)
-                print(f" Erwartetes BBox-Zentrum: {bbox_center}")
-                print(f" Erwartete BBox-Fläche: {bbox_area:,} Pixel")
-
-                best_mask_idx = 0
-                best_score = -1
-
-                for i, mask_np in enumerate(all_masks):
-                    mask_max = mask_np.max()
-
-                    # Grundlegende Filterung
-                    if mask_max < 0.3:
-                        print(f" ❌ Maske {i+1}: Zu niedrige Konfidenz ({mask_max:.3f}), überspringe")
-                        continue
-
-                    # Adaptiver Threshold
-                    adaptive_threshold = max(0.3, mask_max * 0.7)
-                    mask_binary = (mask_np > adaptive_threshold).astype(np.uint8)
-
-                    if np.sum(mask_binary) == 0:
-                        print(f" ❌ Maske {i+1}: Keine Pixel nach Threshold {adaptive_threshold:.3f}")
-                        continue
-
-                    mask_area_pixels = np.sum(mask_binary)
-
-                    # ============================================================
-                    # SPEZIALHEURISTIK
-                    # ============================================================
-
-                    print(f" 🔍 Analysiere Maske {i+1} mit GESICHTS-HEURISTIK")
-
-                    # 1. FLÄCHENBASIERTE BEWERTUNG (40%)
-                    area_ratio = mask_area_pixels / bbox_area
-                    print(f" 📐 Flächen-Ratio: {area_ratio:.3f} ({mask_area_pixels:,} / {bbox_area:,} Pixel)")
-
-                    # Optimale Kopfgröße: 80-120% der BBox
-                    if area_ratio < 0.6:
-                        print(f" ⚠️ Fläche zu klein für Kopf (<60% der BBox)")
-                        area_score = area_ratio * 0.5  # Stark bestrafen
-                    elif area_ratio > 1.5:
-                        print(f" ⚠️ Fläche zu groß für Kopf (>150% der BBox)")
-                        area_score = 2.0 - area_ratio  # Linear bestrafen
-                    elif 0.8 <= area_ratio <= 1.2:
-                        area_score = 1.0  # Perfekte Größe
-                        print(f" ✅ Perfekte Kopfgröße (80-120% der BBox)")
-                    else:
-                        # Sanfte Abweichung
-                        area_score = 1.0 - abs(area_ratio - 1.0) * 0.5
-
-                    # 2. KOMPAKTHEIT/SOLIDITÄT (30%)
-                    labeled_mask = measure.label(mask_binary)
-                    regions = measure.regionprops(labeled_mask)
-
-                    if len(regions) == 0:
-                        compactness_score = 0.1
-                        print(f" ❌ Keine zusammenhängenden Regionen gefunden")
-                    else:
-                        # Größte Region finden (sollte der Kopf sein)
-                        largest_region = max(regions, key=lambda r: r.area)
-
-                        # Solidität = Fläche / konvexe Hüllenfläche
-                        solidity = largest_region.solidity if hasattr(largest_region, 'solidity') else 0.7
-
-                        # Exzentrizität (wie elliptisch) - Köpfe sind tendenziell elliptisch
-                        eccentricity = largest_region.eccentricity if hasattr(largest_region, 'eccentricity') else 0.5
-
-                        # Perfekt runde Formen (Kreis) sind 0, Linie wäre 1
-                        # Köpfe haben typischerweise 0.5-0.8
-                        if 0.4 <= eccentricity <= 0.9:
-                            eccentricity_score = 1.0 - abs(eccentricity - 0.65) * 2
-                        else:
-                            eccentricity_score = 0.2
-
-                        compactness_score = (solidity * 0.6 + eccentricity_score * 0.4)
-                        print(f" 🎯 Kompaktheits-Analyse:")
-                        print(f" • Solidität (Fläche/Konvex): {solidity:.3f}")
-                        print(f" • Exzentrizität (Form): {eccentricity:.3f}")
-                        print(f" • Kompaktheits-Score: {compactness_score:.3f}")
-
-                    # 3. BBOX-ÜBERLAPPUNG (20%)
-                    bbox_mask = np.zeros((image.height, image.width), dtype=np.uint8)
-                    bbox_mask[y1:y2, x1:x2] = 1
-                    overlap = np.sum(mask_binary & bbox_mask)
-                    bbox_overlap_ratio = overlap / mask_area_pixels if mask_area_pixels > 0 else 0
-
-                    # Für Kopf: Sollte großteils in BBox sein (mind. 70%)
-                    if bbox_overlap_ratio >= 0.7:
-                        bbox_score = 1.0
-                        print(f" ✅ Hohe BBox-Überlappung: {bbox_overlap_ratio:.3f} ({overlap:,} Pixel)")
-                    elif bbox_overlap_ratio >= 0.5:
-                        bbox_score = bbox_overlap_ratio * 1.2
-                        print(f" ⚠️ Mittlere BBox-Überlappung: {bbox_overlap_ratio:.3f}")
-                    else:
-                        bbox_score = bbox_overlap_ratio * 0.8
-                        print(f" ❌ Geringe BBox-Überlappung: {bbox_overlap_ratio:.3f}")
-
-                    # SAM-KONFIDENZ (10%)
-                    confidence_score = mask_max
-
-                    # GESAMTSCORE für Gesicht
-                    score = (
-                        area_score * 0.4 +         # 40% Flächenpassung
-                        compactness_score * 0.3 +  # 30% Kompaktheit
-                        bbox_score * 0.2 +         # 20% BBox-Überlappung
-                        confidence_score * 0.1     # 10% Konfidenz
-                    )
-
-                    print(f" 📊 GESICHTS-SCORES für Maske {i+1}:")
-                    print(f" • Flächen-Score: {area_score:.3f}")
-                    print(f" • Kompaktheits-Score: {compactness_score:.3f}")
-                    print(f" • BBox-Überlappungs-Score: {bbox_score:.3f}")
-                    print(f" • Konfidenz-Score: {confidence_score:.3f}")
-                    print(f" • GESAMTSCORE: {score:.3f}")
-
-                    if score > best_score:
-                        best_score = score
-                        best_mask_idx = i
-                        print(f" 🏆 Neue beste Maske: Nr. {i+1} mit Score {score:.3f}")
-
-                print(f"✅ Beste Maske ausgewählt: Nr. {best_mask_idx+1} mit Score {best_score:.3f}")
-
-                # Beste Maske verwenden
-                mask_np = all_masks[best_mask_idx]
-                max_val = mask_np.max()
-                print(f"🔍 Maximaler SAM-Konfidenzwert der besten Maske: {max_val:.3f}")
-
-                # ============================================================
-                # THRESHOLD-BESTIMMUNG
-                # ============================================================
-                # Spezieller Threshold für Gesichter
-                if max_val < 0.5:
-                    dynamic_threshold = 0.25
-                    print(f" ⚠️ SAM ist unsicher für Gesicht (max_val={max_val:.3f} < 0.5)")
-                elif max_val < 0.8:
-                    dynamic_threshold = max_val * 0.65  # Mittlerer Threshold
-                    print(f" ℹ️ SAM ist mäßig sicher für Gesicht (max_val={max_val:.3f})")
-                else:
-                    dynamic_threshold = max_val * 0.75  # Hoher Threshold
-                    print(f" ✅ SAM ist sicher für Gesicht (max_val={max_val:.3f} >= 0.8)")
-
-                print(f" 🎯 Gesichts-Threshold: {dynamic_threshold:.3f}")
-
-                # Binärmaske erstellen
-                print("🐛 DEBUG THRESHOLD:")
-                print(f" mask_np Min/Max: {mask_np.min():.3f}/{mask_np.max():.3f}")
-                print(f" dynamic_threshold: {dynamic_threshold:.3f}")
-
-                mask_array = (mask_np > dynamic_threshold).astype(np.uint8) * 255
-
-                print(f"🚨 DEBUG BINÄRMASKE:")
-                print(f" mask_array Min/Max: {mask_array.min()}/{mask_array.max()}")
-                print(f" Weiße Pixel in mask_array: {np.sum(mask_array > 0)}")
-                print(f" Anteil weiße Pixel: {np.sum(mask_array > 0) / mask_array.size:.1%}")
-
-                # Fallback wenn Maske leer
-                if mask_array.max() == 0:
-                    print("⚠️ KRITISCH: Binärmaske ist leer! Erzwinge Testmaske (BBox).")
-                    print(f" 🚨 BBox für Fallback: x1={x1}, y1={y1}, x2={x2}, y2={y2}")
-
-                    test_mask = np.zeros((image.height, image.width), dtype=np.uint8)
-                    cv2.rectangle(test_mask, (x1, y1), (x2, y2), 255, -1)
-
-                    mask_array = test_mask
-                    print(f"🐛 DEBUG ERZWUNGENE MASKE: Weiße Pixel: {np.sum(mask_array > 0)}")
-
-                # Rohmaske speichern
-                raw_mask_array = mask_array.copy()
-
-                # ============================================================
-                # POSTPROCESSING
-                # ============================================================
-
-                print("👤 GESICHTS-SPEZIFISCHES POSTPROCESSING")
-
-                # 1. Größte zusammenhängende Komponente finden
-                labeled_array, num_features = ndimage.label(mask_array)
-
-                if num_features > 0:
-                    print(f" 🔍 Gefundene Komponenten: {num_features}")
-
-                    sizes = ndimage.sum(mask_array, labeled_array, range(1, num_features + 1))
-                    largest_component_idx = np.argmax(sizes) + 1
-
-                    print(f" 👑 Größte Komponente: Nr. {largest_component_idx} mit {sizes[largest_component_idx-1]:,} Pixel")
-
-                    # NUR die größte Komponente behalten (der Kopf)
-                    mask_array = np.where(labeled_array == largest_component_idx, mask_array, 0)
-
-                    # MORPHOLOGISCHE OPERATIONEN FÜR SAUBEREN KOPF
-                    print(" ⚙️ Morphologische Operationen für sauberen Kopf")
-
-                    # Zuerst CLOSE, um kleine Löcher im Kopf zu füllen
-                    kernel_close = np.ones((7, 7), np.uint8)
-                    mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_CLOSE, kernel_close, iterations=1)
-                    print(" • MORPH_CLOSE (7x7) - Löcher im Kopf füllen")
-
-                    # Dann OPEN, um kleine Ausreißer zu entfernen
-                    kernel_open = np.ones((5, 5), np.uint8)
-                    mask_array = cv2.morphologyEx(mask_array, cv2.MORPH_OPEN, kernel_open, iterations=1)
-                    print(" • MORPH_OPEN (5x5) - Rauschen entfernen")
-
-                # ============================================================
-                # KRITISCH: MASKE IMMER ZURÜCK AUF ORIGINALGRÖSSE (auch bei Fallback!)
-                # ============================================================
-                print("-" * 60)
-                print("🔄 MASKE IMMER ZURÜCK AUF ORIGINALGRÖSSE TRANSFORMIEREN")
-
-                # WICHTIG: Immer die richtigen Crop-Koordinaten verwenden
-                temp_mask = Image.fromarray(mask_array).convert("L")
-                print(f" Maskengröße auf Ausschnitt: {temp_mask.size}")
-
-                # Maske auf ORIGINALBILDGRÖSSE bringen
-                final_mask = Image.new("L", original_image.size, 0)
-                print(f" Leere Maske in Originalgröße: {final_mask.size}")
-
-                # Immer die gespeicherten Crop-Koordinaten verwenden
-                if 'crop_x1' in locals() and 'crop_y1' in locals():
-                    final_mask.paste(temp_mask, (crop_x1, crop_y1))
-                    print(f" Maskenposition im Original: ({crop_x1}, {crop_y1})")
-                else:
-                    # Fallback: Zentrieren
-                    x_offset = (original_image.width - temp_mask.width) // 2
-                    y_offset = (original_image.height - temp_mask.height) // 2
-                    final_mask.paste(temp_mask, (x_offset, y_offset))
-                    print(f" ⚠️ Keine Crop-Koordinaten, zentriert: ({x_offset}, {y_offset})")
-
-                mask_array = np.array(final_mask)
-                print(f" ✅ Maske zurück auf Originalgröße skaliert: {mask_array.shape}")
-
-                # Bild-Referenz zurücksetzen
-                image = original_image
-                print(f" 🔄 Bild-Referenz wieder auf Original gesetzt: {image.size}")
-
-                # ============================================================
-                # ABSCHLIESSENDE STATISTIK
-                # ============================================================
-
-                print("📊 FINALE MASKEN-STATISTIK")
-
-                # Weiße Pixel zählen
-                white_pixels = np.sum(mask_array > 0)
-                total_pixels = mask_array.size
-                white_ratio = white_pixels / total_pixels * 100
-
-                # Original-BBox Fläche (vor Crop)
-                original_bbox_width = original_bbox[2] - original_bbox[0]
-                original_bbox_height = original_bbox[3] - original_bbox[1]
-                original_face_area = original_bbox_width * original_bbox_height
-                coverage_ratio = white_pixels / original_face_area if original_face_area > 0 else 0
-                print(f" 👤 GESICHTSABDECKUNG: {coverage_ratio:.1%} der ursprünglichen BBox")
-
-                print(f" Weiße Pixel (Veränderungsbereich): {white_pixels:,} ({white_ratio:.1f}%)")
-                print(f" Schwarze Pixel (Erhaltungsbereich): {total_pixels-white_pixels:,} ({100-white_ratio:.1f}%)")
-                print(f" Gesamtpixel: {total_pixels:,}")
-
-                # Warnungen basierend auf Abdeckung
-                if coverage_ratio < 0.7:
-                    print(f" ⚠️ WARNUNG: Geringe Gesichtsabdeckung ({coverage_ratio:.1%})")
-                elif coverage_ratio > 1.3:
-                    print(f" ⚠️ WARNUNG: Sehr hohe Gesichtsabdeckung ({coverage_ratio:.1%})")
-                elif 0.8 <= coverage_ratio <= 1.2:
-                    print(f" ✅ OPTIMALE Gesichtsabdeckung ({coverage_ratio:.1%})")
-
-                # Zurück zu PIL Image
-                mask = Image.fromarray(mask_array).convert("L")
-                raw_mask = Image.fromarray(raw_mask_array).convert("L")
-
-                print("#" * 80)
-                print(f"✅ SAM 2 SEGMENTIERUNG ABGESCHLOSSEN")
-                print(f"📐 Finale Maskengröße: {mask.size}")
-                print(f"🎛️ Verwendeter Modus: {mode}")
-
-                print(f"👤 Crop={crop_size}×{crop_size}px, Heuristik-Score={best_score:.3f}")
-                print(f"👤 Kopfabdeckung: {coverage_ratio:.1%} der BBox")
-
-                print(f"🔍 DEBUG FINALE MASKE:")
-                print(f" mask_array Min/Max: {mask_array.min()}/{mask_array.max()}, Typ: {mask_array.dtype}")
-                print(f" Weiße Pixel final: {np.sum(mask_array > 0)}")
-
-                print("#" * 80)
-
-                return mask, raw_mask
-
-            # ============================================================
-            # UNBEKANNTER MODUS
-            # ============================================================
-            else:
-                print(f"❌ Unbekannter Modus: {mode}")
-                return self._create_rectangular_mask(image, bbox_coords, "focus_change")
-
         except Exception as e:
             print("❌" * 40)
             print("❌ FEHLER IN SAM 2 SEGMENTIERUNG")