Update app.py
app.py CHANGED
@@ -197,41 +197,37 @@ def text_to_image(prompt, steps, guidance_scale, progress=gr.Progress()):
         traceback.print_exc()
         return None
 
-def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale, face_preserve, bbox_x1, bbox_y1, bbox_x2, bbox_y2, progress=gr.Progress()):
+def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
+                 face_preserve, bbox_x1, bbox_y1, bbox_x2, bbox_y2,
+                 progress=gr.Progress()):
     try:
         if image is None:
             return None
 
+        import time, random
+        start_time = time.time()
+
         print(f"Img2Img Start → Strength: {strength}, Steps: {steps}, Guidance: {guidance_scale}")
         print(f"Prompt: {prompt}")
         print(f"Negativ-Prompt: {neg_prompt}")
         print(f"Gesicht beibehalten: {face_preserve}")
-        start_time = time.time()
 
-
-        progress(0, desc="Generierung läuft - CPU benötigt bis zu 20 Minuten!")
+        progress(0, desc="Starte Generierung mit ControlNet...")
 
-        #
-
-
-
-
-
-        controlnet_steps = min(25, actual_steps_from_strength)
-
-        print(f"🎯 ControlNet Step-Kalkulation: UI={steps}, Adj-Strength={adj_strength:.3f}, Echte Steps={actual_steps_from_strength}, ControlNet-Steps={controlnet_steps}")
+        # -------------------------------
+        # PARAMETER-TUNING
+        # -------------------------------
+        adj_strength = min(0.85, strength * 1.25)
+        controlnet_strength = adj_strength * (0.8 if face_preserve else 0.5)
+        controlnet_steps = min(25, int(steps * 0.8))
 
-
-
-
-
-
-        # PERSON BEIBEHALTEN, UMGEBUNG ÄNDERN → NORMALE STRENGTH FÜR OPENPOSE
-        controlnet_strength = adj_strength * 0.5  # 50% für OpenPose
-        print(f"🎯 ControlNet OpenPose Modus: Strength = {controlnet_strength:.3f} (50% von {adj_strength:.3f})")
+        print(f"🎯 Steps={steps}, ControlNet-Steps={controlnet_steps}, Strength={controlnet_strength:.3f}")
+
+        # -------------------------------
+        # CONTROLNET GENERIERUNG
+        # -------------------------------
+        progress(0.05, desc="Erstelle ControlNet Maps...")
 
-        # ControlNet Modul aufrufen - MIT ZWEI RÜCKGABEWERTEN
         controlnet_output, inpaint_input = controlnet_processor.generate_with_controlnet(
             image=image,
             prompt=prompt,
@@ -239,74 +235,57 @@ def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale, fac
             steps=controlnet_steps,
             guidance_scale=guidance_scale,
             controlnet_strength=controlnet_strength,
-            progress=progress,
-            keep_environment=face_preserve
+            progress=progress
         )
-
+
         print(f"✅ ControlNet Output erhalten: {type(controlnet_output)}")
-        print(f"✅ Inpaint Input erhalten: {type(inpaint_input)}")
-
-        #Progress-Balken zur Überbrückung für 0ten Step von Inpaint
-        progress(0.3, desc="ControlNet fertig, starte Inpaint...")
 
-        #
-
-
-
+        # -------------------------------
+        # INPAINT (STABLE DIFFUSION IMG2IMG)
+        # -------------------------------
+        progress(0.3, desc="ControlNet abgeschlossen – starte Inpaint...")
+
+        pipe = load_img2img()  # ← deine bestehende Funktion
         img_resized = inpaint_input.convert("RGB").resize((IMG_SIZE, IMG_SIZE))
 
-        # --- PARAMETER-TUNING ---
         adj_guidance = min(guidance_scale, 12.0)
-
-        # ZUFÄLLIGER SEED für Variation
         seed = random.randint(0, 2**32 - 1)
         generator = torch.Generator(device=device).manual_seed(seed)
         print(f"Using seed: {seed}")
 
-        #
+        # -------------------------------
+        # GESICHTS-MASKE (falls Koordinaten)
+        # -------------------------------
         mask = None
-
-
-
-
-        orig_width, orig_height = image.size
-        scale_x = IMG_SIZE / orig_width
-        scale_y = IMG_SIZE / orig_height
-
-        scaled_coords = [
+        if bbox_x1 and bbox_y1 and bbox_x2 and bbox_y2:
+            orig_w, orig_h = image.size
+            scale_x, scale_y = IMG_SIZE / orig_w, IMG_SIZE / orig_h
+            bbox_coords = [
                 int(bbox_x1 * scale_x),
                 int(bbox_y1 * scale_y),
                 int(bbox_x2 * scale_x),
                 int(bbox_y2 * scale_y)
             ]
-
-        print(f"Skalierte Koordinaten: {scaled_coords}")
-
-        # Maskenlogik basierend auf face_preserve
-        if bbox_coords:
+            print(f"Skalierte Koordinaten: {bbox_coords}")
             mask = create_face_mask(img_resized, bbox_coords, face_preserve)
             if mask:
-                print("Maske erfolgreich erstellt")
+                print("✅ Maske erfolgreich erstellt")
             else:
-                print("Keine gültigen Koordinaten")
-                mask = None
-
-        # Detaillierte Debug-Informationen vor dem Pipeline-Aufruf
-        print(f"⚙️ PIPELINE-KONFIGURATION:")
-        print(f"   - Angefordert: {int(steps)} Steps")
-        print(f"   - Strength: {adj_strength:.3f}")
-        print(f"   - Scheduler: {pipe.scheduler.__class__.__name__}")
+                print("⚠️ Keine gültigen Koordinaten – keine Maske")
 
-
+        # -------------------------------
+        # PIPELINE-AUFRUF
+        # -------------------------------
+        from diffusers import EulerAncestralDiscreteScheduler
+        if not isinstance(pipe.scheduler, EulerAncestralDiscreteScheduler):
+            pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
-        # NEUE Callback-Implementierung
         callback = ImageToImageProgressCallback(progress, int(steps), adj_strength)
 
-        # --- PIPELINE-AUFRUF MIT KORREKTEM INPUT ---
         result = pipe(
             prompt=prompt,
             negative_prompt=neg_prompt,
-            image=img_resized,
+            image=img_resized,
             mask_image=mask,
             strength=adj_strength,
             num_inference_steps=int(steps),
@@ -316,28 +295,14 @@ def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale, fac
             callback_on_step_end_tensor_inputs=[],
         )
 
-        # ZUSÄTZLICHE AUSGABE: Tatsächliche Steps
-        try:
-            scheduler = pipe.scheduler
-            print(f"🔧 SCHEDULER-INFO: {scheduler.__class__.__name__}")
-            print(f"📊 TATSÄCHLICHE STEP-KONFIGURATION: {int(steps)} Schritte mit Strength {adj_strength:.3f}")
-
-            if hasattr(scheduler, 'timesteps'):
-                actual_steps = len(scheduler.timesteps)
-                print(f"🎯 BESTÄTIGTE INTERNE STEP-AUSGABE: Scheduler verwendete {actual_steps} tatsächliche Denoising-Schritte")
-
-        except Exception as e:
-            print(f"⚠️ Konnte Scheduler-Info nicht auslesen: {e}")
-
         end_time = time.time()
-        print(f"Dauer: {end_time - start_time:.2f} Sekunden")
-
-        generated_image = result.images[0]
+        print(f"🕒 Dauer: {end_time - start_time:.2f} Sekunden")
 
+        generated_image = result.images[0]
         return generated_image
-
+
     except Exception as e:
-        print(f"Fehler: {e}")
+        print(f"❌ Fehler in img_to_image: {e}")
         import traceback
         traceback.print_exc()
         return None
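
Note on the step arithmetic above: in diffusers' img2img/inpaint pipelines, strength trims the denoising schedule, so fewer steps actually run than the UI requests. That is why the callback is constructed with adj_strength alongside the step count, and why controlnet_steps is derived from steps * 0.8. A minimal worked example of how diffusers derives the effective step count (values are illustrative, not from this commit):

# Mirrors the logic of StableDiffusionImg2ImgPipeline.get_timesteps
steps, adj_strength = 40, 0.85

init_timestep = min(int(steps * adj_strength), steps)  # 34
t_start = max(steps - init_timestep, 0)                # 6 -> the first 6 steps are skipped
effective_steps = steps - t_start                      # 34 denoising steps actually run

print(effective_steps)  # 34, not the 40 requested in the UI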
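
create_face_mask is defined elsewhere in app.py and is not part of this diff. A hypothetical sketch of what it could look like, assuming the Stable Diffusion inpaint convention that white mask pixels are repainted and black pixels are kept, with face_preserve inverting the box (keep the face, repaint the surroundings):

from PIL import Image, ImageDraw, ImageFilter

def create_face_mask(img, bbox_coords, face_preserve):
    # Hypothetical helper - the real one lives elsewhere in app.py.
    x1, y1, x2, y2 = bbox_coords
    if x2 <= x1 or y2 <= y1:
        return None  # degenerate box -> caller keeps mask = None
    if face_preserve:
        mask = Image.new("L", img.size, 255)                       # repaint everything...
        ImageDraw.Draw(mask).rectangle([x1, y1, x2, y2], fill=0)   # ...except the face box
    else:
        mask = Image.new("L", img.size, 0)                         # keep everything...
        ImageDraw.Draw(mask).rectangle([x1, y1, x2, y2], fill=255) # ...but repaint the face box
    return mask.filter(ImageFilter.GaussianBlur(radius=8))  # feather the seam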
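
ImageToImageProgressCallback is likewise defined outside this diff. Since the pipeline call passes callback_on_step_end_tensor_inputs=[], the class presumably follows diffusers' callback_on_step_end protocol: a callable taking (pipeline, step, timestep, callback_kwargs) and returning the callback_kwargs dict. A minimal sketch under that assumption:

class ImageToImageProgressCallback:
    """Hypothetical sketch: maps diffusers' per-step callback onto the Gradio progress bar."""

    def __init__(self, progress, total_steps, strength):
        self.progress = progress  # gr.Progress() callable
        # strength trims the schedule, so fewer steps run than requested (see note above)
        self.expected_steps = max(1, int(total_steps * strength))

    def __call__(self, pipeline, step, timestep, callback_kwargs):
        # 0.0-0.3 of the bar is taken by the ControlNet stage (see diff),
        # so the inpaint steps fill the remaining 0.3-1.0.
        frac = 0.3 + 0.7 * min(1.0, (step + 1) / self.expected_steps)
        self.progress(frac, desc=f"Inpaint Step {step + 1}/{self.expected_steps}")
        return callback_kwargs  # must be returned so the pipeline can continue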