Astridkraft commited on
Commit
650a9c1
·
verified ·
1 Parent(s): 17755da

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +329 -157
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
  from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
3
- from diffusers import StableDiffusionInpaintPipeline
4
  from controlnet_module import controlnet_processor
5
  import torch
6
  from PIL import Image, ImageDraw
@@ -12,10 +12,103 @@ import random
12
  # === OPTIMIERTE EINSTELLUNGEN ===
13
  device = "cuda" if torch.cuda.is_available() else "cpu"
14
  torch_dtype = torch.float16 if device == "cuda" else torch.float32
15
- IMG_SIZE = 512 # Jetzt 512x512 für Realistic Vision
16
 
17
  print(f"Running on: {device}")
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  # === GESICHTSMASKEN-FUNKTIONEN ===
20
  def create_face_mask(image, bbox_coords, face_preserve):
21
  """Erzeugt eine Gesichtsmaske - WEIßE Bereiche werden VERÄNDERT, SCHWARZE BLEIBEN"""
@@ -55,50 +148,82 @@ def auto_detect_face_area(image):
55
 
56
  # === PIPELINES ===
57
  pipe_txt2img = None
 
58
  pipe_img2img = None
59
 
60
- def load_txt2img():
61
- global pipe_txt2img
62
- if pipe_txt2img is None:
63
- try:
64
- print("Loading Realistic Vision V6.0 for high-quality 512x512...")
65
- pipe_txt2img = StableDiffusionPipeline.from_pretrained(
66
- "runwayml/stable-diffusion-v1-5",
67
- torch_dtype=torch_dtype,
68
- safety_checker=None,
69
- requires_safety_checker=False,
70
- add_watermarker=False,
71
- use_safetensors=True, # Sicherheitsproblem behoben
72
- variant="fp16" if torch_dtype == torch.float16 else None,
73
- ).to(device)
74
-
75
- from diffusers import DPMSolverMultistepScheduler
76
- pipe_txt2img.scheduler = DPMSolverMultistepScheduler.from_config(
77
- pipe_txt2img.scheduler.config,
78
- use_karras_sigmas=True,
79
- algorithm_type="sde-dpmsolver++"
80
- )
81
-
82
- # T4 OPTIMIERUNGEN
83
- pipe_txt2img.enable_attention_slicing()
84
- pipe_txt2img.enable_vae_slicing()
85
- if hasattr(pipe_txt2img, 'vae'):
86
- pipe_txt2img.vae.enable_slicing()
87
-
88
- print("✅ Realistic Vision V6.0 erfolgreich geladen")
89
-
90
- except Exception as e:
91
- print(f"❌ Fehler beim Laden von Realistic Vision: {e}")
92
- print("🔄 Fallback auf SD 1.5...")
93
- # Fallback auf Standard SD 1.5
94
- pipe_txt2img = StableDiffusionPipeline.from_pretrained(
95
- "runwayml/stable-diffusion-v1-5",
96
- torch_dtype=torch_dtype,
97
- use_safetensors=True,
98
- ).to(device)
99
- pipe_txt2img.enable_attention_slicing()
100
 
101
- return pipe_txt2img
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
  def load_img2img():
104
  global pipe_img2img
@@ -108,7 +233,6 @@ def load_img2img():
108
  pipe_img2img = StableDiffusionInpaintPipeline.from_pretrained(
109
  "runwayml/stable-diffusion-inpainting",
110
  torch_dtype=torch_dtype,
111
- #use_safetensors=True, # Sicherheitsproblem behoben
112
  allow_pickle=False,
113
  safety_checker=None,
114
  ).to(device)
@@ -116,7 +240,6 @@ def load_img2img():
116
  print(f"Fehler beim Laden des Inpainting-Modells: {e}")
117
  raise
118
 
119
-
120
  from diffusers import DPMSolverMultistepScheduler
121
  pipe_img2img.scheduler = DPMSolverMultistepScheduler.from_config(
122
  pipe_img2img.scheduler.config,
@@ -132,7 +255,7 @@ def load_img2img():
132
 
133
  return pipe_img2img
134
 
135
- # === NEUE CALLBACK-FUNKTIONEN FÜR FORTSCHRITT (kompatibel mit neuer API) ===
136
  class TextToImageProgressCallback:
137
  def __init__(self, progress, total_steps):
138
  self.progress = progress
@@ -140,7 +263,6 @@ class TextToImageProgressCallback:
140
  self.current_step = 0
141
 
142
  def __call__(self, pipe, step, timestep, callback_kwargs):
143
- """Neue Callback-Signatur für diffusers >= 1.0.0"""
144
  self.current_step = step + 1
145
  progress_percent = (step / self.total_steps) * 100
146
  self.progress(progress_percent / 100, desc="Generierung läuft...")
@@ -155,12 +277,9 @@ class ImageToImageProgressCallback:
155
  self.actual_total_steps = None
156
 
157
  def __call__(self, pipe, step, timestep, callback_kwargs):
158
- """Neue Callback-Signatur für diffusers >= 1.0.0"""
159
  self.current_step = step + 1
160
 
161
- # Korrekte Berechnung der tatsächlichen Steps
162
  if self.actual_total_steps is None:
163
- # Bei Strength < 1.0 werden weniger Steps verwendet
164
  if self.strength < 1.0:
165
  self.actual_total_steps = int(self.total_steps * self.strength)
166
  else:
@@ -178,41 +297,33 @@ def create_preview_image(image, bbox_coords, face_preserve, mode_color):
178
  if image is None:
179
  return None
180
 
181
- # Erstelle eine Kopie für die Vorschau
182
  preview = image.copy()
183
  draw = ImageDraw.Draw(preview)
184
 
185
- # Rahmenfarbe basierend auf Modus
186
  if mode_color == "red":
187
- border_color = (255, 0, 0, 180) # Rot mit Transparenz
188
  mode_text = "NUR BILDELEMENT VERÄNDERN"
189
  else:
190
- border_color = (0, 255, 0, 180) # Grün mit Transparenz
191
  mode_text = "BILDELEMENT BEIBEHALTEN"
192
 
193
- # Zeichne den Rahmen um das gesamte Bild
194
  border_width = 8
195
  draw.rectangle([0, 0, preview.width-1, preview.height-1],
196
  outline=border_color, width=border_width)
197
 
198
- # Zeichne Bounding Box wenn Koordinaten vorhanden
199
  if bbox_coords and all(coord is not None for coord in bbox_coords):
200
  x1, y1, x2, y2 = bbox_coords
201
 
202
- # Rahmen für Bounding Box
203
- box_color = (255, 255, 0, 200) # Gelb für Bounding Box
204
  draw.rectangle([x1, y1, x2, y2], outline=box_color, width=3)
205
 
206
- # Text-Label für den Modus
207
  text_color = (255, 255, 255)
208
  bg_color = (0, 0, 0, 160)
209
 
210
- # Hintergrund für Text
211
  text_bbox = draw.textbbox((x1, y1 - 25), mode_text)
212
  draw.rectangle([text_bbox[0]-5, text_bbox[1]-2, text_bbox[2]+5, text_bbox[3]+2],
213
  fill=bg_color)
214
 
215
- # Text zeichnen
216
  draw.text((x1, y1 - 25), mode_text, fill=text_color)
217
 
218
  return preview
@@ -223,8 +334,6 @@ def update_live_preview(image, bbox_x1, bbox_y1, bbox_x2, bbox_y2, face_preserve
223
  return None
224
 
225
  bbox_coords = [bbox_x1, bbox_y1, bbox_x2, bbox_y2]
226
-
227
- # Bestimme Rahmenfarbe basierend auf Modus
228
  mode_color = "green" if face_preserve else "red"
229
 
230
  return create_preview_image(image, bbox_coords, face_preserve, mode_color)
@@ -238,39 +347,44 @@ def process_image_upload(image):
238
  image = image.resize((512, 512), Image.LANCZOS)
239
  print(f"Bild auf 512x512 skaliert")
240
 
241
- # Auto-Koordinaten generieren
242
  bbox = auto_detect_face_area(image)
243
  bbox_x1, bbox_y1, bbox_x2, bbox_y2 = bbox
244
 
245
- # Vorschau mit grünem Rahmen (Standard: Gesicht beibehalten)
246
  preview = create_preview_image(image, bbox, True, "green")
247
 
248
  return preview, bbox_x1, bbox_y1, bbox_x2, bbox_y2
249
 
250
- # === FUNKTIONEN ===
251
- def text_to_image(prompt, steps, guidance_scale, progress=gr.Progress()):
252
  try:
253
  if not prompt or not prompt.strip():
254
- return None
255
 
256
- print(f"Starting generation for: {prompt}")
 
 
 
 
 
 
257
  start_time = time.time()
258
 
259
  progress(0, desc="Lade Modell...")
260
- pipe = load_txt2img()
261
 
262
- # ZUFÄLLIGER SEED für Variation
263
  seed = random.randint(0, 2**32 - 1)
264
  generator = torch.Generator(device=device).manual_seed(seed)
265
- print(f"Using seed: {seed}")
266
 
267
  callback = TextToImageProgressCallback(progress, steps)
268
 
269
- # NEUE: 512x512 für Realistic Vision
 
270
  image = pipe(
271
  prompt=prompt,
272
- height=512, # ← 512 statt IMG_SIZE (1024)
273
- width=512, # ← 512 statt IMG_SIZE (1024)
 
274
  num_inference_steps=int(steps),
275
  guidance_scale=guidance_scale,
276
  generator=generator,
@@ -279,15 +393,20 @@ def text_to_image(prompt, steps, guidance_scale, progress=gr.Progress()):
279
  ).images[0]
280
 
281
  end_time = time.time()
282
- print(f"Bild generiert in {end_time - start_time:.2f} Sekunden")
 
283
 
284
- return image
 
 
 
285
 
286
  except Exception as e:
287
- print(f"Fehler in text_to_image: {e}")
 
288
  import traceback
289
  traceback.print_exc()
290
- return None
291
 
292
  def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
293
  face_preserve, bbox_x1, bbox_y1, bbox_x2, bbox_y2,
@@ -306,12 +425,8 @@ def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
306
 
307
  progress(0, desc="Starte Generierung mit ControlNet...")
308
 
309
- # -------------------------------
310
- # PARAMETER-TUNING
311
- # -------------------------------
312
  adj_strength = min(0.85, strength * 1.25)
313
 
314
- # CONTROLNET-STRENGTH ANPASSEN ABHÄNGIG VOM MODUS
315
  if face_preserve:
316
  controlnet_strength = adj_strength * 0.8
317
  print(f"🎯 ControlNet Modus: Umgebung beibehalten (Strength = {controlnet_strength:.3f})")
@@ -323,9 +438,6 @@ def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
323
 
324
  print(f"🎯 Steps={steps}, ControlNet-Steps={controlnet_steps}, Strength={controlnet_strength:.3f}")
325
 
326
- # -------------------------------
327
- # CONTROLNET GENERIERUNG
328
- # -------------------------------
329
  progress(0.05, desc="Erstelle ControlNet Maps...")
330
 
331
  controlnet_output, inpaint_input = controlnet_processor.generate_with_controlnet(
@@ -342,9 +454,6 @@ def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
342
  print(f"✅ ControlNet Output erhalten: {type(controlnet_output)}")
343
  print(f"✅ Inpaint Input erhalten: {type(inpaint_input)}")
344
 
345
- # -------------------------------
346
- # INPAINT (STABLE DIFFUSION IMG2IMG)
347
- # -------------------------------
348
  progress(0.3, desc="ControlNet abgeschlossen – starte Inpaint...")
349
 
350
  pipe = load_img2img()
@@ -356,9 +465,6 @@ def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
356
  generator = torch.Generator(device=device).manual_seed(seed)
357
  print(f"Using seed: {seed}")
358
 
359
- # -------------------------------
360
- # GESICHTS-MASKE (falls Koordinaten)
361
- # -------------------------------
362
  mask = None
363
  if bbox_x1 and bbox_y1 and bbox_x2 and bbox_y2:
364
  orig_w, orig_h = image.size
@@ -376,9 +482,6 @@ def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
376
  else:
377
  print("⚠️ Keine gültigen Koordinaten – keine Maske")
378
 
379
- # -------------------------------
380
- # PIPELINE-AUFRUF
381
- # -------------------------------
382
  from diffusers import EulerAncestralDiscreteScheduler
383
  if not isinstance(pipe.scheduler, EulerAncestralDiscreteScheduler):
384
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
@@ -418,6 +521,16 @@ def update_bbox_from_image(image):
418
  bbox = auto_detect_face_area(image)
419
  return bbox[0], bbox[1], bbox[2], bbox[3]
420
 
 
 
 
 
 
 
 
 
 
 
421
  def main_ui():
422
  with gr.Blocks(
423
  title="AI Image Generator",
@@ -444,16 +557,24 @@ def main_ui():
444
  background: #bbdefb;
445
  text-decoration: underline;
446
  }
447
- #start-button {
 
 
 
 
 
 
 
 
448
  background-color: #0080FF !important;
449
  border: none !important;
450
- margin: 50px auto !important;
451
  display: block !important;
452
  font-weight: 600;
453
  width: 280px;
454
  }
455
- #start-button:hover {
456
- background-color: #D3D3D3 !important;
457
  }
458
  .hint-box {
459
  margin-top: 20px;
@@ -487,57 +608,124 @@ def main_ui():
487
  font-weight: 600 !important;
488
  line-height: 1.4 !important;
489
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
490
  """
491
  ) as demo:
492
 
493
  with gr.Column(visible=True) as content_area:
494
  with gr.Tab("Text zu Bild"):
495
- gr.Markdown("**Beschreibe dein gewünschtes Bild:**")
496
 
497
  with gr.Row():
498
- txt_input = gr.Textbox(
499
- placeholder="z.B. ultra realistic mountain landscape at sunrise, soft mist over the valley, detailed foliage, crisp textures, depth of field, sunlight rays through clouds, shot on medium format camera, 8k, HDR, hyper-detailed, natural lighting, masterpiece, Eingabe unten:(Schritt Inferenz:35, Prompt-Stärke:9)",
500
- lines=2,
501
- label="Prompt (Englisch)",
502
- info="Beschreibe detailliert, was du sehen möchtest. Verwende Kommas zur Trennung."
503
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
504
 
505
  with gr.Row():
506
  with gr.Column():
507
  txt_steps = gr.Slider(
508
  minimum=10, maximum=100, value=35, step=1,
509
- label="Inferenz-Schritte",
510
  info="Mehr Schritte = bessere Qualität, aber langsamer (20-50 empfohlen)"
511
  )
512
  with gr.Column():
513
  txt_guidance = gr.Slider(
514
  minimum=1.0, maximum=20.0, value=7.5, step=0.5,
515
- label="Prompt-Stärke",
516
  info="Wie stark der Prompt befolgt wird (7-12 für gute Balance)"
517
  )
518
 
519
- generate_btn = gr.Button("Bild generieren", variant="primary")
520
- txt_output = gr.Image(
521
- label="Generiertes Bild",
522
- show_download_button=True,
523
- type="pil"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
524
  )
525
 
526
  generate_btn.click(
527
  fn=text_to_image,
528
- inputs=[txt_input, txt_steps, txt_guidance],
529
- outputs=txt_output,
530
  concurrency_limit=1
531
  )
532
 
533
  with gr.Tab("Bild zu Bild"):
534
- gr.Markdown("**Lade ein Bild hoch und beschreibe die gewünschte Veränderung:**")
535
 
536
  with gr.Row():
537
  with gr.Column():
538
  img_input = gr.Image(
539
  type="pil",
540
- label="Eingabebild",
541
  height=300,
542
  sources=["upload"],
543
  elem_id="image-upload"
@@ -552,37 +740,37 @@ def main_ui():
552
 
553
  with gr.Row():
554
  face_preserve = gr.Checkbox(
555
- label="Schutz",
556
  value=True,
557
- info="🟢 Checkbox AN: Alles AUSSERHALB des gelben Rahmens verändern | 🔴 Checkbox AUS: Nur INNERHALB des gelben Rahmens verändern"
558
  )
559
 
560
  with gr.Row():
561
- gr.Markdown("**Bildelementbereich anpassen**")
562
 
563
  with gr.Row():
564
  with gr.Column():
565
  bbox_x1 = gr.Slider(
566
- label="Links (x1)",
567
  minimum=0, maximum=512, value=100, step=1,
568
  info="Linke Kante des Bildelementbereichs"
569
  )
570
  with gr.Column():
571
  bbox_y1 = gr.Slider(
572
- label="Oben (y1)",
573
  minimum=0, maximum=512, value=100, step=1,
574
  info="Obere Kante des Bildelementbereichs"
575
  )
576
  with gr.Row():
577
  with gr.Column():
578
  bbox_x2 = gr.Slider(
579
- label="Rechts (x2)",
580
  minimum=0, maximum=512, value=300, step=1,
581
  info="Rechte Kante des Bildelementbereichs"
582
  )
583
  with gr.Column():
584
  bbox_y2 = gr.Slider(
585
- label="Unten (y2)",
586
  minimum=0, maximum=512, value=300, step=1,
587
  info="Untere Kante des Bildelementbereichs"
588
  )
@@ -592,14 +780,14 @@ def main_ui():
592
  img_prompt = gr.Textbox(
593
  placeholder="change background to beach with palm trees, keep person unchanged, sunny day",
594
  lines=2,
595
- label="Transformations-Prompt (Englisch)",
596
  info="Was soll verändert werden? Sei spezifisch."
597
  )
598
  with gr.Column():
599
  img_neg_prompt = gr.Textbox(
600
  placeholder="blurry, deformed, ugly, bad anatomy, extra limbs, poorly drawn hands",
601
  lines=2,
602
- label="Negativ-Prompt (Englisch)",
603
  info="Was soll vermieden werden? Unerwünschte Elemente auflisten."
604
  )
605
 
@@ -607,38 +795,39 @@ def main_ui():
607
  with gr.Column():
608
  strength_slider = gr.Slider(
609
  minimum=0.1, maximum=0.9, value=0.4, step=0.05,
610
- label="Veränderungs-Stärke",
611
  info="0.1-0.3: Leichte Anpassungen, 0.4-0.6: Mittlere Veränderungen, 0.7-0.9: Starke Umgestaltung"
612
  )
613
  with gr.Column():
614
  img_steps = gr.Slider(
615
  minimum=10, maximum=100, value=35, step=1,
616
- label="Inferenz-Schritte",
617
  info="Anzahl der Verarbeitungsschritte (25-45 für gute Ergebnisse)"
618
  )
619
  with gr.Column():
620
  img_guidance = gr.Slider(
621
  minimum=1.0, maximum=20.0, value=7.5, step=0.5,
622
- label="Prompt-Stärke",
623
  info="Einfluss des Prompts auf das Ergebnis (6-10 für natürliche Ergebnisse)"
624
  )
625
 
626
  with gr.Row():
627
  gr.Markdown(
628
- "**Achtung:**\n"
629
  "• **🆕 Automatische Bildelementerkennung** setzt Koordinaten beim Upload\n"
630
  "• **🆕 Live-Vorschau** zeigt farbige Rahmen je nach Modus (🔴 Rot / 🟢 Grün)\n"
631
  "• **🆕 Koordinaten-Schieberegler** für präzise Anpassung mit Live-Update\n"
632
  "• **Koordinaten nur bei erkennbaren Verzerrungen anpassen** (Bereiche leicht verschieben)"
633
  )
634
 
635
- transform_btn = gr.Button("Bild transformieren", variant="primary")
636
 
637
  with gr.Row():
638
  img_output = gr.Image(
639
- label="Transformiertes Bild",
640
  show_download_button=True,
641
- type="pil"
 
642
  )
643
 
644
  img_input.change(
@@ -649,29 +838,12 @@ def main_ui():
649
 
650
  coordinate_inputs = [img_input, bbox_x1, bbox_y1, bbox_x2, bbox_y2, face_preserve]
651
 
652
- bbox_x1.change(
653
- fn=update_live_preview,
654
- inputs=coordinate_inputs,
655
- outputs=preview_output
656
- )
657
-
658
- bbox_y1.change(
659
- fn=update_live_preview,
660
- inputs=coordinate_inputs,
661
- outputs=preview_output
662
- )
663
-
664
- bbox_x2.change(
665
- fn=update_live_preview,
666
- inputs=coordinate_inputs,
667
- outputs=preview_output
668
- )
669
-
670
- bbox_y2.change(
671
- fn=update_live_preview,
672
- inputs=coordinate_inputs,
673
- outputs=preview_output
674
- )
675
 
676
  face_preserve.change(
677
  fn=update_live_preview,
@@ -694,7 +866,7 @@ def main_ui():
694
 
695
  if __name__ == "__main__":
696
  demo = main_ui()
697
- demo.queue(max_size=3) # Beide Parameter
698
  demo.launch(
699
  server_name="0.0.0.0",
700
  server_port=7860,
 
1
  import gradio as gr
2
  from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
3
+ from diffusers import StableDiffusionInpaintPipeline, AutoencoderKL
4
  from controlnet_module import controlnet_processor
5
  import torch
6
  from PIL import Image, ImageDraw
 
12
  # === OPTIMIERTE EINSTELLUNGEN ===
13
  device = "cuda" if torch.cuda.is_available() else "cpu"
14
  torch_dtype = torch.float16 if device == "cuda" else torch.float32
15
+ IMG_SIZE = 512
16
 
17
  print(f"Running on: {device}")
18
 
19
+ # === MODELLKONFIGURATION ===
20
+ MODEL_CONFIGS = {
21
+ "runwayml/stable-diffusion-v1-5": {
22
+ "name": "🏠 Stable Diffusion 1.5 (Universal)",
23
+ "description": "Universal model, good all-rounder, reliable results",
24
+ "requires_vae": False,
25
+ "recommended_steps": 35,
26
+ "recommended_cfg": 7.5
27
+ },
28
+ "SG161222/Realistic_Vision_V6.0_B1_noVAE": {
29
+ "name": "👤 Realistic Vision V6.0 (Portraits)",
30
+ "description": "Best for photorealistic faces, skin details, human portraits",
31
+ "requires_vae": True,
32
+ "vae_model": "stabilityai/sd-vae-ft-mse",
33
+ "recommended_steps": 40,
34
+ "recommended_cfg": 7.0
35
+ },
36
+ "RunDiffusion/Juggernaut-X-v10": {
37
+ "name": "🏢 Juggernaut X (Business)",
38
+ "description": "Ideal for corporate images, team photos, professional settings",
39
+ "requires_vae": False,
40
+ "recommended_steps": 35,
41
+ "recommended_cfg": 7.5
42
+ },
43
+ "Lykon/DreamShaper": {
44
+ "name": "🎨 DreamShaper (Artistic)",
45
+ "description": "Creative interpretations, artistic styles, illustrations",
46
+ "requires_vae": False,
47
+ "recommended_steps": 40,
48
+ "recommended_cfg": 8.0
49
+ },
50
+ "nitrosocke/redshift-diffusion": {
51
+ "name": "🖼️ Redshift Diffusion (Design)",
52
+ "description": "Clean CGI style, product visuals, design mockups",
53
+ "requires_vae": False,
54
+ "recommended_steps": 30,
55
+ "recommended_cfg": 8.5
56
+ }
57
+ }
58
+
59
+ # Aktuell ausgewähltes Modell (wird vom User gesetzt)
60
+ current_model_id = "runwayml/stable-diffusion-v1-5"
61
+
62
+ # === AUTOMATISCHE NEGATIVE PROMPT GENERIERUNG ===
63
+ def auto_negative_prompt(positive_prompt):
64
+ """Generiert automatisch negative Prompts basierend auf dem positiven Prompt"""
65
+ p = positive_prompt.lower()
66
+ negatives = []
67
+
68
+ # Personen / Portraits
69
+ if any(w in p for w in ["person", "man", "woman", "face", "portrait", "team", "employee", "people", "crowd"]):
70
+ negatives.append(
71
+ "bad anatomy, malformed hands, extra fingers, uneven eyes, distorted face, unrealistic skin, mutated"
72
+ )
73
+
74
+ # Business / Corporate
75
+ if any(w in p for w in ["office", "business", "team", "meeting", "corporate", "company", "workplace"]):
76
+ negatives.append(
77
+ "overexposed, oversaturated, harsh lighting, watermark, text, logo, brand"
78
+ )
79
+
80
+ # Produkt / CGI
81
+ if any(w in p for w in ["product", "packshot", "mockup", "render", "3d", "cgi", "packaging"]):
82
+ negatives.append(
83
+ "plastic texture, noisy, overly reflective surfaces, watermark, text, low poly"
84
+ )
85
+
86
+ # Landschaft / Umgebung
87
+ if any(w in p for w in ["landscape", "nature", "mountain", "forest", "outdoor", "beach", "sky"]):
88
+ negatives.append(
89
+ "blurry, oversaturated, unnatural colors, distorted horizon, floating objects"
90
+ )
91
+
92
+ # Logos / Symbole
93
+ if any(w in p for w in ["logo", "symbol", "icon", "typography", "badge", "emblem"]):
94
+ negatives.append(
95
+ "watermark, signature, username, text, writing, scribble, messy"
96
+ )
97
+
98
+ # Architektur / Gebäude
99
+ if any(w in p for w in ["building", "architecture", "house", "interior", "room", "facade"]):
100
+ negatives.append(
101
+ "deformed, distorted perspective, floating objects, collapsing structure"
102
+ )
103
+
104
+ # Basis negative Prompts für alle Fälle
105
+ base_negatives = "low quality, worst quality, blurry, jpeg artifacts, ugly, deformed"
106
+
107
+ if negatives:
108
+ return base_negatives + ", " + ", ".join(negatives)
109
+ else:
110
+ return base_negatives
111
+
112
  # === GESICHTSMASKEN-FUNKTIONEN ===
113
  def create_face_mask(image, bbox_coords, face_preserve):
114
  """Erzeugt eine Gesichtsmaske - WEIßE Bereiche werden VERÄNDERT, SCHWARZE BLEIBEN"""
 
148
 
149
  # === PIPELINES ===
150
  pipe_txt2img = None
151
+ current_pipe_model_id = None
152
  pipe_img2img = None
153
 
154
+ def load_txt2img(model_id):
155
+ """Lädt das Text-to-Image Modell basierend auf der Auswahl"""
156
+ global pipe_txt2img, current_pipe_model_id
157
+
158
+ # Wenn bereits das richtige Modell geladen ist, nichts tun
159
+ if pipe_txt2img is not None and current_pipe_model_id == model_id:
160
+ print(f"✅ Modell {model_id} bereits geladen")
161
+ return pipe_txt2img
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
 
163
+ print(f"🔄 Lade Modell: {model_id}")
164
+
165
+ config = MODEL_CONFIGS[model_id]
166
+ print(f"📋 Modell-Konfiguration: {config['name']}")
167
+ print(f"📝 Beschreibung: {config['description']}")
168
+
169
+ try:
170
+ # VAE-Handling basierend auf Modellkonfiguration
171
+ vae = None
172
+ if config.get("requires_vae", False):
173
+ print(f"🔧 Lade externe VAE: {config['vae_model']}")
174
+ vae = AutoencoderKL.from_pretrained(
175
+ config["vae_model"],
176
+ torch_dtype=torch_dtype
177
+ ).to(device)
178
+ print("✅ VAE erfolgreich geladen")
179
+
180
+ # Modell laden
181
+ print(f"📥 Lade Hauptmodell von Hugging Face...")
182
+ pipe_txt2img = StableDiffusionPipeline.from_pretrained(
183
+ model_id,
184
+ torch_dtype=torch_dtype,
185
+ safety_checker=None,
186
+ requires_safety_checker=False,
187
+ add_watermarker=False,
188
+ use_safetensors=True,
189
+ variant="fp16" if torch_dtype == torch.float16 else None,
190
+ vae=vae
191
+ ).to(device)
192
+
193
+ # Scheduler konfigurieren
194
+ from diffusers import DPMSolverMultistepScheduler
195
+ pipe_txt2img.scheduler = DPMSolverMultistepScheduler.from_config(
196
+ pipe_txt2img.scheduler.config,
197
+ use_karras_sigmas=True,
198
+ algorithm_type="sde-dpmsolver++"
199
+ )
200
+
201
+ # Optimierungen
202
+ pipe_txt2img.enable_attention_slicing()
203
+ pipe_txt2img.enable_vae_slicing()
204
+ if hasattr(pipe_txt2img, 'vae'):
205
+ pipe_txt2img.vae.enable_slicing()
206
+
207
+ current_pipe_model_id = model_id
208
+ print(f"✅ {config['name']} erfolgreich geladen")
209
+ print(f"⚙️ Empfohlene Einstellungen: Steps={config['recommended_steps']}, CFG={config['recommended_cfg']}")
210
+
211
+ return pipe_txt2img
212
+
213
+ except Exception as e:
214
+ print(f"❌ Fehler beim Laden von {model_id}: {e}")
215
+ print("🔄 Fallback auf SD 1.5...")
216
+
217
+ # Fallback auf Standard SD 1.5
218
+ pipe_txt2img = StableDiffusionPipeline.from_pretrained(
219
+ "runwayml/stable-diffusion-v1-5",
220
+ torch_dtype=torch_dtype,
221
+ use_safetensors=True,
222
+ ).to(device)
223
+ pipe_txt2img.enable_attention_slicing()
224
+ current_pipe_model_id = "runwayml/stable-diffusion-v1-5"
225
+
226
+ return pipe_txt2img
227
 
228
  def load_img2img():
229
  global pipe_img2img
 
233
  pipe_img2img = StableDiffusionInpaintPipeline.from_pretrained(
234
  "runwayml/stable-diffusion-inpainting",
235
  torch_dtype=torch_dtype,
 
236
  allow_pickle=False,
237
  safety_checker=None,
238
  ).to(device)
 
240
  print(f"Fehler beim Laden des Inpainting-Modells: {e}")
241
  raise
242
 
 
243
  from diffusers import DPMSolverMultistepScheduler
244
  pipe_img2img.scheduler = DPMSolverMultistepScheduler.from_config(
245
  pipe_img2img.scheduler.config,
 
255
 
256
  return pipe_img2img
257
 
258
+ # === NEUE CALLBACK-FUNKTIONEN FÜR FORTSCHRITT ===
259
  class TextToImageProgressCallback:
260
  def __init__(self, progress, total_steps):
261
  self.progress = progress
 
263
  self.current_step = 0
264
 
265
  def __call__(self, pipe, step, timestep, callback_kwargs):
 
266
  self.current_step = step + 1
267
  progress_percent = (step / self.total_steps) * 100
268
  self.progress(progress_percent / 100, desc="Generierung läuft...")
 
277
  self.actual_total_steps = None
278
 
279
  def __call__(self, pipe, step, timestep, callback_kwargs):
 
280
  self.current_step = step + 1
281
 
 
282
  if self.actual_total_steps is None:
 
283
  if self.strength < 1.0:
284
  self.actual_total_steps = int(self.total_steps * self.strength)
285
  else:
 
297
  if image is None:
298
  return None
299
 
 
300
  preview = image.copy()
301
  draw = ImageDraw.Draw(preview)
302
 
 
303
  if mode_color == "red":
304
+ border_color = (255, 0, 0, 180)
305
  mode_text = "NUR BILDELEMENT VERÄNDERN"
306
  else:
307
+ border_color = (0, 255, 0, 180)
308
  mode_text = "BILDELEMENT BEIBEHALTEN"
309
 
 
310
  border_width = 8
311
  draw.rectangle([0, 0, preview.width-1, preview.height-1],
312
  outline=border_color, width=border_width)
313
 
 
314
  if bbox_coords and all(coord is not None for coord in bbox_coords):
315
  x1, y1, x2, y2 = bbox_coords
316
 
317
+ box_color = (255, 255, 0, 200)
 
318
  draw.rectangle([x1, y1, x2, y2], outline=box_color, width=3)
319
 
 
320
  text_color = (255, 255, 255)
321
  bg_color = (0, 0, 0, 160)
322
 
 
323
  text_bbox = draw.textbbox((x1, y1 - 25), mode_text)
324
  draw.rectangle([text_bbox[0]-5, text_bbox[1]-2, text_bbox[2]+5, text_bbox[3]+2],
325
  fill=bg_color)
326
 
 
327
  draw.text((x1, y1 - 25), mode_text, fill=text_color)
328
 
329
  return preview
 
334
  return None
335
 
336
  bbox_coords = [bbox_x1, bbox_y1, bbox_x2, bbox_y2]
 
 
337
  mode_color = "green" if face_preserve else "red"
338
 
339
  return create_preview_image(image, bbox_coords, face_preserve, mode_color)
 
347
  image = image.resize((512, 512), Image.LANCZOS)
348
  print(f"Bild auf 512x512 skaliert")
349
 
 
350
  bbox = auto_detect_face_area(image)
351
  bbox_x1, bbox_y1, bbox_x2, bbox_y2 = bbox
352
 
 
353
  preview = create_preview_image(image, bbox, True, "green")
354
 
355
  return preview, bbox_x1, bbox_y1, bbox_x2, bbox_y2
356
 
357
+ # === HAUPTFUNKTIONEN ===
358
+ def text_to_image(prompt, model_id, steps, guidance_scale, progress=gr.Progress()):
359
  try:
360
  if not prompt or not prompt.strip():
361
+ return None, "Bitte einen Prompt eingeben"
362
 
363
+ print(f"🚀 Starte Generierung mit Modell: {model_id}")
364
+ print(f"📝 Prompt: {prompt}")
365
+
366
+ # Automatische negative Prompts generieren
367
+ auto_negatives = auto_negative_prompt(prompt)
368
+ print(f"🤖 Automatisch generierte Negative Prompts: {auto_negatives}")
369
+
370
  start_time = time.time()
371
 
372
  progress(0, desc="Lade Modell...")
373
+ pipe = load_txt2img(model_id)
374
 
 
375
  seed = random.randint(0, 2**32 - 1)
376
  generator = torch.Generator(device=device).manual_seed(seed)
377
+ print(f"🌱 Seed: {seed}")
378
 
379
  callback = TextToImageProgressCallback(progress, steps)
380
 
381
+ print(f"⚙️ Einstellungen: Steps={steps}, CFG={guidance_scale}")
382
+
383
  image = pipe(
384
  prompt=prompt,
385
+ negative_prompt=auto_negatives,
386
+ height=512,
387
+ width=512,
388
  num_inference_steps=int(steps),
389
  guidance_scale=guidance_scale,
390
  generator=generator,
 
393
  ).images[0]
394
 
395
  end_time = time.time()
396
+ duration = end_time - start_time
397
+ print(f"✅ Bild generiert in {duration:.2f} Sekunden")
398
 
399
+ config = MODEL_CONFIGS.get(model_id, MODEL_CONFIGS["runwayml/stable-diffusion-v1-5"])
400
+ status_msg = f"✅ Generiert mit {config['name']} in {duration:.1f}s"
401
+
402
+ return image, status_msg
403
 
404
  except Exception as e:
405
+ error_msg = f"Fehler: {str(e)}"
406
+ print(f"❌ Fehler in text_to_image: {e}")
407
  import traceback
408
  traceback.print_exc()
409
+ return None, error_msg
410
 
411
  def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
412
  face_preserve, bbox_x1, bbox_y1, bbox_x2, bbox_y2,
 
425
 
426
  progress(0, desc="Starte Generierung mit ControlNet...")
427
 
 
 
 
428
  adj_strength = min(0.85, strength * 1.25)
429
 
 
430
  if face_preserve:
431
  controlnet_strength = adj_strength * 0.8
432
  print(f"🎯 ControlNet Modus: Umgebung beibehalten (Strength = {controlnet_strength:.3f})")
 
438
 
439
  print(f"🎯 Steps={steps}, ControlNet-Steps={controlnet_steps}, Strength={controlnet_strength:.3f}")
440
 
 
 
 
441
  progress(0.05, desc="Erstelle ControlNet Maps...")
442
 
443
  controlnet_output, inpaint_input = controlnet_processor.generate_with_controlnet(
 
454
  print(f"✅ ControlNet Output erhalten: {type(controlnet_output)}")
455
  print(f"✅ Inpaint Input erhalten: {type(inpaint_input)}")
456
 
 
 
 
457
  progress(0.3, desc="ControlNet abgeschlossen – starte Inpaint...")
458
 
459
  pipe = load_img2img()
 
465
  generator = torch.Generator(device=device).manual_seed(seed)
466
  print(f"Using seed: {seed}")
467
 
 
 
 
468
  mask = None
469
  if bbox_x1 and bbox_y1 and bbox_x2 and bbox_y2:
470
  orig_w, orig_h = image.size
 
482
  else:
483
  print("⚠️ Keine gültigen Koordinaten – keine Maske")
484
 
 
 
 
485
  from diffusers import EulerAncestralDiscreteScheduler
486
  if not isinstance(pipe.scheduler, EulerAncestralDiscreteScheduler):
487
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
521
  bbox = auto_detect_face_area(image)
522
  return bbox[0], bbox[1], bbox[2], bbox[3]
523
 
524
+ def update_model_settings(model_id):
525
+ """Aktualisiert die empfohlenen Einstellungen basierend auf Modellauswahl"""
526
+ config = MODEL_CONFIGS.get(model_id, MODEL_CONFIGS["runwayml/stable-diffusion-v1-5"])
527
+
528
+ return (
529
+ config["recommended_steps"], # steps
530
+ config["recommended_cfg"], # guidance_scale
531
+ f"📊 Empfohlene Einstellungen: {config['recommended_steps']} Steps, CFG {config['recommended_cfg']}"
532
+ )
533
+
534
  def main_ui():
535
  with gr.Blocks(
536
  title="AI Image Generator",
 
557
  background: #bbdefb;
558
  text-decoration: underline;
559
  }
560
+ .model-info-box {
561
+ background: #e8f4fd;
562
+ padding: 12px;
563
+ border-radius: 6px;
564
+ margin: 10px 0;
565
+ border-left: 4px solid #2196f3;
566
+ font-size: 14px;
567
+ }
568
+ #generate-button {
569
  background-color: #0080FF !important;
570
  border: none !important;
571
+ margin: 20px auto !important;
572
  display: block !important;
573
  font-weight: 600;
574
  width: 280px;
575
  }
576
+ #generate-button:hover {
577
+ background-color: #0066CC !important;
578
  }
579
  .hint-box {
580
  margin-top: 20px;
 
608
  font-weight: 600 !important;
609
  line-height: 1.4 !important;
610
  }
611
+ .status-message {
612
+ padding: 10px;
613
+ border-radius: 5px;
614
+ margin: 10px 0;
615
+ text-align: center;
616
+ font-weight: 500;
617
+ }
618
+ .status-success {
619
+ background-color: #d4edda;
620
+ color: #155724;
621
+ border: 1px solid #c3e6cb;
622
+ }
623
+ .status-error {
624
+ background-color: #f8d7da;
625
+ color: #721c24;
626
+ border: 1px solid #f5c6cb;
627
+ }
628
  """
629
  ) as demo:
630
 
631
  with gr.Column(visible=True) as content_area:
632
  with gr.Tab("Text zu Bild"):
633
+ gr.Markdown("## 🎨 Text zu Bild Generator")
634
 
635
  with gr.Row():
636
+ with gr.Column(scale=2):
637
+ # Modellauswahl Dropdown
638
+ model_dropdown = gr.Dropdown(
639
+ choices=[
640
+ (config["name"], model_id)
641
+ for model_id, config in MODEL_CONFIGS.items()
642
+ ],
643
+ value="runwayml/stable-diffusion-v1-5",
644
+ label="📁 Modellauswahl",
645
+ info="Wähle ein Modell basierend auf deinem Anwendungsfall"
646
+ )
647
+
648
+ # Modellinformationen Box
649
+ model_info_box = gr.Markdown(
650
+ value="<div class='model-info-box'>"
651
+ "**🏠 Stable Diffusion 1.5 (Universal)**<br>"
652
+ "Universal model, good all-rounder, reliable results<br>"
653
+ "Empfohlene Einstellungen: 35 Steps, CFG 7.5"
654
+ "</div>",
655
+ label="Modellinformationen"
656
+ )
657
+
658
+ with gr.Column(scale=3):
659
+ txt_input = gr.Textbox(
660
+ placeholder="z.B. ultra realistic mountain landscape at sunrise, soft mist over the valley, detailed foliage, crisp textures, depth of field, sunlight rays through clouds, shot on medium format camera, 8k, HDR, hyper-detailed, natural lighting, masterpiece",
661
+ lines=3,
662
+ label="🎯 Prompt (Englisch)",
663
+ info="Beschreibe detailliert, was du sehen möchtest. Negative Prompts werden automatisch generiert."
664
+ )
665
 
666
  with gr.Row():
667
  with gr.Column():
668
  txt_steps = gr.Slider(
669
  minimum=10, maximum=100, value=35, step=1,
670
+ label="⚙️ Inferenz-Schritte",
671
  info="Mehr Schritte = bessere Qualität, aber langsamer (20-50 empfohlen)"
672
  )
673
  with gr.Column():
674
  txt_guidance = gr.Slider(
675
  minimum=1.0, maximum=20.0, value=7.5, step=0.5,
676
+ label="🎛️ Prompt-Stärke (CFG Scale)",
677
  info="Wie stark der Prompt befolgt wird (7-12 für gute Balance)"
678
  )
679
 
680
+ # Status-Nachricht
681
+ status_output = gr.Markdown(
682
+ value="",
683
+ elem_classes="status-message"
684
+ )
685
+
686
+ generate_btn = gr.Button("🚀 Bild generieren", variant="primary", elem_id="generate-button")
687
+
688
+ with gr.Row():
689
+ txt_output = gr.Image(
690
+ label="🖼️ Generiertes Bild",
691
+ show_download_button=True,
692
+ type="pil",
693
+ height=400
694
+ )
695
+
696
# Event handler: keeps the model info box and the recommended slider
# defaults in sync with the checkpoint selected in the dropdown.
def update_model_info(model_id):
    """Return ``(info_html, steps, cfg)`` for the chosen model id."""
    # Unknown ids fall back to the Stable Diffusion 1.5 defaults.
    cfg = MODEL_CONFIGS.get(
        model_id, MODEL_CONFIGS["runwayml/stable-diffusion-v1-5"]
    )
    info_html = f"""
    <div class='model-info-box'>
    <strong>{cfg['name']}</strong><br>
    {cfg['description']}<br>
    <em>Empfohlene Einstellungen: {cfg['recommended_steps']} Steps, CFG {cfg['recommended_cfg']}</em>
    </div>
    """
    return info_html, cfg["recommended_steps"], cfg["recommended_cfg"]
707
+
708
+ model_dropdown.change(
709
+ fn=update_model_info,
710
+ inputs=[model_dropdown],
711
+ outputs=[model_info_box, txt_steps, txt_guidance]
712
  )
713
 
714
  generate_btn.click(
715
  fn=text_to_image,
716
+ inputs=[txt_input, model_dropdown, txt_steps, txt_guidance],
717
+ outputs=[txt_output, status_output],
718
  concurrency_limit=1
719
  )
720
 
721
  with gr.Tab("Bild zu Bild"):
722
+ gr.Markdown("## 🖼️ Bild zu Bild Transformation")
723
 
724
  with gr.Row():
725
  with gr.Column():
726
  img_input = gr.Image(
727
  type="pil",
728
+ label="📤 Eingabebild",
729
  height=300,
730
  sources=["upload"],
731
  elem_id="image-upload"
 
740
 
741
  with gr.Row():
742
  face_preserve = gr.Checkbox(
743
+ label="🛡️ Schutzmodus",
744
  value=True,
745
+ info="🟢 AN: Alles AUSSERHALB des gelben Rahmens verändern | 🔴 AUS: Nur INNERHALB des gelben Rahmens verändern"
746
  )
747
 
748
  with gr.Row():
749
+ gr.Markdown("### 📐 Bildelementbereich anpassen")
750
 
751
  with gr.Row():
752
  with gr.Column():
753
  bbox_x1 = gr.Slider(
754
+ label="Links (x1)",
755
  minimum=0, maximum=512, value=100, step=1,
756
  info="Linke Kante des Bildelementbereichs"
757
  )
758
  with gr.Column():
759
  bbox_y1 = gr.Slider(
760
+ label="Oben (y1)",
761
  minimum=0, maximum=512, value=100, step=1,
762
  info="Obere Kante des Bildelementbereichs"
763
  )
764
  with gr.Row():
765
  with gr.Column():
766
  bbox_x2 = gr.Slider(
767
+ label="Rechts (x2)",
768
  minimum=0, maximum=512, value=300, step=1,
769
  info="Rechte Kante des Bildelementbereichs"
770
  )
771
  with gr.Column():
772
  bbox_y2 = gr.Slider(
773
+ label="Unten (y2)",
774
  minimum=0, maximum=512, value=300, step=1,
775
  info="Untere Kante des Bildelementbereichs"
776
  )
 
780
  img_prompt = gr.Textbox(
781
  placeholder="change background to beach with palm trees, keep person unchanged, sunny day",
782
  lines=2,
783
+ label="🎯 Transformations-Prompt (Englisch)",
784
  info="Was soll verändert werden? Sei spezifisch."
785
  )
786
  with gr.Column():
787
  img_neg_prompt = gr.Textbox(
788
  placeholder="blurry, deformed, ugly, bad anatomy, extra limbs, poorly drawn hands",
789
  lines=2,
790
+ label="🚫 Negativ-Prompt (Englisch)",
791
  info="Was soll vermieden werden? Unerwünschte Elemente auflisten."
792
  )
793
 
 
795
  with gr.Column():
796
  strength_slider = gr.Slider(
797
  minimum=0.1, maximum=0.9, value=0.4, step=0.05,
798
+ label="💪 Veränderungs-Stärke",
799
  info="0.1-0.3: Leichte Anpassungen, 0.4-0.6: Mittlere Veränderungen, 0.7-0.9: Starke Umgestaltung"
800
  )
801
  with gr.Column():
802
  img_steps = gr.Slider(
803
  minimum=10, maximum=100, value=35, step=1,
804
+ label="⚙️ Inferenz-Schritte",
805
  info="Anzahl der Verarbeitungsschritte (25-45 für gute Ergebnisse)"
806
  )
807
  with gr.Column():
808
  img_guidance = gr.Slider(
809
  minimum=1.0, maximum=20.0, value=7.5, step=0.5,
810
+ label="🎛️ Prompt-Stärke",
811
  info="Einfluss des Prompts auf das Ergebnis (6-10 für natürliche Ergebnisse)"
812
  )
813
 
814
  with gr.Row():
815
  gr.Markdown(
816
+ "### 📋 Hinweise:\n"
817
  "• **🆕 Automatische Bildelementerkennung** setzt Koordinaten beim Upload\n"
818
  "• **🆕 Live-Vorschau** zeigt farbige Rahmen je nach Modus (🔴 Rot / 🟢 Grün)\n"
819
  "• **🆕 Koordinaten-Schieberegler** für präzise Anpassung mit Live-Update\n"
820
  "• **Koordinaten nur bei erkennbaren Verzerrungen anpassen** (Bereiche leicht verschieben)"
821
  )
822
 
823
+ transform_btn = gr.Button("🔄 Bild transformieren", variant="primary")
824
 
825
  with gr.Row():
826
  img_output = gr.Image(
827
+ label="Transformiertes Bild",
828
  show_download_button=True,
829
+ type="pil",
830
+ height=400
831
  )
832
 
833
  img_input.change(
 
838
 
839
  coordinate_inputs = [img_input, bbox_x1, bbox_y1, bbox_x2, bbox_y2, face_preserve]
840
 
841
+ for slider in [bbox_x1, bbox_y1, bbox_x2, bbox_y2]:
842
+ slider.change(
843
+ fn=update_live_preview,
844
+ inputs=coordinate_inputs,
845
+ outputs=preview_output
846
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
847
 
848
  face_preserve.change(
849
  fn=update_live_preview,
 
866
 
867
  if __name__ == "__main__":
868
  demo = main_ui()
869
+ demo.queue(max_size=3)
870
  demo.launch(
871
  server_name="0.0.0.0",
872
  server_port=7860,