Update app.py
Browse files
app.py
CHANGED
|
@@ -674,6 +674,101 @@ def update_slider_for_image(image):
|
|
| 674 |
gr.update(maximum=max_height)
|
| 675 |
)
|
| 676 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 677 |
# === KORRIGIERTE HAUPTFUNKTION MIT RICHTIGEM COMPOSITING ===
|
| 678 |
def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
|
| 679 |
mode, bbox_x1, bbox_y1, bbox_x2, bbox_y2,
|
|
|
|
| 674 |
gr.update(maximum=max_height)
|
| 675 |
)
|
| 676 |
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
# Quality keywords indicating the user already asked for high quality output.
_QUALITY_KEYWORDS = (
    'masterpiece', 'best quality', 'high quality', 'highly detailed',
    'exquisite', 'ultra detailed', 'professional',
    'perfect', 'excellent', 'amazing', 'stunning', 'beautiful',
)

# Attention-weight syntaxes, e.g. "(word:1.5)" or "[word]".
# Precompiled once at module level instead of on every call.
_WEIGHT_PATTERNS = (
    re.compile(r'\([^)]+:\d+(\.\d+)?\)'),
    re.compile(r'\[[^\]]+\]'),
)


def _enhance_prompt(prompt):
    """Return *prompt* with default quality keywords prepended, unless the
    user already supplied quality words or attention weights.

    Uses word-boundary matching so that e.g. "imperfect" does not count as
    the quality keyword "perfect" (the old substring check had this false
    positive and would wrongly skip the enhancement).
    """
    prompt_lower = prompt.lower()
    user_has_quality_words = False

    # Check for plain quality keywords (whole-word/phrase matches only).
    for keyword in _QUALITY_KEYWORDS:
        if re.search(r'\b' + re.escape(keyword) + r'\b', prompt_lower):
            user_has_quality_words = True
            print(f"✓ Benutzer verwendet bereits Qualitätswort: {keyword}")
            break

    # Check for explicit attention weights (e.g. "(word:1.5)", "[word]").
    if not user_has_quality_words:
        for pattern in _WEIGHT_PATTERNS:
            if pattern.search(prompt):
                user_has_quality_words = True
                print("✓ Benutzer verwendet bereits Gewichte im Prompt")
                break

    if not user_has_quality_words:
        enhanced_prompt = f"masterpiece, best quality, {prompt}"
        print(f"🔄 Verbesserter Prompt: {enhanced_prompt}")
    else:
        enhanced_prompt = prompt
        print("✓ Benutzerprompt wird unverändert verwendet")
    return enhanced_prompt


def text_to_image(prompt, model_id, steps, guidance_scale, progress=gr.Progress()):
    """Generate a 512x512 image from a text prompt via a txt2img pipeline.

    A negative prompt is derived automatically with ``auto_negative_prompt``
    and quality keywords are prepended unless the user already supplied them
    (see ``_enhance_prompt``). A fresh random seed is drawn per call.

    Args:
        prompt: User-supplied text prompt; empty/whitespace-only is rejected.
        model_id: Key into ``MODEL_CONFIGS`` selecting the txt2img model.
        steps: Number of inference steps (coerced to ``int``).
        guidance_scale: Classifier-free guidance scale (coerced to ``float``).
        progress: Gradio progress tracker, updated per step via callback.

    Returns:
        ``(image, status_message)`` on success, ``(None, error_message)``
        on failure or empty prompt.
    """
    try:
        if not prompt or not prompt.strip():
            return None, "Bitte einen Prompt eingeben"

        print(f"🚀 Starte Generierung mit Modell: {model_id}")
        print(f"📝 Prompt: {prompt}")

        # Derive negative prompts automatically from the user prompt.
        auto_negatives = auto_negative_prompt(prompt)
        print(f"🤖 Automatisch generierte Negative Prompts: {auto_negatives}")

        start_time = time.time()

        enhanced_prompt = _enhance_prompt(prompt)
        print(f"Finaler Prompt für Generation: {enhanced_prompt}")

        progress(0, desc="Lade Modell...")
        pipe = load_txt2img(model_id)

        # Fresh random seed per call so repeated generations differ.
        seed = random.randint(0, 2**32 - 1)
        generator = torch.Generator(device=device).manual_seed(seed)
        print(f"🌱 Seed: {seed}")

        callback = TextToImageProgressCallback(progress, steps)

        print(f"⚙️ Einstellungen: Steps={steps}, CFG={guidance_scale}")

        image = pipe(
            prompt=enhanced_prompt,
            negative_prompt=auto_negatives,
            height=512,
            width=512,
            num_inference_steps=int(steps),
            # Coerce like `steps` above — Gradio sliders may deliver strings.
            guidance_scale=float(guidance_scale),
            generator=generator,
            callback_on_step_end=callback,
            callback_on_step_end_tensor_inputs=[],
        ).images[0]

        duration = time.time() - start_time
        print(f"✅ Bild generiert in {duration:.2f} Sekunden")

        config = MODEL_CONFIGS.get(model_id, MODEL_CONFIGS["runwayml/stable-diffusion-v1-5"])
        status_msg = f"✅ Generiert mit {config['name']} in {duration:.1f}s"

        return image, status_msg

    except Exception as e:
        # UI boundary: surface the error to the user instead of raising,
        # but keep the full traceback in the server log for debugging.
        error_msg = f"❌ Fehler: {str(e)}"
        print(f"❌ Fehler in text_to_image: {e}")
        import traceback
        traceback.print_exc()
        return None, error_msg
|
| 770 |
+
|
| 771 |
+
|
| 772 |
# === KORRIGIERTE HAUPTFUNKTION MIT RICHTIGEM COMPOSITING ===
|
| 773 |
def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale,
|
| 774 |
mode, bbox_x1, bbox_y1, bbox_x2, bbox_y2,
|