import gradio as gr
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
from diffusers import StableDiffusionInpaintPipeline
import torch
from PIL import Image, ImageDraw
import time
import random
# === OPTIMIZED SETTINGS ===
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32
IMG_SIZE = 512
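# float16 halves memory use and speeds up inference on GPU; CPU inference needs float32.
# 512x512 is the native training resolution of Stable Diffusion v1.5, so other sizes
# tend to produce weaker compositions.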
print(f"Running on: {device}")
# === TEXT INTEGRATION IMPORT ===
from text_integration import (
add_text_to_image,
create_text_integration_section_t2i,
create_text_integration_section_i2i,
capture_click,
update_text_preview_i2i,
update_text_preview_t2i
)
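# These helpers live in the local text_integration.py module alongside this app.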
# === FACE MASK FUNCTIONS ===
def create_face_mask(image, bbox_coords, face_preserve):
"""Erzeugt eine Gesichtsmaske - WEIßE Bereiche werden VERÄNDERT, SCHWARZE BLEIBEN"""
mask = Image.new("L", image.size, 0)
if bbox_coords and all(coord is not None for coord in bbox_coords):
x1, y1, x2, y2 = bbox_coords
draw = ImageDraw.Draw(mask)
if face_preserve:
draw.rectangle([0, 0, image.size[0], image.size[1]], fill=255)
draw.rectangle([x1, y1, x2, y2], fill=0)
print("Gesicht wird GESCHÜTZT - Umgebung wird verändert")
else:
draw.rectangle([x1, y1, x2, y2], fill=255)
print("Nur Gesicht wird verändert - Umgebung bleibt erhalten")
return mask
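# Example: for a 512x512 image with box [100, 100, 300, 300] and face_preserve=True,
# the mask is white everywhere except a black 200x200 rectangle over the face, so the
# inpainting pipeline repaints the surroundings and leaves the face untouched.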
def auto_detect_face_area(image):
"""Optimierten Vorschlag für Gesichtsbereich ohne externe Bibliotheken"""
width, height = image.size
face_size = min(width, height) * 0.4
x1 = (width - face_size) / 2
y1 = (height - face_size) / 4
x2 = x1 + face_size
y2 = y1 + face_size * 1.2
x1, y1 = max(0, int(x1)), max(0, int(y1))
x2, y2 = min(width, int(x2)), min(height, int(y2))
print(f"Geschätzte Gesichtskoordinaten: [{x1}, {y1}, {x2}, {y2}]")
return [x1, y1, x2, y2]
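# The heuristic above just guesses a centered box. A real detector could replace it;
# a minimal sketch using OpenCV's bundled Haar cascade (assumes opencv-python and
# numpy are installed; kept commented out so this file gains no extra dependencies):
#
#   import cv2
#   import numpy as np
#
#   def detect_face_opencv(image):
#       cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
#       gray = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2GRAY)
#       faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
#       if len(faces) == 0:
#           return auto_detect_face_area(image)  # fall back to the heuristic
#       x, y, w, h = faces[0]
#       return [int(x), int(y), int(x + w), int(y + h)]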
# === PIPELINES ===
pipe_txt2img = None
pipe_img2img = None
def load_txt2img():
global pipe_txt2img
if pipe_txt2img is None:
print("Loading Text-to-Image model...")
pipe_txt2img = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
torch_dtype=torch_dtype,
use_safetensors=True,
safety_checker=None,
requires_safety_checker=False,
).to(device)
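        # Swap the default scheduler for DPM-Solver++ (multistep), which usually
        # reaches comparable quality in fewer steps.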
from diffusers import DPMSolverMultistepScheduler
pipe_txt2img.scheduler = DPMSolverMultistepScheduler.from_config(pipe_txt2img.scheduler.config)
pipe_txt2img.enable_attention_slicing()
return pipe_txt2img
def load_img2img():
global pipe_img2img
if pipe_img2img is None:
print("Loading Inpainting model...")
try:
            pipe_img2img = StableDiffusionInpaintPipeline.from_pretrained(
                "runwayml/stable-diffusion-inpainting",
                torch_dtype=torch_dtype,
                use_safetensors=True,  # load safetensors weights instead of pickled checkpoints
                safety_checker=None,
                requires_safety_checker=False,
            ).to(device)
except Exception as e:
print(f"Fehler beim Laden des Modells: {e}")
raise
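        # sde-dpmsolver++ with Karras sigmas and "trailing" timestep spacing is a common
        # quality-oriented scheduler configuration for low step counts.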
from diffusers import DPMSolverMultistepScheduler
pipe_img2img.scheduler = DPMSolverMultistepScheduler.from_config(
pipe_img2img.scheduler.config,
algorithm_type="sde-dpmsolver++",
use_karras_sigmas=True,
timestep_spacing="trailing"
)
pipe_img2img.enable_attention_slicing()
pipe_img2img.enable_vae_tiling()
        pipe_img2img.enable_vae_slicing()
return pipe_img2img
# === CALLBACK FUNCTIONS ===
class TextToImageProgressCallback:
def __init__(self, progress, total_steps):
self.progress = progress
self.total_steps = total_steps
self.current_step = 0
def __call__(self, pipe, step, timestep, callback_kwargs):
self.current_step = step + 1
        self.progress(self.current_step / self.total_steps, desc="Generation in progress - CPU can take up to 20 minutes!")
return callback_kwargs
class ImageToImageProgressCallback:
def __init__(self, progress, total_steps, strength):
self.progress = progress
self.total_steps = total_steps
self.current_step = 0
self.strength = strength
self.actual_total_steps = None
def __call__(self, pipe, step, timestep, callback_kwargs):
self.current_step = step + 1
        if self.actual_total_steps is None:
            # With strength < 1.0 the pipeline skips the early timesteps, so only
            # roughly total_steps * strength denoising steps actually run.
            if self.strength < 1.0:
                self.actual_total_steps = max(1, int(self.total_steps * self.strength))
            else:
                self.actual_total_steps = self.total_steps
            print(f"Internal step count: strength {self.strength} -> {self.actual_total_steps} actual denoising steps")
        self.progress(min(1.0, self.current_step / self.actual_total_steps), desc="Generation in progress - CPU can take up to 20 minutes!")
return callback_kwargs
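# Example: strength 0.4 with 35 requested steps means the scheduler skips the early
# timesteps and runs only int(35 * 0.4) = 14 actual denoising steps, which is why the
# callback above rescales its progress denominator by the strength.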
# === PREVIEW FUNCTIONS ===
def create_preview_image(image, bbox_coords, face_preserve, mode_color):
"""Erstellt eine Vorschau mit farbigem Rahmen basierend auf dem Modus"""
if image is None:
return None
preview = image.copy()
draw = ImageDraw.Draw(preview)
    if mode_color == "red":
        border_color = (255, 0, 0, 180)
        mode_text = "CHANGE IMAGE ELEMENT ONLY"
    else:
        border_color = (0, 255, 0, 180)
        mode_text = "KEEP IMAGE ELEMENT"
border_width = 8
draw.rectangle([0, 0, preview.width-1, preview.height-1],
outline=border_color, width=border_width)
if bbox_coords and all(coord is not None for coord in bbox_coords):
x1, y1, x2, y2 = bbox_coords
box_color = (255, 255, 0, 200)
draw.rectangle([x1, y1, x2, y2], outline=box_color, width=3)
text_color = (255, 255, 255)
bg_color = (0, 0, 0, 160)
text_bbox = draw.textbbox((x1, y1 - 25), mode_text)
draw.rectangle([text_bbox[0]-5, text_bbox[1]-2, text_bbox[2]+5, text_bbox[3]+2],
fill=bg_color)
draw.text((x1, y1 - 25), mode_text, fill=text_color)
return preview
def update_live_preview(image, bbox_x1, bbox_y1, bbox_x2, bbox_y2, face_preserve):
"""Aktualisiert die Live-Vorschau bei Koordinaten-Änderungen"""
if image is None:
return None
bbox_coords = [bbox_x1, bbox_y1, bbox_x2, bbox_y2]
mode_color = "green" if face_preserve else "red"
return create_preview_image(image, bbox_coords, face_preserve, mode_color)
def process_image_upload(image):
"""Verarbeitet Bild-Upload und gibt Bild + Koordinaten zurück"""
if image is None:
return None, None, None, None, None
bbox = auto_detect_face_area(image)
bbox_x1, bbox_y1, bbox_x2, bbox_y2 = bbox
preview = create_preview_image(image, bbox, True, "green")
return preview, bbox_x1, bbox_y1, bbox_x2, bbox_y2
# === MAIN PROCESSES ===
def text_to_image(prompt, steps, guidance_scale, progress=gr.Progress()):
try:
if not prompt or not prompt.strip():
            return None, None
print(f"Starting generation for: {prompt}")
start_time = time.time()
        progress(0, desc="Generation in progress - CPU can take up to 20 minutes!")
pipe = load_txt2img()
seed = random.randint(0, 2**32 - 1)
generator = torch.Generator(device=device).manual_seed(seed)
print(f"Using seed: {seed}")
callback = TextToImageProgressCallback(progress, steps)
image = pipe(
prompt=prompt,
height=IMG_SIZE,
width=IMG_SIZE,
num_inference_steps=int(steps),
guidance_scale=guidance_scale,
generator=generator,
callback_on_step_end=callback,
callback_on_step_end_tensor_inputs=[],
).images[0]
end_time = time.time()
print(f"Bild generiert in {end_time - start_time:.2f} Sekunden")
return image
except Exception as e:
print(f"Fehler: {e}")
import traceback
traceback.print_exc()
        return None, None
def img_to_image(image, prompt, neg_prompt, strength, steps, guidance_scale, face_preserve, bbox_x1, bbox_y1, bbox_x2, bbox_y2, progress=gr.Progress()):
try:
if image is None:
return None
print(f"Img2Img Start → Strength: {strength}, Steps: {steps}, Guidance: {guidance_scale}")
start_time = time.time()
        progress(0, desc="Generation in progress - CPU can take up to 20 minutes!")
pipe = load_img2img()
img_resized = image.convert("RGB").resize((IMG_SIZE, IMG_SIZE))
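        # The UI slider is capped at 0.9; boost the value a little for a visible effect
        # but clamp at 0.85 so the result still relates to the source image.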
adj_strength = min(0.85, strength * 1.3)
adj_guidance = min(guidance_scale, 12.0)
seed = random.randint(0, 2**32 - 1)
generator = torch.Generator(device=device).manual_seed(seed)
print(f"Using seed: {seed}")
mask = None
bbox_coords = None
if bbox_x1 is not None and bbox_y1 is not None and bbox_x2 is not None and bbox_y2 is not None:
orig_width, orig_height = image.size
scale_x = IMG_SIZE / orig_width
scale_y = IMG_SIZE / orig_height
scaled_coords = [
int(bbox_x1 * scale_x),
int(bbox_y1 * scale_y),
int(bbox_x2 * scale_x),
int(bbox_y2 * scale_y)
]
bbox_coords = scaled_coords
        if bbox_coords:
            mask = create_face_mask(img_resized, bbox_coords, face_preserve)
        if mask is None:
            # The inpainting pipeline needs a mask; without a box, edit the whole image.
            mask = Image.new("L", img_resized.size, 255)
callback = ImageToImageProgressCallback(progress, int(steps), adj_strength)
result = pipe(
prompt=prompt,
negative_prompt=neg_prompt,
image=img_resized,
mask_image=mask,
strength=adj_strength,
num_inference_steps=int(steps),
guidance_scale=adj_guidance,
generator=generator,
callback_on_step_end=callback,
callback_on_step_end_tensor_inputs=[],
)
end_time = time.time()
print(f"Bild transformiert in {end_time - start_time:.2f} Sekunden")
generated_image = result.images[0]
return generated_image
except Exception as e:
print(f"Fehler: {e}")
import traceback
traceback.print_exc()
return None
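# Programmatic use outside the UI (a sketch; "portrait.jpg" is a placeholder path and
# the gr.Progress default only reports progress inside a queued Gradio event):
#
#   src = Image.open("portrait.jpg")
#   out = img_to_image(src, "change background to a beach", "blurry, deformed",
#                      0.4, 35, 7.5, True, 100, 100, 300, 300)
#   if out is not None:
#       out.save("result.png")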
# === TEXT INTEGRATION HANDLER ===
def handle_text_integration_i2i(original_image, generated_image, text, text_x, text_y, target_selector):
"""Verwaltet Text-Integration für Bild-zu-Bild basierend auf Auswahl"""
    if target_selector == "Originalbild":  # original image
        target_image = original_image
    else:  # "Generiertes Bild" (generated image)
        target_image = generated_image
result = add_text_to_image(target_image, text, text_x, text_y)
    # Return: the original stays unchanged; the image with text goes to the download area
return original_image, result
def handle_text_integration_t2i(generated_image, text, text_x, text_y):
"""Verwaltet Text-Integration für Text-zu-Bild"""
result = add_text_to_image(generated_image, text, text_x, text_y)
return result
def main_ui():
with gr.Blocks(
title="AI Image Generator",
theme=gr.themes.Base(),
css="""
.info-box {
background-color: #f8f4f0;
padding: 15px;
border-radius: 8px;
border-left: 4px solid #8B7355;
margin: 20px 0;
}
.text-integration-section {
background: #e8f5e8;
padding: 15px;
border-radius: 8px;
margin: 15px 0;
border-left: 4px solid #4caf50;
}
"""
) as demo:
        # --- Info area ---
gr.Markdown("# AI Image Generator")
with gr.Row():
with gr.Column(scale=1):
pass
with gr.Column(scale=1, min_width=300):
                start_btn = gr.Button("Continue to the application", variant="primary", size="lg")
with gr.Column(scale=1):
pass
        # --- Main application area ---
with gr.Column(visible=False) as content_area:
            # === TAB: TEXT TO IMAGE ===
            with gr.Tab("Text to Image"):
                gr.Markdown("**Describe the image you want:**")
with gr.Row():
txt_input = gr.Textbox(
placeholder="z.B. ultra realistic mountain landscape at sunrise...",
lines=2,
label="Prompt (Englisch)"
)
with gr.Row():
with gr.Column():
txt_steps = gr.Slider(
minimum=10, maximum=100, value=35, step=1,
label="Inferenz-Schritte"
)
with gr.Column():
txt_guidance = gr.Slider(
minimum=1.0, maximum=20.0, value=7.5, step=0.5,
label="Prompt-Stärke"
)
                generate_btn = gr.Button("Generate image", variant="primary")
txt_output = gr.Image(
label="Generiertes Bild",
show_download_button=True,
type="pil"
)
                # TEXT INTEGRATION FOR TEXT-TO-IMAGE
text_input_t2i, text_x_t2i, text_y_t2i, text_btn_t2i = create_text_integration_section_t2i()
                # PREVIEW FOR TEXT POSITION
preview_t2i = gr.Image(
label="Vorschau für Textposition (Klicken/Tippen um Position zu wählen)",
interactive=True,
show_download_button=False,
type="pil"
)
                # CLICK HANDLER FOR TEXT-TO-IMAGE
preview_t2i.select(
fn=capture_click,
outputs=[text_x_t2i, text_y_t2i]
)
                # LIVE TEXT PREVIEW FOR TEXT-TO-IMAGE
text_input_t2i.change(
fn=update_text_preview_t2i,
inputs=[preview_t2i, text_input_t2i, text_x_t2i, text_y_t2i],
outputs=preview_t2i
)
text_x_t2i.change(
fn=update_text_preview_t2i,
inputs=[preview_t2i, text_input_t2i, text_x_t2i, text_y_t2i],
outputs=preview_t2i
)
text_y_t2i.change(
fn=update_text_preview_t2i,
inputs=[preview_t2i, text_input_t2i, text_x_t2i, text_y_t2i],
outputs=preview_t2i
)
                # EVENT HANDLERS FOR TEXT-TO-IMAGE
generate_btn.click(
fn=text_to_image,
inputs=[txt_input, txt_steps, txt_guidance],
outputs=[txt_output, preview_t2i],
concurrency_limit=1
)
text_btn_t2i.click(
fn=handle_text_integration_t2i,
inputs=[txt_output, text_input_t2i, text_x_t2i, text_y_t2i],
outputs=txt_output
)
            # === TAB: IMAGE TO IMAGE ===
            with gr.Tab("Image to Image"):
                gr.Markdown("**Upload an image and describe the desired change:**")
with gr.Row():
with gr.Column():
img_input = gr.Image(
type="pil",
label="Eingabebild",
height=300,
sources=["upload"]
)
with gr.Column():
preview_output = gr.Image(
label="Live-Vorschau mit Maske (Klicken/Tippen für Textposition)",
height=300,
interactive=True,
show_download_button=False
)
with gr.Row():
face_preserve = gr.Checkbox(
label="Schutz",
value=True,
info="🟢 AN: Umgebung verändern | 🔴 AUS: Objekt verändern"
)
with gr.Row():
with gr.Column():
                        bbox_x1 = gr.Slider(label="Left (x1)", minimum=0, maximum=512, value=100, step=1)
                    with gr.Column():
                        bbox_y1 = gr.Slider(label="Top (y1)", minimum=0, maximum=512, value=100, step=1)
                with gr.Row():
                    with gr.Column():
                        bbox_x2 = gr.Slider(label="Right (x2)", minimum=0, maximum=512, value=300, step=1)
                    with gr.Column():
                        bbox_y2 = gr.Slider(label="Bottom (y2)", minimum=0, maximum=512, value=300, step=1)
with gr.Row():
with gr.Column():
img_prompt = gr.Textbox(
placeholder="change background to beach with palm trees...",
lines=2,
label="Transformations-Prompt"
)
with gr.Column():
img_neg_prompt = gr.Textbox(
placeholder="blurry, deformed, ugly...",
lines=2,
label="Negativ-Prompt"
)
with gr.Row():
with gr.Column():
                        strength_slider = gr.Slider(minimum=0.1, maximum=0.9, value=0.4, step=0.05, label="Transformation strength")
                    with gr.Column():
                        img_steps = gr.Slider(minimum=10, maximum=100, value=35, step=1, label="Inference steps")
                    with gr.Column():
                        img_guidance = gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Prompt strength (guidance)")
                transform_btn = gr.Button("Transform image", variant="primary")
with gr.Row():
img_output = gr.Image(
label="Transformiertes Bild",
show_download_button=True,
type="pil"
)
                # TEXT INTEGRATION FOR IMAGE-TO-IMAGE
text_input_i2i, text_x_i2i, text_y_i2i, target_selector, text_btn_i2i = create_text_integration_section_i2i()
                # CLICK HANDLER FOR IMAGE-TO-IMAGE
preview_output.select(
fn=capture_click,
outputs=[text_x_i2i, text_y_i2i]
)
                # LIVE TEXT PREVIEW FOR IMAGE-TO-IMAGE
text_input_i2i.change(
fn=update_text_preview_i2i,
inputs=[img_input, img_output, text_input_i2i, text_x_i2i, text_y_i2i, target_selector],
outputs=preview_output
)
text_x_i2i.change(
fn=update_text_preview_i2i,
inputs=[img_input, img_output, text_input_i2i, text_x_i2i, text_y_i2i, target_selector],
outputs=preview_output
)
text_y_i2i.change(
fn=update_text_preview_i2i,
inputs=[img_input, img_output, text_input_i2i, text_x_i2i, text_y_i2i, target_selector],
outputs=preview_output
)
target_selector.change(
fn=update_text_preview_i2i,
inputs=[img_input, img_output, text_input_i2i, text_x_i2i, text_y_i2i, target_selector],
outputs=preview_output
)
                # EVENT HANDLERS FOR IMAGE-TO-IMAGE
img_input.change(
fn=process_image_upload,
inputs=[img_input],
outputs=[preview_output, bbox_x1, bbox_y1, bbox_x2, bbox_y2]
)
coordinate_inputs = [img_input, bbox_x1, bbox_y1, bbox_x2, bbox_y2, face_preserve]
for coord in [bbox_x1, bbox_y1, bbox_x2, bbox_y2]:
coord.change(
fn=update_live_preview,
inputs=coordinate_inputs,
outputs=preview_output
)
face_preserve.change(
fn=update_live_preview,
inputs=coordinate_inputs,
outputs=preview_output
)
transform_btn.click(
fn=img_to_image,
inputs=[
img_input, img_prompt, img_neg_prompt,
strength_slider, img_steps, img_guidance,
face_preserve, bbox_x1, bbox_y1, bbox_x2, bbox_y2
],
outputs=img_output,
concurrency_limit=1
)
text_btn_i2i.click(
fn=handle_text_integration_i2i,
inputs=[img_input, img_output, text_input_i2i, text_x_i2i, text_y_i2i, target_selector],
outputs=[img_input, img_output]
)
        # === START BUTTON HANDLER ===
info_components = [child for child in demo.children if child != content_area]
start_btn.click(
fn=lambda: gr.update(visible=True),
inputs=None,
outputs=content_area
).then(
fn=lambda: [gr.update(visible=False) for _ in info_components],
inputs=None,
outputs=info_components
)
return demo
if __name__ == "__main__":
demo = main_ui()
demo.queue()
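    # queue() is required for gr.Progress updates and enforces the concurrency_limit=1
    # set on the generate/transform click handlers.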
demo.launch(
server_name="0.0.0.0",
server_port=7860,
max_file_size="10MB",
show_error=True,
share=False
)