File size: 2,754 Bytes
f0735ee
 
 
 
 
f450b0f
f0735ee
6769c24
 
 
 
 
 
 
 
 
f0735ee
 
f450b0f
f0735ee
 
 
 
 
 
 
 
 
 
f450b0f
f0735ee
 
 
 
 
f450b0f
f0735ee
 
 
 
 
 
6769c24
 
 
 
 
 
 
 
 
 
 
 
f0735ee
 
f450b0f
6769c24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import torch
from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel
from controlnet_aux import OpenposeDetector, ZoeDetector
from PIL import Image

print("Lade OpenPose_faceonly + Depth für perfekte Gesichter...")

# Preprocessors (loaded once at import time).
# NOTE(review): both branches call the same from_pretrained; the fallback only
# matters if the top-level OpenposeDetector import path is unavailable in
# older controlnet_aux versions.
try:
    # Modern controlnet_aux layout
    openpose_face = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
except Exception:  # narrow from bare except: don't swallow KeyboardInterrupt/SystemExit
    # Alternative/older package layout
    from controlnet_aux.open_pose import OpenposeDetector
    openpose_face = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")

depth_processor = ZoeDetector.from_pretrained("lllyasviel/ControlNet")

# ControlNet models (kept resident in VRAM)
controlnet_face = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_openpose",
    torch_dtype=torch.float16,
).to("cuda")

controlnet_depth = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11f1e_sd15_depth",
    torch_dtype=torch.float16,
).to("cuda")

# Pipeline cache (populated lazily by _get_facefix_pipeline)
_facefix_pipe = None

def _get_facefix_pipeline(model_id: str):
    """Return a cached ControlNet inpaint pipeline for *model_id*.

    The pipeline is built lazily on first use and rebuilt whenever a
    different ``model_id`` is requested.  (The original version cached
    only the first pipeline and silently ignored later model changes.)

    Args:
        model_id: HuggingFace model id (or local path) of the base SD model.

    Returns:
        A ``StableDiffusionControlNetInpaintPipeline`` on CUDA using the
        module-level face/depth ControlNets.
    """
    global _facefix_pipe
    # Function attribute tracks which model the cached pipeline was built for.
    cached_id = getattr(_get_facefix_pipeline, "_model_id", None)
    if _facefix_pipe is None or cached_id != model_id:
        print(f"Lade Face-Fix-Pipeline für Modell: {model_id}")
        _facefix_pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
            model_id,
            controlnet=[controlnet_face, controlnet_depth],
            torch_dtype=torch.float16,
            safety_checker=None,
        ).to("cuda")
        _get_facefix_pipeline._model_id = model_id

        # Optional optimizations — skip gracefully when unavailable.
        try:
            _facefix_pipe.enable_xformers_memory_efficient_attention()
        except Exception:  # narrowed from bare except
            print("XFormers nicht verfügbar, überspringe...")

        try:
            _facefix_pipe.enable_model_cpu_offload()  # saves ~2 GB of VRAM
        except Exception:  # narrowed from bare except
            print("CPU Offload nicht verfügbar, überspringe...")

    return _facefix_pipe

def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
    """Run an img2img-style face refinement pass over *image*.

    Generates OpenPose and depth control images from the input, then runs the
    ControlNet inpaint pipeline at moderate strength to clean up faces.

    Args:
        image: Source image (PIL).
        prompt: Positive prompt for the refinement pass.
        negative_prompt: Negative prompt.
        seed: RNG seed for reproducible output.
        model_id: Base SD model to use (forwarded to the pipeline cache).

    Returns:
        The refined image, or the unmodified input if anything fails
        (deliberate best-effort behavior).
    """
    try:
        pipe = _get_facefix_pipeline(model_id)

        # Generate control images from the source
        pose_img = openpose_face(image)
        depth_img = depth_processor(image)

        # BUGFIX: the inpaint pipeline requires a mask; passing None raised
        # inside pipe(...) so the except always returned the original image.
        # A full-white mask means "refine everywhere" (the intended no-mask
        # behavior for a whole-image face fix).
        full_mask = Image.new("L", image.size, 255)

        fixed = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            mask_image=full_mask,
            control_image=[pose_img, depth_img],
            controlnet_conditioning_scale=[0.85, 0.60],
            strength=0.42,
            num_inference_steps=20,
            guidance_scale=7.0,
            generator=torch.Generator("cuda").manual_seed(seed),
        ).images[0]

        return fixed

    except Exception as e:
        # Best-effort: log and fall back to the untouched input rather than
        # failing the whole generation.
        print(f"Face-Fix Fehler: {e}")
        return image