# Stable-ControlNet-GPU / controlnet_facefix.py
# (Hugging Face web-page residue — author/commit/size UI text — converted to
#  comments so the module is valid Python.)
import torch
from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel
from controlnet_aux import OpenposeDetector, ZoeDetector
from PIL import Image
# --- One-time setup at import: preprocessors, ControlNet models, pipeline cache ---
print("Lade OpenPose_faceonly + Depth für perfekte Gesichter...")

# Preprocessors (loaded once). Newer controlnet_aux releases expose
# OpenposeDetector at the package top level; older ones keep it in the
# open_pose submodule — fall back only on that specific failure instead of
# the previous bare `except:`, which also swallowed KeyboardInterrupt and
# genuine download errors.
try:
    openpose_face = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
except (ImportError, AttributeError):
    from controlnet_aux.open_pose import OpenposeDetector
    openpose_face = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")

# Zoe depth estimator for the second ControlNet branch.
depth_processor = ZoeDetector.from_pretrained("lllyasviel/ControlNet")

# ControlNet models (kept resident in VRAM, fp16).
controlnet_face = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_openpose",
    torch_dtype=torch.float16,
).to("cuda")
controlnet_depth = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11f1e_sd15_depth",
    torch_dtype=torch.float16,
).to("cuda")

# Cache for the (lazily built) face-fix pipeline; see _get_facefix_pipeline.
_facefix_pipe = None
def _get_facefix_pipeline(model_id: str):
    """Return the cached ControlNet inpaint pipeline for ``model_id``.

    Bug fixed: the previous cache check was only ``if _facefix_pipe is None``,
    so after the first call any different ``model_id`` silently got the
    pipeline of the *first* model. The model id used to build the cached
    pipeline is now remembered and a mismatch triggers a rebuild.

    Parameters
    ----------
    model_id : str
        Hugging Face model id (or local path) of the Stable Diffusion base model.

    Returns
    -------
    StableDiffusionControlNetInpaintPipeline
        Pipeline on CUDA with the openpose + depth ControlNets attached.
    """
    global _facefix_pipe
    cached_id = getattr(_facefix_pipe, "_facefix_model_id", None)
    if _facefix_pipe is None or cached_id != model_id:
        print(f"Lade Face-Fix-Pipeline für Modell: {model_id}")
        _facefix_pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
            model_id,
            controlnet=[controlnet_face, controlnet_depth],
            torch_dtype=torch.float16,
            safety_checker=None,
        ).to("cuda")
        # Remember which base model this pipeline was built from (cache key).
        _facefix_pipe._facefix_model_id = model_id
        # Optional memory savers — best effort. `except Exception` replaces the
        # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        try:
            _facefix_pipe.enable_xformers_memory_efficient_attention()
        except Exception:
            print("XFormers nicht verfügbar, überspringe...")
        # NOTE(review): .to("cuda") followed by model CPU offload is
        # contradictory in recent diffusers versions (offload manages device
        # placement itself) — confirm which of the two is actually intended.
        try:
            _facefix_pipe.enable_model_cpu_offload()  # spart ~2 GB!
        except Exception:
            print("CPU Offload nicht verfügbar, überspringe...")
    return _facefix_pipe
def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
    """Run a low-strength ControlNet (openpose + depth) inpaint pass over ``image``.

    Bug fixed: ``StableDiffusionControlNetInpaintPipeline`` requires a mask
    image; the old code passed ``mask_image=None``, which made every call
    raise inside the pipeline and fall through to the exception handler —
    i.e. the function always returned the input unchanged. A full-white
    mask ("repaint everywhere", bounded by ``strength=0.42``) preserves the
    original "no mask" intent while actually running the fix.

    Parameters
    ----------
    image : PIL.Image.Image
        Input picture to refine.
    prompt, negative_prompt : str
        Conditioning texts forwarded to the pipeline.
    seed : int
        Seed for the CUDA generator (reproducible output).
    model_id : str
        Base model id, forwarded to the pipeline cache.

    Returns
    -------
    PIL.Image.Image
        Refined image, or the unmodified input if anything fails.
    """
    try:
        pipe = _get_facefix_pipeline(model_id)

        # Control images derived from the input.
        pose_img = openpose_face(image)
        depth_img = depth_processor(image)

        # Full-white "everywhere" mask — see docstring.
        full_mask = Image.new("L", image.size, 255)

        fixed = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            mask_image=full_mask,
            control_image=[pose_img, depth_img],
            controlnet_conditioning_scale=[0.85, 0.60],
            strength=0.42,
            num_inference_steps=20,
            guidance_scale=7.0,
            generator=torch.Generator("cuda").manual_seed(seed),
        ).images[0]
        return fixed
    except Exception as e:
        # Deliberate best-effort: never break the caller's generation flow.
        print(f"Face-Fix Fehler: {e}")
        return image