Astridkraft committed on
Commit
7a300b4
·
verified ·
1 Parent(s): 0791ef6

Update controlnet_facefix.py

Browse files
Files changed (1) hide show
  1. controlnet_facefix.py +57 -73
controlnet_facefix.py CHANGED
@@ -1,83 +1,67 @@
1
  import torch
2
  from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel
3
- from controlnet_aux import OpenposeDetector, ZoeDetector
4
  from PIL import Image
 
5
 
6
- print("Lade OpenPose_faceonly + Depth für perfekte Gesichter...")
7
 
8
- # Preprocessors (einmalig laden) - KORRIGIERTE VERSION
9
  try:
10
- # Moderne Version
11
- openpose_face = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
12
- except:
13
- # Alternative/ältere Version
14
- from controlnet_aux.open_pose import OpenposeDetector
15
- openpose_face = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
16
-
17
- depth_processor = ZoeDetector.from_pretrained("lllyasviel/ControlNet")
18
-
19
- # ControlNet Modelle (bleiben im VRAM)
20
- controlnet_face = ControlNetModel.from_pretrained(
21
- "lllyasviel/control_v11p_sd15_openpose",
22
- torch_dtype=torch.float16
23
- ).to("cuda")
24
-
25
- controlnet_depth = ControlNetModel.from_pretrained(
26
- "lllyasviel/control_v11f1e_sd15_depth",
27
- torch_dtype=torch.float16
28
- ).to("cuda")
29
-
30
- # Pipeline-Cache
31
- _facefix_pipe = None
32
-
33
- def _get_facefix_pipeline(model_id: str):
34
- global _facefix_pipe
35
- if _facefix_pipe is None:
36
- print(f"Lade Face-Fix-Pipeline für Modell: {model_id}")
37
- _facefix_pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
38
- model_id,
39
- controlnet=[controlnet_face, controlnet_depth],
40
- torch_dtype=torch.float16,
41
- safety_checker=None,
42
- ).to("cuda")
43
-
44
- # Nur wenn verfügbar
45
  try:
46
- _facefix_pipe.enable_xformers_memory_efficient_attention()
47
- except:
48
- print("XFormers nicht verfügbar, überspringe...")
49
 
50
- try:
51
- _facefix_pipe.enable_model_cpu_offload() # spart ~2 GB!
52
- except:
53
- print("CPU Offload nicht verfügbar, überspringe...")
54
 
55
- return _facefix_pipe
56
-
57
- def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
58
- try:
59
- pipe = _get_facefix_pipeline(model_id)
60
-
61
- # Control-Images erzeugen
62
- pose_img = openpose_face(image)
63
- depth_img = depth_processor(image)
64
-
65
- fixed = pipe(
66
- prompt=prompt,
67
- negative_prompt=negative_prompt,
68
- image=image,
69
- mask_image=None, # Keine Maske für Face-Fix
70
- control_image=[pose_img, depth_img],
71
- controlnet_conditioning_scale=[0.85, 0.60],
72
- strength=0.42,
73
- num_inference_steps=20,
74
- guidance_scale=7.0,
75
- generator=torch.Generator("cuda").manual_seed(seed),
76
- ).images[0]
77
-
78
- return fixed
79
-
80
- except Exception as e:
81
- print(f"Face-Fix Fehler: {e}")
82
- # Bei Fehler das Originalbild zurückgeben
83
  return image
 
import torch
from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel
from controlnet_aux import ZoeDetector
from PIL import Image
import time  # NOTE(review): currently unused in this module — confirm before removing

print("Lade Face-Fix System (Depth-basiert)...")

# Simplified face-fix setup using ONLY the depth ControlNet.
# Everything lives inside one try/except: if any model download or CUDA
# transfer fails, a no-op fallback `apply_facefix` is installed instead.
try:
    depth_processor = ZoeDetector.from_pretrained("lllyasviel/ControlNet")
    print("✅ Depth Processor geladen")

    controlnet_depth = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11f1e_sd15_depth",
        torch_dtype=torch.float16,
    ).to("cuda")
    print("ControlNet Depth geladen")

    # Pipeline cache. The model id is tracked alongside the pipeline:
    # the previous version cached only the pipeline, so a later call
    # with a *different* model_id silently got the first model back.
    _facefix_pipe = None
    _facefix_model_id = None

    def _get_facefix_pipeline(model_id: str):
        """Return the ControlNet inpaint pipeline for `model_id`, (re)loading lazily.

        The pipeline is built on first use and rebuilt whenever a
        different model_id is requested.
        """
        global _facefix_pipe, _facefix_model_id
        if _facefix_pipe is None or _facefix_model_id != model_id:
            print(f"Lade Face-Fix-Pipeline für Modell: {model_id}")
            _facefix_pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
                model_id,
                controlnet=[controlnet_depth],
                torch_dtype=torch.float16,
                safety_checker=None,
            ).to("cuda")
            _facefix_model_id = model_id
        return _facefix_pipe

    def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
        """Refine faces in `image` with a depth-guided inpaint pass.

        Returns the refined PIL image; on any error the original image
        is returned unchanged (best-effort behaviour).
        """
        try:
            pipe = _get_facefix_pipeline(model_id)
            depth_img = depth_processor(image)

            # Bias the conditioning toward clean facial detail.
            face_prompt = prompt + ", perfect face, detailed skin"
            face_negative = negative_prompt + ", deformed face"

            # Inpaint pipelines require a mask image; `mask_image=None`
            # raises inside diffusers, which made every call fall into
            # the error path. A full-white mask means "repaint
            # everywhere", with `strength` limiting how much changes.
            full_mask = Image.new("L", image.size, 255)

            fixed = pipe(
                prompt=face_prompt,
                negative_prompt=face_negative,
                image=image,
                mask_image=full_mask,
                control_image=[depth_img],
                controlnet_conditioning_scale=[0.7],
                strength=0.35,
                num_inference_steps=20,
                guidance_scale=7.0,
                generator=torch.Generator("cuda").manual_seed(seed),
            ).images[0]

            return fixed

        except Exception as e:
            # Best-effort: never break the caller's generation flow.
            print(f"Face-Fix Error: {e}")
            return image

except Exception as e:
    print(f"❌ Face-Fix Setup fehlgeschlagen: {e}")

    # Fallback: a no-op with the same signature so callers keep working.
    def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
        print("⚠️ Face-Fix nicht verfügbar, gebe Original zurück")
        return image