Astridkraft committed (verified)
Commit 7a34179 · 1 Parent(s): 45a3eda

Update controlnet_facefix.py

Files changed (1):
  controlnet_facefix.py +32 -30
controlnet_facefix.py CHANGED
@@ -1,4 +1,4 @@
-# controlnet_facefix.py - BASED ON YOUR WORKING CODE
+# controlnet_facefix.py - SIMPLE VERSION (ENHANCE THE WHOLE IMAGE)
 import torch
 from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel
 from PIL import Image
@@ -7,7 +7,7 @@ import cv2
 import numpy as np
 
 print("="*60)
-print("FACE-FIX BASED ON CONTROLNET_MODULE")
+print("FACE-FIX: ENHANCE THE WHOLE IMAGE")
 print("="*60)
 
 # IMPORTANT: the same models as in controlnet_module.py!
@@ -25,9 +25,8 @@ def _initialize_components():
 
     try:
         print("1. Loading ControlNet Depth...")
-        # SAME MODEL as in controlnet_module.py!
         _controlnet_depth = ControlNetModel.from_pretrained(
-            "lllyasviel/sd-controlnet-depth",  # ← CHANGE HERE!
+            "lllyasviel/sd-controlnet-depth",  # ← THIS ONE WORKS FOR YOU!
            torch_dtype=torch.float16
        )
        print(" ✅ ControlNet Depth OK")
@@ -37,9 +36,8 @@ def _initialize_components():
 
     try:
         print("2. Loading ControlNet OpenPose...")
-        # SAME MODEL as in controlnet_module.py!
         _controlnet_pose = ControlNetModel.from_pretrained(
-            "lllyasviel/sd-controlnet-openpose",  # ← CHANGE HERE!
+            "lllyasviel/sd-controlnet-openpose",  # ← THIS ONE WORKS FOR YOU!
            torch_dtype=torch.float16
        )
        print(" ✅ ControlNet OpenPose OK")
@@ -52,7 +50,7 @@ def _initialize_components():
     return True
 
 def _extract_depth_map(image):
-    """EXACTLY THE SAME FUNCTION as in controlnet_module.py"""
+    """Depth map as in controlnet_module.py"""
     try:
         img_array = np.array(image.convert("RGB"))
         gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
@@ -64,7 +62,7 @@ def _extract_depth_map(image):
     return image.convert("RGB")
 
 def _extract_pose_simple(image):
-    """Simple pose extraction based on controlnet_module.py"""
+    """Simple pose extraction"""
     try:
         img_array = np.array(image.convert("RGB"))
         edges = cv2.Canny(img_array, 100, 200)
@@ -75,24 +73,24 @@ def _extract_pose_simple(image):
     return image.convert("RGB")
 
 def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
-    """Face fix based on your working logic"""
+    """Enhance the WHOLE image with ControlNets"""
     print("\n" + "🎭"*50)
-    print("FACE-FIX WITH KNOWN-WORKING MODELS")
+    print("FACE-FIX: ENHANCING THE WHOLE IMAGE")
     print(f" Model: {model_id}")
     print(f" Seed: {seed}")
     print("🎭"*50)
 
     start_time = time.time()
 
-    # 1. Initialize the components (with known models)
+    # 1. Initialize the components
     if not _initialize_components():
         print("❌ Components could not be loaded")
         return image
 
-    # 2. Create the control images (with your working methods)
+    # 2. Create the control images
     print("🎭 Creating control images...")
-    depth_img = _extract_depth_map(image)
-    pose_img = _extract_pose_simple(image)
+    depth_img = _extract_depth_map(image).resize((512, 512))
+    pose_img = _extract_pose_simple(image).resize((512, 512))
 
     # 3. Create the pipeline
     global _pipeline
@@ -101,13 +99,13 @@ def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: i
         print("🔄 Loading the Face-Fix pipeline...")
         _pipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained(
             model_id,
-            controlnet=[_controlnet_pose, _controlnet_depth],  # OpenPose first, then Depth
+            controlnet=[_controlnet_pose, _controlnet_depth],
            torch_dtype=torch.float16,
            safety_checker=None,
            requires_safety_checker=False,
        )
 
-        # Optimizations
+        # Optimizations for HF Spaces
         _pipeline.enable_attention_slicing()
         _pipeline.enable_vae_slicing()
 
@@ -122,21 +120,21 @@ def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: i
         print(f" Device: {device}")
         pipeline = _pipeline.to(device)
 
-        # 5. Optimize the prompts
-        face_prompt = f"{prompt}, perfect face, detailed skin, realistic eyes, sharp focus"
-        face_negative = f"{negative_prompt}, deformed face, blurry face, bad anatomy"
+        # 5. Prompts for general enhancement
+        enhanced_prompt = f"{prompt}, high quality, detailed, sharp focus, professional photography"
+        enhanced_negative = f"{negative_prompt}, blurry, low quality, pixelated, artifacts"
 
-        print("⚡ Running the face fix...")
+        print("⚡ Enhancing the whole image...")
 
-        # 6. Run the face fix
+        # 6. Enhance the WHOLE image (mask_image=None)
         result = pipeline(
-            prompt=face_prompt,
-            negative_prompt=face_negative,
-            image=image,
-            mask_image=None,
+            prompt=enhanced_prompt,
+            negative_prompt=enhanced_negative,
+            image=image.resize((512, 512)),
+            mask_image=None,  # ← IMPORTANT: None = the whole image!
            control_image=[pose_img, depth_img],
-            controlnet_conditioning_scale=[0.8, 0.6],  # OpenPose stronger
-            strength=0.4,
+            controlnet_conditioning_scale=[0.7, 0.5],  # moderate, for a subtle enhancement
+            strength=0.3,  # low, for fine adjustments
            num_inference_steps=20,
            guidance_scale=7.0,
            generator=torch.Generator(device).manual_seed(seed),
@@ -144,17 +142,21 @@ def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: i
            width=512,
        ).images[0]
 
+        # Back to the original size
+        if image.size != (512, 512):
+            result = result.resize(image.size)
+
         duration = time.time() - start_time
-        print(f"\n✅✅✅ FACE-FIX SUCCEEDED in {duration:.1f}s ✅✅✅")
+        print(f"\n✅✅✅ IMAGE ENHANCEMENT SUCCEEDED in {duration:.1f}s ✅✅✅")
 
         return result
 
     except Exception as e:
-        print(f"\n❌❌❌ FACE-FIX FAILED: {e} ❌❌❌")
+        print(f"\n❌❌❌ ERROR: {e} ❌❌❌")
         import traceback
         traceback.print_exc()
         return image
 
 print("="*60)
-print("FACE-FIX MODULE INITIALIZED")
+print("FACE-FIX MODULE READY (WHOLE-IMAGE ENHANCEMENT)")
 print("="*60)
91
  print("🎭 Erstelle Control Images...")
92
+ depth_img = _extract_depth_map(image).resize((512, 512))
93
+ pose_img = _extract_pose_simple(image).resize((512, 512))
94
 
95
  # 3. Pipeline erstellen
96
  global _pipeline
 
99
  print("🔄 Lade Face-Fix Pipeline...")
100
  _pipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained(
101
  model_id,
102
+ controlnet=[_controlnet_pose, _controlnet_depth],
103
  torch_dtype=torch.float16,
104
  safety_checker=None,
105
  requires_safety_checker=False,
106
  )
107
 
108
+ # Optimierungen für HF Spaces
109
  _pipeline.enable_attention_slicing()
110
  _pipeline.enable_vae_slicing()
111
 
 
120
  print(f" Device: {device}")
121
  pipeline = _pipeline.to(device)
122
 
123
+ # 5. Prompts für allgemeine Verbesserung
124
+ enhanced_prompt = f"{prompt}, high quality, detailed, sharp focus, professional photography"
125
+ enhanced_negative = f"{negative_prompt}, blurry, low quality, pixelated, artifacts"
126
 
127
+ print("⚡ Verbessere gesamtes Bild...")
128
 
129
+ # 6. GANZES BILD verbessern (mask_image=None)
130
  result = pipeline(
131
+ prompt=enhanced_prompt,
132
+ negative_prompt=enhanced_negative,
133
+ image=image.resize((512, 512)),
134
+ mask_image=None, # ← WICHTIG: None = ganzes Bild!
135
  control_image=[pose_img, depth_img],
136
+ controlnet_conditioning_scale=[0.7, 0.5], # Mittel für subtile Verbesserung
137
+ strength=0.3, # Niedrig für feine Anpassungen
138
  num_inference_steps=20,
139
  guidance_scale=7.0,
140
  generator=torch.Generator(device).manual_seed(seed),
 
142
  width=512,
143
  ).images[0]
144
 
145
+ # Zurück auf Originalgröße
146
+ if image.size != (512, 512):
147
+ result = result.resize(image.size)
148
+
149
  duration = time.time() - start_time
150
+ print(f"\n✅✅✅ BILDVERBESSERUNG ERFOLGREICH in {duration:.1f}s ✅✅✅")
151
 
152
  return result
153
 
154
  except Exception as e:
155
+ print(f"\n❌❌❌ FEHLER: {e} ❌❌❌")
156
  import traceback
157
  traceback.print_exc()
158
  return image
159
 
160
  print("="*60)
161
+ print("FACE-FIX MODUL FERTIG (GANZES BILD VERBESSERN)")
162
  print("="*60)