Update controlnet_module.py
Browse files- controlnet_module.py +69 -61
controlnet_module.py
CHANGED
|
@@ -143,67 +143,75 @@ class ControlNetProcessor:
|
|
| 143 |
return self.pipe_canny
|
| 144 |
|
| 145 |
def generate_with_controlnet(
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
):
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 207 |
def prepare_inpaint_input(self, image, keep_environment=False):
|
| 208 |
"""
|
| 209 |
Bereitet das Input-Bild für Inpaint vor
|
|
|
|
| 143 |
return self.pipe_canny
|
| 144 |
|
| 145 |
def generate_with_controlnet(
    self, image, prompt, negative_prompt,
    steps, guidance_scale, controlnet_strength,
    progress=None, keep_environment=False
):
    """Run one ControlNet-guided generation pass with progress reporting.

    Depending on ``keep_environment`` the generation is conditioned either
    on Canny edges (environment preserved, person repainted) or on an
    OpenPose map (person preserved, environment repainted).

    Returns a 3-tuple ``(generated_image, conditioning_image,
    inpaint_input)``.  On any failure no exception propagates; instead a
    512x512 RGB copy of the input image is returned in all three slots.
    """
    try:
        # Choose the conditioning strategy and the image handed on to the
        # later inpaint stage.
        if not keep_environment:
            # Keep the person, change the environment.
            controlnet_type = "openpose"
            print("🎯 ControlNet Modus: Person beibehalten (OpenPose)")
            control_map = self.extract_pose(image)
            inpaint_source = control_map  # pose map is the inpaint input
        else:
            # Keep the environment, change the person.
            controlnet_type = "canny"
            print("🎯 ControlNet Modus: Umgebung beibehalten (Canny Edge)")
            control_map = self.extract_canny_edges(image)
            inpaint_source = image  # original image is the inpaint input

        pipe = self.load_controlnet_pipeline(controlnet_type)

        # Fresh random seed for every call; printed so a run can be
        # reproduced later.
        seed = random.randint(0, 2**32 - 1)
        generator = torch.Generator(device=self.device).manual_seed(seed)
        print(f"ControlNet Seed: {seed}")

        # Per-step progress callback is optional.
        if progress is not None:
            step_cb = ControlNetProgressCallback(progress, int(steps))
        else:
            step_cb = None

        print("🔄 ControlNet: Starte Pipeline...")

        # ControlNet generation.
        output = pipe(
            prompt=prompt,
            image=control_map,
            negative_prompt=negative_prompt,
            num_inference_steps=int(steps),
            guidance_scale=guidance_scale,
            generator=generator,
            controlnet_conditioning_scale=controlnet_strength,
            height=512,
            width=512,
            output_type="pil",
            callback_on_step_end=step_cb,
            callback_on_step_end_tensor_inputs=[],
        )

        # Best-effort debug output: how many steps the scheduler really ran.
        try:
            sched = pipe.scheduler
            if hasattr(sched, "timesteps"):
                actual_steps = len(sched.timesteps)
                print(f"🎯 CONTROLNET TATSÄCHLICHE STEPS: {actual_steps} (von {steps} angefordert)")
        except Exception as e:
            print(f"⚠️ Konnte ControlNet Scheduler-Info nicht auslesen: {e}")

        print("✅ ControlNet abgeschlossen!")

        # Three values: generated image, conditioning map, inpaint input.
        return output.images[0], control_map, inpaint_source

    except Exception as e:
        # Deliberate best-effort fallback: log the failure and hand back
        # the (resized) input image instead of raising.
        print(f"❌ Fehler in ControlNet: {e}")
        import traceback
        traceback.print_exc()
        error_image = image.convert("RGB").resize((512, 512))
        return error_image, error_image, error_image
|
| 214 |
+
|
| 215 |
def prepare_inpaint_input(self, image, keep_environment=False):
|
| 216 |
"""
|
| 217 |
Bereitet das Input-Bild für Inpaint vor
|