Update controlnet_facefix.py
controlnet_facefix.py  CHANGED  (+20, -29)
@@ -1,4 +1,4 @@
-# controlnet_facefix.py - OPTIMIZED FOR FACE ENHANCEMENT
+# controlnet_facefix.py - OPTIMIZED FOR MINIMAL FACE ENHANCEMENT
 import torch
 from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
 from PIL import Image
@@ -7,7 +7,7 @@ import cv2
 import numpy as np

 print("="*60)
-print("FACE-FIX: FACE ENHANCEMENT")
+print("FACE-FIX: MINIMAL FACE ENHANCEMENT")
 print("="*60)

 # IMPORTANT: the same models as in controlnet_module.py!
@@ -73,9 +73,9 @@ def _extract_pose_simple(image):
     return image.convert("RGB")

 def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
-    """
+    """MINIMAL enhancement: focused on faces, background almost unchanged"""
     print("\n" + "🎭"*50)
-    print("FACE-FIX:
+    print("FACE-FIX: MINIMAL ENHANCEMENT (focus: faces)")
     print(f" Model: {model_id}")
     print(f" Original Seed: {seed}")
     print("🎭"*50)
@@ -120,29 +120,19 @@ def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
     print(f" Device: {device}")
     pipeline = _pipeline.to(device)

-    # 5.
-    # -
-
-    # - Same seed for consistency
-    # - Shorter, more focused prompt for quality only
+    # 5. EXTREMELY LOW PARAMETERS for a minimal change
+    # Quality prompt only, no content prompt
+    face_quality_prompt = "perfect face, detailed skin, sharp eyes"

-    #
-    face_quality_prompt = "perfect face, detailed skin, realistic eyes, sharp facial features, high quality"
-
-    # Negative prompt for face defects
-    face_negative = "deformed face, blurry face, bad eyes, asymmetric, low quality, mutated"
-
-    print("⚡ Enhancing faces (OpenPose stronger, Depth weaker)...")
-
-    # 6. OPTIMIZED INFERENCE
+    # 6. OPTIMIZED INFERENCE with minimal strength
     result = pipeline(
-        prompt=face_quality_prompt,  #
-        negative_prompt=face_negative,
+        prompt=face_quality_prompt,  # SHORT and focused
+        negative_prompt="deformed, blurry, low quality",
         image=[pose_img, depth_img],
-        controlnet_conditioning_scale=[0.
-        num_inference_steps=
-        guidance_scale=
-        generator=torch.Generator(device).manual_seed(seed),  # Same seed
+        controlnet_conditioning_scale=[0.35, 0.12],  # EXTREMELY LOW: OpenPose 0.35, Depth 0.12
+        num_inference_steps=6,  # VERY FEW steps
+        guidance_scale=3.0,  # LOW CFG
+        generator=torch.Generator(device).manual_seed(seed),  # Same seed
         height=512,
         width=512,
     ).images[0]
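Note: `controlnet_conditioning_scale` is applied element-wise to the control images, so with `image=[pose_img, depth_img]` the OpenPose map is weighted 0.35 and the depth map 0.12. The two-ControlNet `_pipeline` object is created earlier in the file, outside the hunks shown here, using the same models as controlnet_module.py. A minimal sketch of how such a pipeline is typically assembled with diffusers; the model IDs below are illustrative assumptions, not the ones the module actually loads:

# Sketch only - the real model IDs and loading logic live outside this diff.
import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel

controlnets = [
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16),
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16),
]
_pipeline = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # stand-in for the module's model_id
    controlnet=controlnets,            # order must match image=[pose_img, depth_img]
    torch_dtype=torch.float16,         # assumes a CUDA device
)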
@@ -152,10 +142,11 @@ def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
     result = result.resize(image.size)

     duration = time.time() - start_time
-    print(f"\n✅✅✅ FACE ENHANCEMENT
-    print(f" - OpenPose
-    print(f" - Depth
-    print(f" - Steps:
+    print(f"\n✅✅✅ MINIMAL FACE ENHANCEMENT in {duration:.1f}s ✅✅✅")
+    print(f" - OpenPose: 0.35 (very subtle)")
+    print(f" - Depth: 0.12 (hardly any background influence)")
+    print(f" - Steps: 6 (minimal)")
+    print(f" - CFG: 3.0 (low)")
     print(f" - Same seed: {seed}")

     return result
@@ -167,5 +158,5 @@ def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str):
         return image

 print("="*60)
-print("FACE-FIX MODULE READY (
+print("FACE-FIX MODULE READY (MINIMAL FACE ENHANCEMENT)")
 print("="*60)
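For context, a sketch of how the updated function might be called after a normal generation pass. The calling code is not part of this commit; the file names, prompt, seed, and model ID below are illustrative only:

# Illustrative usage - not part of this commit.
from PIL import Image
from controlnet_facefix import apply_facefix

generated = Image.open("generated.png")  # output of the main generation step (assumed)
fixed = apply_facefix(
    image=generated,
    prompt="portrait photo of a woman in a park",  # original prompt (the fix uses its own short quality prompt)
    negative_prompt="low quality, blurry",
    seed=123456,                                   # same seed as the original generation
    model_id="runwayml/stable-diffusion-v1-5",     # same base model as controlnet_module.py (assumed)
)
fixed.save("generated_facefix.png")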