File size: 8,009 Bytes
0c4f4cc
f0735ee
0c4f4cc
 
7a300b4
45a3eda
 
0c4f4cc
f0735ee
5e5594f
0c4f4cc
5e5594f
f0735ee
5e5594f
 
 
 
 
 
0c4f4cc
45a3eda
5e5594f
 
 
0c4f4cc
 
5e5594f
 
0c4f4cc
5e5594f
7755dba
45a3eda
5e5594f
0c4f4cc
 
 
 
 
 
 
5e5594f
0c4f4cc
5e5594f
 
0c4f4cc
 
5e5594f
45a3eda
 
c37ab74
0c4f4cc
 
c37ab74
0c4f4cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45a3eda
0c4f4cc
 
45a3eda
 
c37ab74
0c4f4cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5e5594f
 
c37ab74
0c4f4cc
c37ab74
0c4f4cc
 
 
 
 
c37ab74
0c4f4cc
 
 
 
5e5594f
 
7a300b4
0c4f4cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5e5594f
0c4f4cc
7a300b4
0c4f4cc
c37ab74
 
 
 
0c4f4cc
 
 
7a300b4
0c4f4cc
5e5594f
 
 
0c4f4cc
2562888
7a300b4
0c4f4cc
7a300b4
 
5e5594f
 
6769c24
5e5594f
 
7a300b4
0c4f4cc
7a300b4
5e5594f
0c4f4cc
7a300b4
5e5594f
 
 
 
 
0c4f4cc
 
 
 
c37ab74
0c4f4cc
 
 
 
 
5e5594f
 
0c4f4cc
 
 
 
 
 
 
5e5594f
 
 
 
0c4f4cc
c37ab74
 
7a34179
0c4f4cc
 
 
c37ab74
0c4f4cc
 
 
c37ab74
0c4f4cc
 
 
 
 
5e5594f
0c4f4cc
5e5594f
 
0c4f4cc
 
5e5594f
 
0c4f4cc
5e5594f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
# controlnet_facefix.py - PURE QUALITY ENHANCEMENT WITH MINIMAL CHANGE
import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, AutoencoderKL
from PIL import Image, ImageFilter, ImageEnhance
import time
import cv2
import numpy as np
from torchvision import transforms

# Startup banner printed at import time.
print("="*60)
print("FACE-FIX: REINE QUALITÄTSVERBESSERUNG - MINIMALE ÄNDERUNG")
print("="*60)

# Module-level lazy-load state: the heavy ControlNet components and the
# diffusers pipeline are only loaded on first use (see _initialize_components
# and apply_facefix), then cached here for subsequent calls.
_components_loaded = False
_controlnet_depth = None  
_controlnet_pose = None
_pipeline = None

def _initialize_components():
    """Lazily load the ControlNet components needed by apply_facefix.

    Only the OpenPose ControlNet is loaded; the depth model is deliberately
    left as ``None`` because it alters the background too much. The loaded
    model is cached in module globals, so repeated calls are cheap.

    Returns:
        bool: True once components are available, False if loading failed.
    """
    global _components_loaded, _controlnet_depth, _controlnet_pose

    # Already initialized on a previous call — nothing to do.
    if _components_loaded:
        return True

    print("⚠️ Lade nur OpenPose (Depth wird deaktiviert)...")

    try:
        # OpenPose only — depth changes the output too aggressively.
        _controlnet_pose = ControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose",
            torch_dtype=torch.float16,
        )
    except Exception as e:
        print(f"❌ Fehler: {e}")
        return False

    print("✅ OpenPose geladen")

    # Depth is intentionally NOT loaded (background distortion).
    _controlnet_depth = None

    _components_loaded = True
    return True

def _extract_precise_pose(image):
    """Build a very sparse edge/pose control map focused on the face.

    Runs a low-threshold Canny edge detector, then keeps only the strongest
    10% of edges inside detected face rectangles (Haar cascade). If no face
    is found, a strongly attenuated full-frame edge map is used instead.

    Args:
        image: PIL image (any mode; converted to RGB internally).

    Returns:
        PIL.Image.Image: RGB control map, same spatial size as ``image``,
        with edges written into the red channel only.
    """
    try:
        img_array = np.array(image.convert("RGB"))
        gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)

        # Extremely low thresholds: we only want the finest edges.
        edges = cv2.Canny(gray, 15, 45)

        # Face detection to restrict edges to the face region.
        face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
        )
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)

        # Start from an empty (black) control map.
        pose_map = np.zeros_like(img_array)

        if len(faces) > 0:
            for (x, y, w, h) in faces:
                face_region = edges[y:y+h, x:x+w]
                nonzero = face_region[face_region > 0]
                # Guard: np.percentile raises on an empty array, which
                # happened when a detected face contained no edges at all.
                if nonzero.size > 0:
                    # Keep only the strongest ~10% of edge pixels.
                    threshold = np.percentile(nonzero, 90)
                    face_region[face_region < threshold] = 0
                pose_map[y:y+h, x:x+w, 0] = face_region
        else:
            # No face found: fall back to heavily attenuated global edges.
            # Explicit uint8 cast — the float result of `edges * 0.3` is
            # truncated the same way the previous implicit cast did.
            pose_map[..., 0] = (edges * 0.3).astype(np.uint8)

        return Image.fromarray(pose_map)
    except Exception:
        # Last-resort fallback: minimal, very weak edges.
        gray = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2GRAY)
        # BUG FIX: `cv2.Canny(...) * 0.2` yields a float64 array, which
        # Image.fromarray cannot handle — the old fallback itself raised.
        # Cast back to uint8 so the fallback actually works.
        edges = (cv2.Canny(gray, 10, 30) * 0.2).astype(np.uint8)
        return Image.fromarray(edges).convert("RGB")

def _apply_face_enhancement(image):
    """Classic (non-AI) quality pass: sharpen, denoise, boost local contrast.

    Pipeline: mild unsharp-style kernel -> fast NL-means denoise -> CLAHE on
    the L channel in LAB space. Designed to change the image as little as
    possible while crisping it up.

    Args:
        image: PIL image (any mode; converted to RGB internally).

    Returns:
        PIL.Image.Image: enhanced RGB image, or the input unchanged if any
        step fails.
    """
    try:
        img_array = np.array(image.convert("RGB"))

        # 1. Mild sharpening.
        # BUG FIX: the kernel was divided by 3.0, making its sum 1/3 instead
        # of 1.0, which scaled overall brightness down to a third and
        # darkened the whole image. A sharpening kernel must sum to 1.0
        # (here: 8 * -0.5 + 5.0 = 1.0) to preserve brightness.
        sharpened = cv2.filter2D(img_array, -1,
            np.array([[-0.5, -0.5, -0.5],
                      [-0.5,  5.0, -0.5],
                      [-0.5, -0.5, -0.5]]))

        # 2. Light denoise (small h values keep detail).
        denoised = cv2.fastNlMeansDenoisingColored(sharpened, None, 3, 3, 7, 21)

        # 3. Gentle local-contrast boost via CLAHE on the luminance channel.
        lab = cv2.cvtColor(denoised, cv2.COLOR_RGB2LAB)
        l, a, b = cv2.split(lab)
        clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(8,8))
        l = clahe.apply(l)
        enhanced = cv2.merge([l, a, b])
        enhanced = cv2.cvtColor(enhanced, cv2.COLOR_LAB2RGB)

        return Image.fromarray(enhanced)
    except Exception:
        # Best-effort: on any failure, return the original untouched.
        return image

def apply_facefix(image: Image.Image, prompt: str, negative_prompt: str, seed: int, model_id: str, *, use_ai_enhancement: bool = False):
    """Super-subtle quality enhancement of a generated image.

    Strategy:
    1. Always apply a cheap non-AI enhancement pass first.
    2. Optionally (``use_ai_enhancement=True``) run a MINIMAL ControlNet
       pass: OpenPose only (no depth — it changes too much), very low
       conditioning strength, almost no CFG, identical prompt, and a final
       70/30 blend back toward the original.

    Args:
        image: Input PIL image to enhance.
        prompt: The ORIGINAL generation prompt (reused unchanged so the AI
            pass does not drift).
        negative_prompt: Original negative prompt; ", deformed, blurry" is
            appended for the AI pass.
        seed: Original generation seed; the AI pass uses ``seed + 100``.
        model_id: HF model id for the StableDiffusion base pipeline.
        use_ai_enhancement: Keyword-only switch for the AI pass. Defaults to
            False (was previously a hard-coded local flag; now a parameter
            so callers can opt in without editing this file).

    Returns:
        PIL.Image.Image: Enhanced image at the original size. On any AI
        failure, falls back to the non-AI enhanced image.
    """
    print("\n" + "🎯"*50)
    print("SUBTILE QUALITÄTSVERBESSERUNG")
    print(f"  Größe: {image.size}")
    print("🎯"*50)

    start_time = time.time()

    # OPTION 1: simple non-AI enhancement (recommended default).
    print("\n⚡ OPTION 1: Einfache non-AI Verbesserung...")
    enhanced = _apply_face_enhancement(image)

    if not use_ai_enhancement:
        duration = time.time() - start_time
        print(f"✅ Non-AI Verbesserung in {duration:.1f}s")
        return enhanced

    # OPTION 2: minimal AI enhancement (opt-in).
    print("⚠️  Starte MINIMALE AI-Verbesserung...")

    if not _initialize_components():
        # ControlNet unavailable — the non-AI result is still usable.
        return enhanced

    # Prepare the control map at the SD-native 512x512 resolution.
    original_size = image.size
    control_size = (512, 512)
    resized_image = image.resize(control_size, Image.Resampling.LANCZOS)

    # Minimal pose map (face-focused edges); saved for debugging.
    pose_img = _extract_precise_pose(resized_image)
    pose_img.save("debug_minimal_pose.png")

    # Build the pipeline once and cache it at module level.
    global _pipeline
    if _pipeline is None:
        try:
            print("🔄 Lade Pipeline...")
            _pipeline = StableDiffusionControlNetPipeline.from_pretrained(
                model_id,
                controlnet=[_controlnet_pose],  # OpenPose ONLY — no depth.
                torch_dtype=torch.float16,
                safety_checker=None,
                requires_safety_checker=False,
            )

            # Memory savers for consumer GPUs.
            _pipeline.enable_attention_slicing()
            _pipeline.enable_vae_slicing()

            print("✅ Pipeline geladen")
        except Exception as e:
            print(f"❌ Pipeline Fehler: {e}")
            return enhanced

    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"  Device: {device}")
        pipeline = _pipeline.to(device)

        # Critical choices for subtlety:
        # 1. SAME prompt as the original generation.
        # 2. Very low ControlNet conditioning strength.
        # 3. Almost no CFG guidance.

        print("\n⚙️  EXTREM SUBTILE PARAMETER:")
        print("   • OpenPose Strength: 0.3 (SEHR NIEDRIG)")
        print("   • Steps: 15 (wenig)")
        print("   • CFG: 2.0 (fast kein Guidance)")
        print("   • Gleicher Seed")

        result = pipeline(
            prompt=prompt,  # IMPORTANT: identical prompt — no drift.
            negative_prompt=f"{negative_prompt}, deformed, blurry",
            image=[pose_img],  # Pose map only.
            controlnet_conditioning_scale=[0.3],  # Extremely low strength.
            num_inference_steps=15,               # Few steps.
            guidance_scale=2.0,                   # Almost no CFG.
            generator=torch.Generator(device).manual_seed(seed + 100),  # Slightly shifted seed.
            height=512,
            width=512,
        ).images[0]

        # Restore the original resolution.
        if original_size != (512, 512):
            result = result.resize(original_size, Image.Resampling.LANCZOS)

        # Blend back toward the original to minimize visible change.
        # (Previous comment said "50/50" but the code always blended 70/30.)
        # Convert the original explicitly to RGB: the pipeline output is RGB,
        # and an RGBA/grayscale input would otherwise break the array math.
        result_array = np.array(result).astype(float)
        original_array = np.array(image.convert("RGB")).astype(float)

        # 70% original, 30% AI result.
        blended = (original_array * 0.7 + result_array * 0.3).astype(np.uint8)
        final_result = Image.fromarray(blended)

        duration = time.time() - start_time
        print(f"\n✅ SUBTILE VERBESSERUNG in {duration:.1f}s")
        print(f"   • 70% Original, 30% AI")
        print(f"   • OpenPose: 0.3")
        print(f"   • CFG: 2.0")

        return final_result

    except Exception as e:
        print(f"\n❌ AI-Verbesserung fehlgeschlagen: {e}")
        return enhanced

# Module-load confirmation banner.
_banner = "=" * 60
print(_banner)
print("FACE-FIX: REINE QUALITÄTSVERBESSERUNG")
print(_banner)