Spaces: Runtime error

Update generator.py
Browse files

generator.py: CHANGED (+385, −581)
Old version (left pane of the diff; removed lines are marked with -; some lines are truncated in the diff view and are left as fragments):

@@ -1,30 +1,32 @@
  """
- Generation logic for Pixagram AI Pixel Art Generator
  """
  import torch
  import numpy as np
  import cv2
  from PIL import Image
- import
- from torchvision import transforms

  from config import (
-     device, dtype, TRIGGER_WORD,
-     ADAPTIVE_THRESHOLDS, ADAPTIVE_PARAMS, CAPTION_CONFIG
  )
  from utils import (
-     sanitize_text, enhanced_color_match, color_match,
  )
  from models import (
-     load_face_analysis, load_depth_detector, load_controlnets,
-     load_sdxl_pipeline, load_lora,
      setup_scheduler, optimize_pipeline, load_caption_model, set_clip_skip
  )


  class RetroArtConverter:
-     """Main class for retro art generation"""

      def __init__(self):
          self.device = device

@@ -33,38 +35,29 @@
              'custom_checkpoint': False,
              'lora': False,
              'instantid': False,
-             'zoe_depth': False
-             'ip_adapter': False
          }

-         # Initialize
-         self.

-         # Load
          self.zoe_depth, zoe_success = load_depth_detector()
          self.models_loaded['zoe_depth'] = zoe_success

-         # Load ControlNets
-         self.
-         self.models_loaded['instantid'] = instantid_success

-         # Load image encoder
-         if self.instantid_enabled:
-             self.image_encoder = load_image_encoder()
-         else:
-             self.image_encoder = None

-         if self.instantid_enabled and self.controlnet_instantid is not None:
-             controlnets = [self.controlnet_instantid, controlnet_depth]
-             print(f"Initializing with multiple ControlNets: InstantID + Depth")
-         else:
-             controlnets = controlnet_depth
-             print(f"Initializing with single ControlNet: Depth only")

-         # Load SDXL pipeline
          self.pipe, checkpoint_success = load_sdxl_pipeline(controlnets)
          self.models_loaded['custom_checkpoint'] = checkpoint_success

@@ -72,48 +65,28 @@
          lora_success = load_lora(self.pipe)
          self.models_loaded['lora'] = lora_success

-         # Setup IP-Adapter
-         if self.instantid_enabled and self.image_encoder is not None:
-             self.image_proj_model, ip_adapter_success = setup_ip_adapter(self.pipe, self.image_encoder)
-             self.models_loaded['ip_adapter'] = ip_adapter_success
-         else:
-             print("[INFO] Face preservation: InstantID ControlNet keypoints only")
-             self.models_loaded['ip_adapter'] = False
-             self.image_proj_model = None

          # Setup Compel
          self.compel, self.use_compel = setup_compel(self.pipe)

-         # Setup
          setup_scheduler(self.pipe)

-         # Optimize
          optimize_pipeline(self.pipe)

-         # Load caption model
          self.caption_processor, self.caption_model, self.caption_enabled, self.caption_model_type = load_caption_model()

-         # Report caption model status
-         if self.caption_enabled and self.caption_model is not None:
-             if self.caption_model_type == "git":
-                 print(" [OK] Using GIT for detailed captions")
-             elif self.caption_model_type == "blip":
-                 print(" [OK] Using BLIP for standard captions")
-             else:
-                 print(" [OK] Caption model loaded")

          # Set CLIP skip
          set_clip_skip(self.pipe)

-         self.using_multiple_controlnets = isinstance(controlnets, list)
-         print(f"Pipeline initialized with {'multiple' if self.using_multiple_controlnets else 'single'} ControlNet(s)")

-         # Print model status
          self._print_status()

@@ -121,142 +94,106 @@
          for model, loaded in self.models_loaded.items():
              status = "[OK] LOADED" if loaded else "[FALLBACK/DISABLED]"
              print(f"{model}: {status}")
          print("===================\n")

-         print("=== UPGRADE VERIFICATION ===")
-         try:
-             from resampler_enhanced import EnhancedResampler
-             from ip_attention_processor_enhanced import EnhancedIPAttnProcessor2_0

-             resampler_check = isinstance(self.image_proj_model, EnhancedResampler) if hasattr(self, 'image_proj_model') and self.image_proj_model is not None else False
-             custom_attn_check = any(isinstance(p, EnhancedIPAttnProcessor2_0) for p in self.pipe.unet.attn_processors.values()) if hasattr(self, 'pipe') else False

-             print(f"Enhanced Perceiver Resampler: {'[OK] ACTIVE' if resampler_check else '[INFO] Not active'}")
-             print(f"Enhanced IP-Adapter Attention: {'[OK] ACTIVE' if custom_attn_check else '[INFO] Not active'}")

-             if resampler_check and custom_attn_check:
-                 print("[SUCCESS] Face preservation upgrade fully active")
-                 print("  Expected improvement: +10-15% face similarity")
-             elif resampler_check or custom_attn_check:
-                 print("[PARTIAL] Some upgrades active")
-             else:
-                 print("[INFO] Using standard components")
-         except Exception as e:
-             print(f"[INFO] Verification skipped: {e}")
-         print("============================\n")

      def get_depth_map(self, image):
          try:
-             if
-             orig_width, orig_height = image.size
-             # **FIX 1 START: Ensure all size variables are standard Python int**
-             orig_width = int(orig_width)
-             orig_height = int(orig_height)

-             # FIXED: Use multiples of 64 (not 32)
-             target_width = int((orig_width // 64) * 64)
-             target_height = int((orig_height // 64) * 64)

-             depth_image = depth_image.resize((int(orig_width), int(orig_height)), Image.LANCZOS)
-             # **FIX 1 END**

-             return depth_image

      def add_trigger_word(self, prompt):
          """Add trigger word to prompt if not present"""
          if TRIGGER_WORD.lower() not in prompt.lower():
-             # **FIX 3 START: Handle empty or blank prompt**
              if not prompt or not prompt.strip():
                  return TRIGGER_WORD
-             # **FIX 3 END**
              return f"{TRIGGER_WORD}, {prompt}"
          return prompt

-     def extract_multi_scale_face(self, face_crop, face):
-         """
-         Extract face features at multiple scales for better detail.
-         +1-2% improvement in face preservation.
-         """
-         try:
-             multi_scale_embeds = []

-             for scale in MULTI_SCALE_FACTORS:
-                 # Resize
-                 w, h = face_crop.size
-                 scaled_size = (int(w * scale), int(h * scale))
-                 scaled_crop = face_crop.resize(scaled_size, Image.LANCZOS)

-                 # Pad/crop back to original
-                 scaled_crop = scaled_crop.resize((w, h), Image.LANCZOS)

-                 # Extract features
-                 scaled_array = cv2.cvtColor(np.array(scaled_crop), cv2.COLOR_RGB2BGR)
-                 scaled_faces = self.face_app.get(scaled_array)

-                 if len(scaled_faces) > 0:
-                     multi_scale_embeds.append(scaled_faces[0].normed_embedding)

-             # Average embeddings
-             if len(multi_scale_embeds) > 0:
-                 averaged = np.mean(multi_scale_embeds, axis=0)
-                 # Renormalize
-                 averaged = averaged / np.linalg.norm(averaged)
-                 print(f"[MULTI-SCALE] Combined {len(multi_scale_embeds)} scales")
-                 return averaged

-             return face.normed_embedding

-         except Exception as e:
-             print(f"[MULTI-SCALE] Failed: {e}, using single scale")
-             return face.normed_embedding

      def detect_face_quality(self, face):
-         """
-         Detect face quality and adaptively adjust parameters.
-         +2-3% consistency improvement.
-         """
          try:
              bbox = face.bbox
              face_size = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
              det_score = float(face.det_score) if hasattr(face, 'det_score') else 1.0

-             # Small face -> boost
              if face_size < ADAPTIVE_THRESHOLDS['small_face_size']:
                  return ADAPTIVE_PARAMS['small_face'].copy()

@@ -264,7 +201,7 @@
              elif det_score < ADAPTIVE_THRESHOLDS['low_confidence']:
                  return ADAPTIVE_PARAMS['low_confidence'].copy()

-             # Check for profile
              elif hasattr(face, 'pose') and len(face.pose) > 1:
                  try:
                      yaw = float(face.pose[1])

@@ -273,455 +210,322 @@
                  except (ValueError, TypeError, IndexError):
                      pass

-             # Good quality face - use provided parameters
              return None

          except Exception as e:
              print(f"[ADAPTIVE] Quality detection failed: {e}")
              return None

-     def validate_and_adjust_parameters(self, strength, guidance_scale, lora_scale, identity_preservation, identity_control_scale,
-                                        depth_control_scale, consistency_mode=True):
-         """
-         Enhanced parameter validation with stricter rules for consistency.
-         """
-         if consistency_mode:
-             print("[CONSISTENCY] Applying strict parameter validation...")
-             adjustments = []

-             # Rule 1: Strong inverse relationship between identity and LORA
-             if identity_preservation > 1.2:
-                 original_lora = lora_scale
-                 lora_scale = min(lora_scale, 1.0)
-                 if abs(lora_scale - original_lora) > 0.01:
-                     adjustments.append(f"LORA: {original_lora:.2f}->{lora_scale:.2f} (high identity)")

-             # Rule 2: Strength-based profile activation
-             if strength < 0.5:
-                 # Maximum preservation mode
-                 if identity_preservation < 1.3:
-                     original_identity = identity_preservation
-                     identity_preservation = 1.3
-                     adjustments.append(f"Identity: {original_identity:.2f}->{identity_preservation:.2f} (max preservation)")
-                 if lora_scale > 0.9:
-                     original_lora = lora_scale
-                     lora_scale = 0.9
-                     adjustments.append(f"LORA: {original_lora:.2f}->{lora_scale:.2f} (max preservation)")
-                 if guidance_scale > 1.3:
-                     original_cfg = guidance_scale
-                     guidance_scale = 1.3
-                     adjustments.append(f"CFG: {original_cfg:.2f}->{guidance_scale:.2f} (max preservation)")

-             elif strength > 0.7:
-                 # Artistic transformation mode
-                 if identity_preservation > 1.0:
-                     original_identity = identity_preservation
-                     identity_preservation = 1.0
-                     adjustments.append(f"Identity: {original_identity:.2f}->{identity_preservation:.2f} (artistic mode)")
-                 if lora_scale < 1.2:
-                     original_lora = lora_scale
-                     lora_scale = 1.2
-                     adjustments.append(f"LORA: {original_lora:.2f}->{lora_scale:.2f} (artistic mode)")

-             # Rule 3: CFG-LORA relationship
-             if guidance_scale > 1.4 and lora_scale > 1.2:
-                 original_lora = lora_scale
-                 lora_scale = 1.1
-                 adjustments.append(f"LORA: {original_lora:.2f}->{lora_scale:.2f} (high CFG detected)")

-             # Rule 4: LCM sweet spot enforcement
-             original_cfg = guidance_scale
-             guidance_scale = max(1.0, min(guidance_scale, 1.5))
-             if abs(guidance_scale - original_cfg) > 0.01:
-                 adjustments.append(f"CFG: {original_cfg:.2f}->{guidance_scale:.2f} (LCM optimal)")

-             # Rule 5: ControlNet balance
-             total_control = identity_control_scale + depth_control_scale
-             if total_control > 1.7:
-                 scale_factor = 1.7 / total_control
-                 original_id_ctrl = identity_control_scale
-                 original_depth_ctrl = depth_control_scale
-                 identity_control_scale *= scale_factor
-                 depth_control_scale *= scale_factor
-                 adjustments.append(f"ControlNets balanced: ID {original_id_ctrl:.2f}->{identity_control_scale:.2f}, Depth {original_depth_ctrl:.2f}->{depth_control_scale:.2f}")

-             # Report adjustments
-             if adjustments:
-                 print("  [OK] Applied adjustments:")
-                 for adj in adjustments:
-                     print(f"    - {adj}")
-             else:
-                 print("  [OK] Parameters already optimal")

-         return strength, guidance_scale, lora_scale, identity_preservation, identity_control_scale, depth_control_scale

-     def generate_caption(self, image, max_length=None, num_beams=None):
-         """Generate a descriptive caption for the image (supports BLIP-2, GIT, BLIP)."""
          if not self.caption_enabled or self.caption_model is None:
              return None

-         # Set defaults based on model type
-         if max_length is None:
-             if self.caption_model_type == "blip2":
-                 max_length = 50  # BLIP-2 can handle longer captions
-             elif self.caption_model_type == "git":
-                 max_length = 40  # GIT also produces good long captions
-             else:
-                 max_length = CAPTION_CONFIG['max_length']  # BLIP base (20)

-         if num_beams is None:
-             num_beams = CAPTION_CONFIG['num_beams']

          try:
-                 min_length=10,  # Encourage longer captions
-                 length_penalty=1.0,
-                 repetition_penalty=1.5,
-                 early_stopping=True
-             )

-             caption = self.caption_processor.decode(output[0], skip_special_tokens=True)

-             elif self.caption_model_type == "git":
-                 # GIT specific processing
-                 inputs = self.caption_processor(images=image, return_tensors="pt").to(self.device, self.dtype)

-                 with torch.no_grad():
-                     output = self.caption_model.generate(
-                         pixel_values=inputs.pixel_values,
-                         max_length=max_length,
-                         num_beams=num_beams,
-                         min_length=10,
-                         length_penalty=1.0,
-                         repetition_penalty=1.5,
-                         early_stopping=True
-                     )

-                 caption = self.caption_processor.batch_decode(output, skip_special_tokens=True)[0]

-             return caption

          except Exception as e:
-             print(f"
              return None

      def generate_retro_art(
          self,
          input_image,
-         prompt="
-         negative_prompt="
          num_inference_steps=12,
-         guidance_scale=1.
-         depth_control_scale=0.
          identity_control_scale=0.85,
          lora_scale=1.0,
-         identity_preservation=
-         strength=0.
          enable_color_matching=False,
          consistency_mode=True,
          seed=-1
      ):
-         """Generate retro art with

-         if not negative_prompt or not negative_prompt.strip():
-             negative_prompt = ""
-         # **FIX 3 END**

-         # Apply parameter validation
-         if consistency_mode:
-             print("\n[CONSISTENCY] Validating and adjusting parameters...")
-             strength, guidance_scale, lora_scale, identity_preservation, identity_control_scale, depth_control_scale = \
-                 self.validate_and_adjust_parameters(
-                     strength, guidance_scale, lora_scale, identity_preservation,
-                     identity_control_scale, depth_control_scale, consistency_mode
-                 )

-         # Add trigger word (handles blank prompt fix)
-         prompt = self.add_trigger_word(prompt)

-         # Calculate optimal size with flexible aspect ratio support
-         original_width, original_height = input_image.size
-         target_width, target_height = calculate_optimal_size(original_width, original_height)

-         print(f"Resizing from {original_width}x{original_height} to {target_width}x{target_height}")
-         print(f"Prompt: {prompt}")
-         print(f"Img2Img Strength: {strength}")

-         # Resize with high quality
-         resized_image = input_image.resize((int(target_width), int(target_height)), Image.LANCZOS)

-         # Generate depth map
-         print("Generating Zoe depth map...")
-         depth_image = self.get_depth_map(resized_image)
-         if depth_image.size != (target_width, target_height):
-             depth_image = depth_image.resize((int(target_width), int(target_height)), Image.LANCZOS)

-         # Handle face detection
-         using_multiple_controlnets = self.using_multiple_controlnets
-         face_kps_image = None
-         face_embeddings = None
-         face_crop_enhanced = None
-         has_detected_faces = False
-         face_bbox_original = None

-         if using_multiple_controlnets and self.face_app is not None:
-             print("Detecting faces and extracting keypoints...")
-             img_array = cv2.cvtColor(np.array(resized_image), cv2.COLOR_RGB2BGR)
-             faces = self.face_app.get(img_array)

-             # Get largest face
-             face = sorted(faces, key=lambda x:

              adaptive_params = self.detect_face_quality(face)
-             if adaptive_params
                  print(f"[ADAPTIVE] {adaptive_params['reason']}")
-                 identity_preservation = adaptive_params
-                 identity_control_scale = adaptive_params
-                 guidance_scale = adaptive_params
-                 lora_scale = adaptive_params

-             # Extract face embeddings
-             face_embeddings_base = face.normed_embedding

-             # Extract face crop
-             bbox = face.bbox.astype(int)
-             x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
-             face_bbox_original = [x1, y1, x2, y2]

-             # Add padding
-             face_width = x2 - x1
-             face_height = y2 - y1
-             padding_x = int(face_width * 0.3)
-             padding_y = int(face_height * 0.3)
-             x1 = max(0, x1 - padding_x)
-             y1 = max(0, y1 - padding_y)
-             x2 = min(resized_image.width, x2 + padding_x)
-             y2 = min(resized_image.height, y2 + padding_y)

-             # Crop face region
-             face_crop = resized_image.crop((x1, y1, x2, y2))

-             # MULTI-SCALE PROCESSING
-             face_embeddings = self.extract_multi_scale_face(face_crop, face)

-             # Enhance face crop
-             face_crop_enhanced = enhance_face_crop(face_crop)

-             # Draw keypoints
-             face_kps = face.kps
-             face_kps_image = draw_kps(resized_image, face_kps)

-             # ENHANCED: Extract comprehensive facial attributes
-             from utils import get_facial_attributes, build_enhanced_prompt
-             facial_attrs = get_facial_attributes(face)

-             # Update prompt with detected attributes
-             prompt = build_enhanced_prompt(prompt, facial_attrs, TRIGGER_WORD)

-             # Legacy output for compatibility
-             age = facial_attrs['age']
-             gender_code = facial_attrs['gender']
-             det_score = facial_attrs['quality']

-             gender_str = 'M' if gender_code == 1 else ('F' if gender_code == 0 else 'N/A')
-             print(f"Face info: bbox={face.bbox}, age={age if age else 'N/A'}, gender={gender_str}")
-             print(f"Face crop size: {face_crop.size}, enhanced: {face_crop_enhanced.size if face_crop_enhanced else 'N/A'}")

-         # Set LORA scale
-         if hasattr(self.pipe, 'set_adapters') and self.models_loaded['lora']:
-             try:
-                 self.pipe.set_adapters(["retroart"], adapter_weights=[lora_scale])
-                 print(f"LORA scale: {lora_scale}")
-             except Exception as e:
-                 print(f"Could not set LORA scale: {e}")

-         # Prepare generation kwargs
-         pipe_kwargs = {
-             "image": resized_image,
-             "strength": strength,
-             "num_inference_steps": num_inference_steps,
-             "guidance_scale": guidance_scale,
-         }

-         # Setup generator with seed control
-         if seed == -1:
-             generator = torch.Generator(device=self.device)
-             actual_seed = generator.seed()
-             print(f"[SEED] Using random seed: {actual_seed}")
-         else:
-             generator = torch.Generator(device=self.device).manual_seed(seed)
-             actual_seed = seed
-             print(f"[SEED] Using fixed seed: {actual_seed}")

-         pipe_kwargs["generator"] = generator

-         # Use Compel for prompt encoding if available
-         if self.use_compel and self.compel is not None:
-             try:
-                 print("Encoding prompts with Compel...")
-                 conditioning = self.compel(prompt)
-                 negative_conditioning = self.compel(negative_prompt)

-                 pipe_kwargs["negative_pooled_prompt_embeds"] = negative_conditioning[1]

-                 print("[OK] Using Compel-encoded prompts")
              except Exception as e:
-                 print(f"
-                 pipe_kwargs["negative_prompt"] = negative_prompt
-         else:
-             pipe_kwargs["prompt"] = prompt
-             pipe_kwargs["negative_prompt"] = negative_prompt

-         # Add CLIP skip
-         if hasattr(self.pipe, 'text_encoder'):
-             pipe_kwargs["clip_skip"] = 2

-         # Configure ControlNet inputs
-         if using_multiple_controlnets and has_detected_faces and face_kps_image is not None:
-             print("Using InstantID (keypoints) + Depth ControlNets")
-             control_images = [face_kps_image, depth_image]
-             conditioning_scales = [identity_control_scale, depth_control_scale]

-             face_proj_embeds = torch.cat([
-                 torch.zeros_like(face_proj_embeds),  # Negative
-                 face_proj_embeds  # Positive
-             ], dim=0)

-             # Concatenate: [batch, text_tokens, 2048] + [batch, 16, 2048]
-             combined_embeds = torch.cat([original_embeds, face_proj_embeds], dim=1)
-             pipe_kwargs['prompt_embeds'] = combined_embeds

-             print(f"  - Text embeds: {original_embeds.shape}")
-             print(f"  - Combined embeds: {combined_embeds.shape}")
-             print(f"  [OK] Face embeddings concatenated successfully!")

-         else:
-             print(f"  [WARNING] Can't concatenate - no prompt_embeds (use Compel)")

-         # No need for dummy embeddings with concatenation approach

-         elif using_multiple_controlnets and not has_detected_faces:
-             print("Multiple ControlNets available but no faces detected, using depth only")
-             control_images = [depth_image, depth_image]
-             conditioning_scales = [0.0, depth_control_scale]

          generated_image = color_match(generated_image, resized_image, mode='mkl')
          print("[OK] Standard color matching applied")

-         try:
-             generated_image = color_match(generated_image, resized_image, mode='mkl')
-             print("[OK] Standard color matching applied")
-         except Exception as e:
-             print(f"Color matching failed: {e}")

- print("[OK] Generator class ready")
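The central change in the rewrite below is temporary GPU residency: auxiliary models (the Zoe depth detector and the caption model) stay on the CPU between calls and are moved to the GPU only for the duration of one call, then moved back with the CUDA cache cleared so the SDXL pipeline keeps the VRAM. A minimal standalone sketch of that pattern, assuming only torch (the TemporaryGPU name and the Linear stand-in are illustrative, not part of this repo):

import torch

class TemporaryGPU:
    """Context manager: move a model to the GPU for one call, then back to CPU."""
    def __init__(self, model, device="cuda"):
        self.model = model
        self.device = device

    def __enter__(self):
        if torch.cuda.is_available():
            self.model.to(self.device)
        return self.model

    def __exit__(self, exc_type, exc, tb):
        if torch.cuda.is_available():
            self.model.to("cpu")
            torch.cuda.empty_cache()  # release cached blocks for the main pipeline
        return False  # propagate exceptions, mirroring the explicit CPU retry below

# Usage: the model occupies no VRAM except while inside the with-block.
model = torch.nn.Linear(8, 8)  # stand-in for the depth or caption model
x = torch.randn(1, 8)
with TemporaryGPU(model) as m:
    y = m(x.to("cuda" if torch.cuda.is_available() else "cpu"))
print(y.shape)

The new generator.py inlines this as paired .to(self.device) / .to("cpu") calls with torch.cuda.empty_cache(), plus an explicit retry on the CPU when the GPU pass fails.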
New version (right pane of the diff; added lines are marked with +):

@@ -1,30 +1,32 @@
  """
+ Generation logic for Pixagram AI Pixel Art Generator
+ CORRECTED VERSION - Following examplewithface.py pattern
  """
  import torch
  import numpy as np
  import cv2
  from PIL import Image
+ import gc

  from config import (
+     device, dtype, TRIGGER_WORD,
+     ADAPTIVE_THRESHOLDS, ADAPTIVE_PARAMS, CAPTION_CONFIG
  )
  from utils import (
+     sanitize_text, enhanced_color_match, color_match,
+     get_demographic_description, calculate_optimal_size, safe_image_size
  )
  from models import (
+     load_face_analysis, load_depth_detector, load_controlnets,
+     load_sdxl_pipeline, load_lora, setup_compel,
      setup_scheduler, optimize_pipeline, load_caption_model, set_clip_skip
  )
+ from pipeline_stable_diffusion_xl_instantid_img2img import draw_kps
+ from memory_utils import MemoryManager, ModelOffloader


  class RetroArtConverter:
+     """Main class for retro art generation with InstantID"""

      def __init__(self):
          self.device = device

@@ -33,38 +35,29 @@
              'custom_checkpoint': False,
              'lora': False,
              'instantid': False,
+             'zoe_depth': False
          }

+         # Initialize memory manager
+         self.memory_manager = MemoryManager(device=device, dtype=dtype, verbose=True)

+         # Load face analysis (like examplewithface.py line 113)
+         self.face_app, face_detection_success = load_face_analysis()
+         if not face_detection_success or self.face_app is None:
+             raise RuntimeError("[ERROR] Face detection is required! Check InsightFace installation.")

+         # Load depth detector (starts on CPU) - single assignment, no alias
          self.zoe_depth, zoe_success = load_depth_detector()
          self.models_loaded['zoe_depth'] = zoe_success

+         # Load ControlNets AS LIST
+         controlnet_instantid, controlnet_depth = load_controlnets()
+         controlnets = [controlnet_instantid, controlnet_depth]
+         self.models_loaded['instantid'] = True

+         print("Initializing InstantID pipeline with Face + Depth ControlNets")

+         # Load SDXL pipeline with InstantID (handles IP-Adapter internally)
          self.pipe, checkpoint_success = load_sdxl_pipeline(controlnets)
          self.models_loaded['custom_checkpoint'] = checkpoint_success

@@ -72,48 +65,28 @@
          lora_success = load_lora(self.pipe)
          self.models_loaded['lora'] = lora_success

          # Setup Compel
          self.compel, self.use_compel = setup_compel(self.pipe)

+         # Setup scheduler
          setup_scheduler(self.pipe)

+         # Optimize
          optimize_pipeline(self.pipe)

+         # Load caption model (starts on CPU)
          self.caption_processor, self.caption_model, self.caption_enabled, self.caption_model_type = load_caption_model()

          # Set CLIP skip
          set_clip_skip(self.pipe)

+         # Print status
          self._print_status()

+         # Initial memory cleanup
+         self.memory_manager.cleanup_memory(aggressive=True)

+         print(" [OK] RetroArtConverter initialized with optimized memory management!")

      def _print_status(self):
          """Print model loading status"""

@@ -121,142 +94,106 @@
          for model, loaded in self.models_loaded.items():
              status = "[OK] LOADED" if loaded else "[FALLBACK/DISABLED]"
              print(f"{model}: {status}")
+         print("InstantID Pipeline: [OK] ACTIVE")
+         print("IP-Adapter: [OK] Built into pipeline")
+         print(f"Face Detection: [OK] {'READY' if self.face_app else 'UNAVAILABLE'}")
          print("===================\n")

      def get_depth_map(self, image):
+         """Generate depth map using Zoe Depth with optimized GPU usage"""
+         if self.zoe_depth is not None:
+             try:
+                 if image.mode != 'RGB':
+                     image = image.convert('RGB')

+                 # Use safe size helper to avoid numpy.int64 issues
+                 orig_width, orig_height = safe_image_size(image)

+                 # Use multiples of 64
+                 target_width = int((orig_width // 64) * 64)
+                 target_height = int((orig_height // 64) * 64)

+                 target_width = int(max(64, target_width))
+                 target_height = int(max(64, target_height))

+                 size_for_depth = (int(target_width), int(target_height))
+                 image_for_depth = image.resize(size_for_depth, Image.LANCZOS)

+                 # Move depth model to GPU temporarily
                  try:
+                     if torch.cuda.is_available():
+                         self.zoe_depth = self.zoe_depth.to(self.device)

+                     # Generate depth map
+                     depth_output = self.zoe_depth(image_for_depth, detect_resolution=512, image_resolution=1024)

+                     # Handle different output types
+                     if isinstance(depth_output, Image.Image):
+                         depth_image = depth_output
+                     elif isinstance(depth_output, np.ndarray):
+                         depth_image = Image.fromarray(depth_output.astype(np.uint8))
+                     elif isinstance(depth_output, torch.Tensor):
+                         depth_array = depth_output.cpu().numpy()
+                         if depth_array.ndim == 3 and depth_array.shape[0] == 3:
+                             depth_array = depth_array.transpose(1, 2, 0)
+                         depth_image = Image.fromarray((depth_array * 255).astype(np.uint8))
+                     else:
+                         print(f"[DEPTH] Unexpected output type: {type(depth_output)}")
+                         depth_image = image_for_depth.convert('L').convert('RGB')

+                     # Move back to CPU to free GPU memory
+                     if torch.cuda.is_available():
+                         self.zoe_depth = self.zoe_depth.to("cpu")
+                         torch.cuda.empty_cache()

+                 except Exception as inner_e:
+                     print(f"[DEPTH] GPU processing failed: {inner_e}, trying on CPU")
+                     self.zoe_depth = self.zoe_depth.to("cpu")

+                     depth_output = self.zoe_depth(image_for_depth, detect_resolution=512, image_resolution=1024)

+                     if isinstance(depth_output, Image.Image):
+                         depth_image = depth_output
+                     elif isinstance(depth_output, np.ndarray):
+                         depth_image = Image.fromarray(depth_output.astype(np.uint8))
+                     else:
+                         depth_image = image_for_depth.convert('L').convert('RGB')

+                 # Ensure depth image is RGB
+                 if depth_image.mode != 'RGB':
+                     depth_image = depth_image.convert('RGB')

+                 if depth_image.size != image.size:
+                     depth_image = depth_image.resize(image.size, Image.LANCZOS)

+                 print(f"[DEPTH] Generated depth map: {depth_image.size}")
+                 return depth_image

+             except Exception as e:
+                 print(f"[DEPTH] Generation failed: {e}, using grayscale fallback")
+                 fallback = image.convert('L').convert('RGB')
+                 return fallback
+         else:
+             print("[DEPTH] Detector not available, using grayscale")
+             fallback = image.convert('L').convert('RGB')
+             return fallback

      def add_trigger_word(self, prompt):
          """Add trigger word to prompt if not present"""
          if TRIGGER_WORD.lower() not in prompt.lower():
              if not prompt or not prompt.strip():
                  return TRIGGER_WORD
              return f"{TRIGGER_WORD}, {prompt}"
          return prompt

      def detect_face_quality(self, face):
+         """Detect face quality and adaptively adjust parameters"""
          try:
              bbox = face.bbox
              face_size = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
              det_score = float(face.det_score) if hasattr(face, 'det_score') else 1.0

+             # Small face -> boost preservation
              if face_size < ADAPTIVE_THRESHOLDS['small_face_size']:
                  return ADAPTIVE_PARAMS['small_face'].copy()

@@ -264,7 +201,7 @@
              elif det_score < ADAPTIVE_THRESHOLDS['low_confidence']:
                  return ADAPTIVE_PARAMS['low_confidence'].copy()

+             # Check for profile view
              elif hasattr(face, 'pose') and len(face.pose) > 1:
                  try:
                      yaw = float(face.pose[1])

@@ -273,455 +210,322 @@
                  except (ValueError, TypeError, IndexError):
                      pass

              return None

          except Exception as e:
              print(f"[ADAPTIVE] Quality detection failed: {e}")
              return None

+     def generate_caption(self, image):
+         """Generate caption for image with optimized GPU usage"""
          if not self.caption_enabled or self.caption_model is None:
              return None

          try:
+             # Move caption model to GPU temporarily
+             original_device = "cpu"
+             if hasattr(self.caption_model, 'device'):
+                 original_device = str(self.caption_model.device)

+             try:
+                 # Move to GPU for processing
+                 if torch.cuda.is_available():
+                     self.caption_model = self.caption_model.to(self.device)

+                 if self.caption_model_type == 'git':
+                     inputs = self.caption_processor(images=image, return_tensors="pt").to(self.device)
+                     generated_ids = self.caption_model.generate(**inputs, max_length=CAPTION_CONFIG['max_length'])
+                     caption = self.caption_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+                 elif self.caption_model_type == 'blip':
+                     inputs = self.caption_processor(image, return_tensors="pt").to(self.device)
+                     generated_ids = self.caption_model.generate(**inputs, max_length=CAPTION_CONFIG['max_length'])
+                     caption = self.caption_processor.decode(generated_ids[0], skip_special_tokens=True)
+                 else:
+                     return None

+                 # Move back to CPU to free GPU memory
+                 if torch.cuda.is_available() and "cpu" in original_device:
+                     self.caption_model = self.caption_model.to("cpu")
+                     torch.cuda.empty_cache()

+             except Exception as gpu_error:
+                 print(f"[CAPTION] GPU processing failed: {gpu_error}, trying on CPU")
+                 self.caption_model = self.caption_model.to("cpu")

+                 if self.caption_model_type == 'git':
+                     inputs = self.caption_processor(images=image, return_tensors="pt")
+                     generated_ids = self.caption_model.generate(**inputs, max_length=CAPTION_CONFIG['max_length'])
+                     caption = self.caption_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+                 elif self.caption_model_type == 'blip':
+                     inputs = self.caption_processor(image, return_tensors="pt")
+                     generated_ids = self.caption_model.generate(**inputs, max_length=CAPTION_CONFIG['max_length'])
+                     caption = self.caption_processor.decode(generated_ids[0], skip_special_tokens=True)
+                 else:
+                     return None

+             return sanitize_text(caption)
          except Exception as e:
+             print(f"[CAPTION] Generation failed: {e}")
              return None

      def generate_retro_art(
          self,
          input_image,
+         prompt=" ",
+         negative_prompt=" ",
          num_inference_steps=12,
+         guidance_scale=1.3,
+         depth_control_scale=0.75,
          identity_control_scale=0.85,
          lora_scale=1.0,
+         identity_preservation=1.2,
+         strength=0.50,
          enable_color_matching=False,
          consistency_mode=True,
          seed=-1
      ):
+         """Generate retro art with InstantID face preservation"""

+         try:
+             # Add trigger word
+             prompt = self.add_trigger_word(prompt)
+             prompt = sanitize_text(prompt)
+             negative_prompt = sanitize_text(negative_prompt)

+             print(f"[PROMPT] {prompt}")

+             # Calculate optimal size
+             orig_width, orig_height = safe_image_size(input_image)
+             optimal_width, optimal_height = calculate_optimal_size(orig_width, orig_height)

+             # Resize image
+             resized_image = input_image.resize((optimal_width, optimal_height), Image.LANCZOS)
+             print(f"[SIZE] Resized to {optimal_width}x{optimal_height}")

+             # Generate depth map
+             depth_image = self.get_depth_map(resized_image)

+             # ═══════════════════════════════════════════════════════════
+             # FACE DETECTION
+             # ═══════════════════════════════════════════════════════════
+             has_detected_faces = False
+             face_kps_image = None
+             face_embeddings = None
+             face_bbox_original = None

+             # FACE DETECTION (examplewithface.py line 321-327)
+             try:
+                 image_array = cv2.cvtColor(np.array(resized_image), cv2.COLOR_RGB2BGR)
+                 faces = self.face_app.get(image_array)

+                 if len(faces) == 0:
+                     raise ValueError("No faces detected in image")

+                 # Get largest face (examplewithface.py line 322)
+                 face = sorted(faces, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[-1]

+                 # Get embeddings and keypoints
+                 face_embeddings = face['embedding']
+                 face_kps_image = draw_kps(resized_image, face['kps'])
+                 face_bbox_original = face.get('bbox', None)

+                 # Adaptive parameter adjustment
                  adaptive_params = self.detect_face_quality(face)
+                 if adaptive_params:
                      print(f"[ADAPTIVE] {adaptive_params['reason']}")
+                     identity_preservation = adaptive_params.get('identity_preservation', identity_preservation)
+                     identity_control_scale = adaptive_params.get('identity_control_scale', identity_control_scale)
+                     guidance_scale = adaptive_params.get('guidance_scale', guidance_scale)
+                     lora_scale = adaptive_params.get('lora_scale', lora_scale)

+                 print(f"[FACE] Detected face with {face.get('det_score', 1.0):.2f} confidence")
+                 print(f"[FACE] Embeddings shape: {face_embeddings.shape}")
+                 has_detected_faces = True

              except Exception as e:
+                 print(f"[FACE] Face detection failed: {str(e)[:100]}")
+                 raise ValueError(f"No face found in image. Only face images work. Error: {str(e)}")

+             # Fuse LORA with scale (following working example approach)
+             if self.models_loaded['lora']:
+                 try:
+                     from models import fuse_lora_with_scale
+                     fuse_lora_with_scale(self.pipe, lora_scale)
+                     print(f"[LORA] Fused with scale: {lora_scale}")
+                 except Exception as e:
+                     print(f"[LORA] Could not fuse: {e}")

+             # ═══════════════════════════════════════════════════════════
+             # PIPELINE CONFIGURATION
+             # ═══════════════════════════════════════════════════════════
+             pipe_kwargs = {
+                 "image": resized_image,
+                 "strength": strength,
+                 "num_inference_steps": num_inference_steps,
+                 "guidance_scale": guidance_scale,
+                 "cross_attention_kwargs": {"scale": lora_scale},
+             }

+             # Setup generator with seed
+             if seed == -1:
+                 generator = torch.Generator(device=self.device)
+                 actual_seed = generator.seed()
+                 print(f"[SEED] Random: {actual_seed}")
+             else:
+                 generator = torch.Generator(device=self.device).manual_seed(seed)
+                 actual_seed = seed
+                 print(f"[SEED] Fixed: {actual_seed}")

+             pipe_kwargs["generator"] = generator

+             # Use Compel for prompt encoding
+             if self.use_compel and self.compel is not None:
+                 try:
+                     conditioning = self.compel(prompt)
+                     negative_conditioning = self.compel(negative_prompt)

+                     pipe_kwargs["prompt_embeds"] = conditioning[0]
+                     pipe_kwargs["pooled_prompt_embeds"] = conditioning[1]
+                     pipe_kwargs["negative_prompt_embeds"] = negative_conditioning[0]
+                     pipe_kwargs["negative_pooled_prompt_embeds"] = negative_conditioning[1]

+                     print("[OK] Using Compel-encoded prompts")
+                 except Exception as e:
+                     print(f"[COMPEL] Failed, using standard prompts: {e}")
+                     pipe_kwargs["prompt"] = prompt
+                     pipe_kwargs["negative_prompt"] = negative_prompt
+             else:
+                 pipe_kwargs["prompt"] = prompt
+                 pipe_kwargs["negative_prompt"] = negative_prompt

+             # ═══════════════════════════════════════════════════════════
+             # CONTROLNET + IP-ADAPTER CONFIGURATION
+             # ═══════════════════════════════════════════════════════════

+             if has_detected_faces and face_kps_image is not None and face_embeddings is not None:
+                 print("═" * 60)
+                 print("MODE: InstantID (Face Keypoints + Depth + IP-Adapter)")
+                 print("═" * 60)

+                 # Set IP-Adapter scale
+                 self.pipe.set_ip_adapter_scale(identity_preservation)
+                 print(f"  [IP-ADAPTER] Scale set to: {identity_preservation}")

+                 # Control images: [face keypoints, depth map]
+                 pipe_kwargs["control_image"] = [face_kps_image, depth_image]

+                 # ControlNet scales: [identity keypoints, depth]
+                 pipe_kwargs["controlnet_conditioning_scale"] = [
+                     identity_control_scale,
+                     depth_control_scale
+                 ]

+                 # Control guidance timing
+                 pipe_kwargs["control_guidance_start"] = [0.0, 0.0]
+                 pipe_kwargs["control_guidance_end"] = [1.0, 1.0]

+                 # Pass raw face embeddings - pipeline handles everything
+                 pipe_kwargs["image_embeds"] = face_embeddings

+                 print(f"  [CONTROLNET] Identity scale: {identity_control_scale}")
+                 print(f"  [CONTROLNET] Depth scale: {depth_control_scale}")
+                 print(f"  [EMBEDDINGS] Shape: {face_embeddings.shape} (raw)")
+                 print("  [INFO] Pipeline will handle: Resampler → Concatenation → Attention")
+                 print("═" * 60)

+             elif has_detected_faces and face_kps_image is not None:
+                 print("═" * 60)
+                 print("MODE: InstantID Keypoints Only (no embeddings)")
+                 print("═" * 60)

+                 # Disable IP-Adapter
+                 self.pipe.set_ip_adapter_scale(0.0)
+                 print("  [IP-ADAPTER] Disabled (no embeddings)")

+                 # Use keypoints + depth
+                 pipe_kwargs["control_image"] = [face_kps_image, depth_image]
+                 pipe_kwargs["controlnet_conditioning_scale"] = [
+                     identity_control_scale,
+                     depth_control_scale
+                 ]
+                 pipe_kwargs["control_guidance_start"] = [0.0, 0.0]
+                 pipe_kwargs["control_guidance_end"] = [1.0, 1.0]

+                 # Pass zero embeddings
+                 zero_embeddings = np.zeros(512, dtype=np.float32)
+                 pipe_kwargs["image_embeds"] = zero_embeddings

+                 print("  [INFO] Using keypoints for structure only (zero embeddings)")
+                 print("═" * 60)

+             else:
+                 print("═" * 60)
+                 print("MODE: Depth Only (no face detection)")
+                 print("═" * 60)

+                 # Disable IP-Adapter
+                 self.pipe.set_ip_adapter_scale(0.0)
+                 print("  [IP-ADAPTER] Disabled (no face)")

+                 # Use depth only
+                 pipe_kwargs["control_image"] = [depth_image, depth_image]
+                 pipe_kwargs["controlnet_conditioning_scale"] = [0.0, depth_control_scale]
+                 pipe_kwargs["control_guidance_start"] = [0.0, 0.0]
+                 pipe_kwargs["control_guidance_end"] = [1.0, 1.0]

+                 # Pass zero embeddings
+                 zero_embeddings = np.zeros(512, dtype=np.float32)
+                 pipe_kwargs["image_embeds"] = zero_embeddings

+                 print(f"  [CONTROLNET] Depth scale: {depth_control_scale}")
+                 print("  [INFO] Generating without face preservation (zero embeddings)")
+                 print("═" * 60)

+             # ═══════════════════════════════════════════════════════════
+             # GENERATION
+             # ═══════════════════════════════════════════════════════════
+             print(f"\nGenerating: Steps={num_inference_steps}, CFG={guidance_scale}, Strength={strength}")

+             result = self.pipe(**pipe_kwargs)

+             generated_image = result.images[0]

+             # ═══════════════════════════════════════════════════════════
+             # POST-PROCESSING
+             # ═══════════════════════════════════════════════════════════
+             if enable_color_matching and has_detected_faces:
+                 print("Applying enhanced face-aware color matching...")
+                 try:
+                     if face_bbox_original is not None:
+                         generated_image = enhanced_color_match(
+                             generated_image,
+                             resized_image,
+                             face_bbox=face_bbox_original
+                         )
+                         print("[OK] Enhanced color matching applied")
+                     else:
+                         generated_image = color_match(generated_image, resized_image, mode='mkl')
+                         print("[OK] Standard color matching applied")
+                 except Exception as e:
+                     print(f"[COLOR] Matching failed: {e}")
+             elif enable_color_matching:
+                 print("Applying standard color matching...")
+                 try:
                      generated_image = color_match(generated_image, resized_image, mode='mkl')
                      print("[OK] Standard color matching applied")
+                 except Exception as e:
+                     print(f"[COLOR] Matching failed: {e}")

+             return generated_image

+         finally:
+             # Memory cleanup
+             self.memory_manager.cleanup_memory(aggressive=True)

+             # Final memory status
+             if self.memory_manager.verbose:
+                 print("[MEMORY] Final status after generation:")
+                 self.memory_manager.print_memory_status()


+ print("[OK] Generator class ready with cleaned code")
|