"""
Utility functions for Pixagram AI Pixel Art Generator
"""
import numpy as np
import cv2
import math
from PIL import Image, ImageEnhance, ImageFilter, ImageDraw
from config import COLOR_MATCH_CONFIG, FACE_MASK_CONFIG, AGE_BRACKETS
def sanitize_text(text):
"""
Remove or replace problematic characters (emojis, special unicode)
that might cause encoding errors.
"""
if not text:
return text
try:
# Encode/decode to remove invalid bytes
text = text.encode('utf-8', errors='ignore').decode('utf-8')
# Keep only characters within safe unicode range
text = ''.join(char for char in text if ord(char) < 65536)
except Exception as e:
print(f"[WARNING] Text sanitization warning: {e}")
return text
def color_match_lab(target, source, preserve_saturation=True):
"""
LAB color space matching for better skin tones with saturation preservation.
GENTLE version to prevent color fading.
Args:
target: Target image to adjust
source: Source image to match colors from
preserve_saturation: If True, preserves original saturation levels
"""
try:
target_lab = cv2.cvtColor(target.astype(np.uint8), cv2.COLOR_RGB2LAB).astype(np.float32)
source_lab = cv2.cvtColor(source.astype(np.uint8), cv2.COLOR_RGB2LAB).astype(np.float32)
result_lab = np.copy(target_lab)
# Very gentle L channel matching
t_mean, t_std = target_lab[:,:,0].mean(), target_lab[:,:,0].std()
s_mean, s_std = source_lab[:,:,0].mean(), source_lab[:,:,0].std()
if t_std > 1e-6:
matched = (target_lab[:,:,0] - t_mean) * (s_std / t_std) * 0.5 + s_mean
result_lab[:,:,0] = target_lab[:,:,0] * (1 - COLOR_MATCH_CONFIG['lab_lightness_blend']) + matched * COLOR_MATCH_CONFIG['lab_lightness_blend']
if preserve_saturation:
# Minimal adjustment to A and B channels
for i in [1, 2]:
t_mean, t_std = target_lab[:,:,i].mean(), target_lab[:,:,i].std()
s_mean, s_std = source_lab[:,:,i].mean(), source_lab[:,:,i].std()
if t_std > 1e-6:
matched = (target_lab[:,:,i] - t_mean) * (s_std / t_std) + s_mean
blend_factor = COLOR_MATCH_CONFIG['lab_color_blend_preserved']
result_lab[:,:,i] = target_lab[:,:,i] * (1 - blend_factor) + matched * blend_factor
else:
# Gentle full matching
for i in [1, 2]:
t_mean, t_std = target_lab[:,:,i].mean(), target_lab[:,:,i].std()
s_mean, s_std = source_lab[:,:,i].mean(), source_lab[:,:,i].std()
if t_std > 1e-6:
matched = (target_lab[:,:,i] - t_mean) * (s_std / t_std) + s_mean
blend_factor = COLOR_MATCH_CONFIG['lab_color_blend_full']
result_lab[:,:,i] = target_lab[:,:,i] * (1 - blend_factor) + matched * blend_factor
        # Clip before casting so out-of-range LAB values don't wrap around in uint8
        return cv2.cvtColor(np.clip(result_lab, 0, 255).astype(np.uint8), cv2.COLOR_LAB2RGB)
except Exception as e:
print(f"LAB conversion error: {e}")
return target.astype(np.uint8)
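# Illustrative usage sketch (not part of the original module): color_match_lab
# works on RGB numpy arrays and returns uint8; the file names below are
# hypothetical and only here to show the expected call shape.
#
#   gen = np.asarray(Image.open("generated.png").convert("RGB"))
#   ref = np.asarray(Image.open("reference.png").convert("RGB"))
#   matched = color_match_lab(gen, ref, preserve_saturation=True)
#   Image.fromarray(matched).save("matched.png")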
def enhance_saturation(image, boost=1.05):
"""
Minimal saturation enhancement (disabled by default).
Args:
image: PIL Image
boost: Saturation multiplier (1.0 = no change, >1.0 = more saturated)
"""
if boost <= 1.0:
return image
enhancer = ImageEnhance.Color(image)
return enhancer.enhance(boost)
def enhanced_color_match(target_img, source_img, face_bbox=None, preserve_vibrance=False):
"""
Enhanced color matching with face-aware processing.
Very gentle to prevent color fading.
Args:
target_img: Generated image to adjust
source_img: Original image to match colors from
face_bbox: Optional [x1, y1, x2, y2] for face region
preserve_vibrance: If True, adds minimal saturation boost (disabled by default)
"""
try:
target = np.array(target_img).astype(np.float32)
source = np.array(source_img).astype(np.float32)
if face_bbox is not None:
# Create face mask
x1, y1, x2, y2 = [int(c) for c in face_bbox]
x1, y1 = max(0, x1), max(0, y1)
x2, y2 = min(target.shape[1], x2), min(target.shape[0], y2)
face_mask = np.zeros((target.shape[0], target.shape[1]), dtype=np.float32)
face_mask[y1:y2, x1:x2] = 1.0
# Blur mask for smooth transition
face_mask = cv2.GaussianBlur(
face_mask,
COLOR_MATCH_CONFIG['gaussian_blur_kernel'],
COLOR_MATCH_CONFIG['gaussian_blur_sigma']
)
face_mask = face_mask[:, :, np.newaxis]
# Match colors for face region with saturation preservation
if y2 > y1 and x2 > x1:
face_result = color_match_lab(
target[y1:y2, x1:x2],
source[y1:y2, x1:x2],
preserve_saturation=True
)
                # Paste the matched face into a copy, then feather it back into
                # the untouched frame with the blurred mask so the transition
                # is soft instead of a hard rectangle
                matched = np.copy(target)
                matched[y1:y2, x1:x2] = face_result
                result = matched * face_mask + target * (1 - face_mask)
else:
result = color_match_lab(target, source, preserve_saturation=True)
else:
# Standard LAB color matching with saturation preservation
result = color_match_lab(target, source, preserve_saturation=True)
        result_img = Image.fromarray(np.clip(result, 0, 255).astype(np.uint8))
# NO saturation boost by default
if preserve_vibrance:
result_img = enhance_saturation(result_img, boost=COLOR_MATCH_CONFIG['saturation_boost'])
return result_img
except Exception as e:
print(f"Enhanced color matching failed: {e}, returning target image")
return target_img
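# Illustrative usage sketch (assumption, not original code): both images are
# same-size PIL RGB images, and the bbox comes from a face detector such as
# InsightFace as [x1, y1, x2, y2] pixel coordinates.
#
#   corrected = enhanced_color_match(generated_img, source_img,
#                                    face_bbox=[120, 80, 380, 360])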
def color_match(target_img, source_img, mode='mkl'):
"""
Legacy color matching function - kept for compatibility.
Use enhanced_color_match for better results.
"""
try:
target = np.array(target_img).astype(np.float32)
source = np.array(source_img).astype(np.float32)
if mode == 'simple':
result = np.zeros_like(target)
for i in range(3):
t_mean, t_std = target[:,:,i].mean(), target[:,:,i].std()
s_mean, s_std = source[:,:,i].mean(), source[:,:,i].std()
result[:,:,i] = (target[:,:,i] - t_mean) * (s_std / (t_std + 1e-6)) + s_mean
result[:,:,i] = np.clip(result[:,:,i], 0, 255)
elif mode == 'mkl':
result = color_match_lab(target, source)
        else:  # 'pdf' mode: approximated here as a per-channel linear range remap
result = np.zeros_like(target)
for i in range(3):
result[:,:,i] = np.interp(
target[:,:,i].flatten(),
np.linspace(target[:,:,i].min(), target[:,:,i].max(), 256),
np.linspace(source[:,:,i].min(), source[:,:,i].max(), 256)
).reshape(target[:,:,i].shape)
return Image.fromarray(result.astype(np.uint8))
except Exception as e:
print(f"Color matching failed: {e}, returning target image")
return target_img
def create_face_mask(image, face_bbox, feather=None):
"""
Create a soft mask around the detected face for better blending.
Args:
image: PIL Image
face_bbox: [x1, y1, x2, y2]
feather: blur radius for soft edges (uses config default if None)
"""
if feather is None:
feather = FACE_MASK_CONFIG['feather']
mask = Image.new('L', image.size, 0)
draw = ImageDraw.Draw(mask)
# Expand bbox slightly
x1, y1, x2, y2 = face_bbox
padding = int((x2 - x1) * FACE_MASK_CONFIG['padding'])
x1 = max(0, x1 - padding)
y1 = max(0, y1 - padding)
x2 = min(image.width, x2 + padding)
y2 = min(image.height, y2 + padding)
# Draw ellipse for more natural face shape
draw.ellipse([x1, y1, x2, y2], fill=255)
# Apply gaussian blur for soft edges
mask = mask.filter(ImageFilter.GaussianBlur(feather))
return mask
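# Illustrative usage sketch (assumption): the soft mask can be used to paste a
# processed face back onto the full frame with PIL's composite; both images
# must share the same size.
#
#   mask = create_face_mask(base_img, face_bbox)
#   blended = Image.composite(stylized_img, base_img, mask)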
def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]):
"""Draw facial keypoints on image for InstantID ControlNet"""
stickwidth = 4
limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
kps = np.array(kps)
w, h = image_pil.size
out_img = np.zeros([h, w, 3])
for i in range(len(limbSeq)):
index = limbSeq[i]
color = color_list[index[0]]
x = kps[index][:, 0]
y = kps[index][:, 1]
length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
polygon = cv2.ellipse2Poly(
(int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1
)
out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
out_img = (out_img * 0.6).astype(np.uint8)
for idx_kp, kp in enumerate(kps):
color = color_list[idx_kp]
x, y = kp
out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
out_img_pil = Image.fromarray(out_img.astype(np.uint8))
return out_img_pil
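# Illustrative usage sketch (assumption): InstantID expects the five InsightFace
# keypoints (eyes, nose, mouth corners), exposed as `face.kps` on FaceAnalysis
# detection results.
#
#   kps_image = draw_kps(original_img, face.kps)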
def get_facial_attributes(face):
"""
Extract comprehensive facial attributes.
Returns dict with age, gender, expression, quality metrics.
"""
attributes = {
'age': None,
'gender': None,
'expression': None,
'quality': 1.0,
'pose_angle': 0,
'description': []
}
# Age extraction
try:
if hasattr(face, 'age'):
age = int(face.age)
attributes['age'] = age
for min_age, max_age, label in AGE_BRACKETS:
if min_age <= age < max_age:
attributes['description'].append(label)
break
except (ValueError, TypeError, AttributeError) as e:
print(f"[WARNING] Age extraction failed: {e}")
# Gender extraction
try:
if hasattr(face, 'gender'):
gender_code = int(face.gender)
attributes['gender'] = gender_code
if gender_code == 1:
attributes['description'].append("male")
elif gender_code == 0:
attributes['description'].append("female")
except (ValueError, TypeError, AttributeError) as e:
print(f"[WARNING] Gender extraction failed: {e}")
# Expression/emotion detection (if available)
try:
if hasattr(face, 'emotion'):
# Some InsightFace models provide emotion
emotion = face.emotion
if isinstance(emotion, (list, tuple)) and len(emotion) > 0:
emotions = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear']
emotion_idx = int(np.argmax(emotion))
emotion_name = emotions[emotion_idx] if emotion_idx < len(emotions) else 'neutral'
confidence = float(emotion[emotion_idx])
if confidence > 0.4: # Only add if confident
if emotion_name == 'happiness':
attributes['expression'] = 'smiling'
attributes['description'].append('smiling')
elif emotion_name not in ['neutral']:
attributes['expression'] = emotion_name
    except (ValueError, TypeError, AttributeError, IndexError):
        # Expression not available in this model
        pass
# Pose angle (profile detection)
try:
if hasattr(face, 'pose'):
pose = face.pose
if len(pose) > 1:
yaw = float(pose[1])
attributes['pose_angle'] = abs(yaw)
except (ValueError, TypeError, AttributeError, IndexError):
pass
# Detection quality
try:
if hasattr(face, 'det_score'):
attributes['quality'] = float(face.det_score)
except (ValueError, TypeError, AttributeError):
pass
return attributes
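# Illustrative usage sketch (assumption): `face` is a detection result from
# InsightFace's FaceAnalysis, which exposes `age`, `gender`, `pose`, and
# `det_score` on most analysis models.
#
#   attrs = get_facial_attributes(face)
#   print(attrs['age'], attrs['gender'], attrs['description'])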
def build_enhanced_prompt(base_prompt, facial_attributes, trigger_word):
"""
Build enhanced prompt with facial attributes intelligently integrated.
"""
prompt = base_prompt
descriptions = facial_attributes['description']
if not descriptions:
return base_prompt
# Check if demographics already in prompt
prompt_lower = prompt.lower()
has_demographics = any(desc.lower() in prompt_lower for desc in descriptions)
if not has_demographics:
# Insert after trigger word for better integration
demographic_str = ", ".join(descriptions) + " person"
prompt = prompt.replace(
trigger_word,
f"{trigger_word}, {demographic_str}",
1
)
age = facial_attributes.get('age')
quality = facial_attributes.get('quality')
expression = facial_attributes.get('expression')
print(f"[FACE] Detected: {', '.join(descriptions)}")
print(f" Age: {age if age else 'N/A'}, Quality: {quality:.2f}")
if expression:
print(f" Expression: {expression}")
return prompt
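# Illustrative usage sketch (assumption): the prompt text and trigger word
# below are placeholders, not values defined by this module.
#
#   prompt = build_enhanced_prompt(
#       "pixel art of TOK, retro game style",
#       get_facial_attributes(face),
#       trigger_word="TOK",
#   )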
def get_demographic_description(age, gender_code):
"""
Legacy function - kept for compatibility.
Use get_facial_attributes() for new code.
"""
demo_desc = []
if age is not None:
try:
age_int = int(age)
for min_age, max_age, label in AGE_BRACKETS:
if min_age <= age_int < max_age:
demo_desc.append(label)
break
except (ValueError, TypeError):
pass
if gender_code is not None:
try:
if int(gender_code) == 1:
demo_desc.append("male")
elif int(gender_code) == 0:
demo_desc.append("female")
except (ValueError, TypeError):
pass
return demo_desc
def calculate_optimal_size(original_width, original_height, recommended_sizes=None, max_dimension=1536):
"""
    Calculate optimal size maintaining aspect ratio with dimensions as multiples of 64.
This updated version supports ANY aspect ratio (not just predefined ones),
    while ensuring dimensions are multiples of 64 and keeping the total pixel count reasonable.
Args:
original_width: Original image width
original_height: Original image height
recommended_sizes: Optional list of (width, height) tuples (legacy support)
max_dimension: Maximum allowed dimension (default 1536)
Returns:
        Tuple of (optimal_width, optimal_height) as multiples of 64
"""
aspect_ratio = original_width / original_height
# Legacy mode: use recommended sizes if provided
    if recommended_sizes:  # also guards against an empty list, which would crash below
best_match = None
best_diff = float('inf')
for width, height in recommended_sizes:
rec_aspect = width / height
diff = abs(rec_aspect - aspect_ratio)
if diff < best_diff:
best_diff = diff
best_match = (width, height)
        # Snap dimensions to multiples of 64
width, height = best_match
width = int((width // 64) * 64)
height = int((height // 64) * 64)
return width, height
# NEW: Support any aspect ratio
    # Strategy: keep aspect ratio, scale to a reasonable total pixel count, round to multiples of 64
# Target total pixels (around 1 megapixel for SDXL, adjustable)
target_pixels = 1024 * 1024 # ~1MP, good balance for SDXL
# Calculate dimensions that maintain aspect ratio and hit target pixels
# width * height = target_pixels
# width / height = aspect_ratio
# => width = aspect_ratio * height
# => aspect_ratio * height^2 = target_pixels
# => height = sqrt(target_pixels / aspect_ratio)
optimal_height = math.sqrt(target_pixels / aspect_ratio)
optimal_width = optimal_height * aspect_ratio
# Ensure we don't exceed max_dimension
if optimal_width > max_dimension:
optimal_width = max_dimension
optimal_height = optimal_width / aspect_ratio
if optimal_height > max_dimension:
optimal_height = max_dimension
optimal_width = optimal_height * aspect_ratio
    # Round to nearest multiple of 64
width = int(round(optimal_width / 64) * 64)
height = int(round(optimal_height / 64) * 64)
# Ensure minimum size (at least 512 on shortest side)
min_dimension = 512
if min(width, height) < min_dimension:
if width < height:
width = min_dimension
height = int(round((width / aspect_ratio) / 64) * 64)
else:
height = min_dimension
width = int(round((height * aspect_ratio) / 64) * 64)
    # Final safety check: ensure multiples of 64
    width = max(64, int((width // 64) * 64))
    height = max(64, int((height // 64) * 64))
print(f"[SIZING] Aspect ratio: {aspect_ratio:.3f}, Output: {width}x{height} ({width*height/1e6:.2f}MP)")
return width, height
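# Worked example (approximate, assuming the default ~1 MP target and 64-px
# rounding): a 4000x3000 photo has aspect ratio ~1.333, giving an ideal height
# of sqrt(1048576 / 1.333) ~= 887 px and width ~= 1182 px, which round to
# (1152, 896), about 1.03 MP.
#
#   width, height = calculate_optimal_size(4000, 3000)   # -> (1152, 896)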
def enhance_face_crop(face_crop):
"""
Multi-stage enhancement for better feature preservation.
Args:
face_crop: PIL Image of face region
Returns:
Enhanced PIL Image
"""
# Stage 1: Resize to optimal size for CLIP (224x224)
face_crop_resized = face_crop.resize((224, 224), Image.LANCZOS)
# Stage 2: Enhance sharpness (helps with facial features)
enhancer = ImageEnhance.Sharpness(face_crop_resized)
face_crop_sharp = enhancer.enhance(1.5)
# Stage 3: Enhance contrast slightly (helps with lighting)
enhancer = ImageEnhance.Contrast(face_crop_sharp)
face_crop_enhanced = enhancer.enhance(1.1)
# Stage 4: Slight brightness adjustment to normalize lighting
enhancer = ImageEnhance.Brightness(face_crop_enhanced)
face_crop_final = enhancer.enhance(1.05)
return face_crop_final
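# Illustrative usage sketch (assumption): the crop typically comes from the
# detector's bounding box before being passed to an image encoder.
#
#   face_crop = original_img.crop((x1, y1, x2, y2))
#   clip_ready = enhance_face_crop(face_crop)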
print("[OK] Utilities loaded")