Update app.py #1
by Seniordev22 - opened

app.py CHANGED
@@ -1,122 +1,121 @@
-
-from fastapi.responses import StreamingResponse
-import io
-import logging
+import gradio as gr
 import torch
 import cv2
 import numpy as np
 from PIL import Image
+import logging
 from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
-
-import os
-# Logging setup
+
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
-
+
 device = "cuda" if torch.cuda.is_available() else "cpu"
-
-
+logger.info(f"Using device: {device}")
+
+print("Loading SegFormer face-parsing model...")
+processor = SegformerImageProcessor.from_pretrained("jonathandinu/face-parsing")
+model = SegformerForSemanticSegmentation.from_pretrained("jonathandinu/face-parsing")
+model.to(device)
+model.eval()
+logger.info("Model loaded!")
+
 hair_class_id = 13
 ear_class_ids = [7, 8]
-
-
-
-    version="1.0"
-)
-@app.on_event("startup")
-async def startup_event():
-    global processor, model
-    logger.info(f"Loading SegFormer model on {device}...")
-    processor = SegformerImageProcessor.from_pretrained("jonathandinu/face-parsing")
-    model = SegformerForSemanticSegmentation.from_pretrained("jonathandinu/face-parsing")
-    model.to(device)
-    model.eval()
-    logger.info("Model loaded successfully!")
-def make_realistic_bald(image_bytes: bytes) -> bytes:
-    """
-    Your original processor code - takes image bytes as input and returns bald image bytes
-    (replaced directly from the local version, kept the halo-reduction changes too)
-    """
+
+def make_realistic_bald(input_image: Image.Image) -> tuple[Image.Image, Image.Image, Image.Image]:
+    # (This entire function is your proven logic - same as before, no change)
     try:
-
-
-        orig_w, orig_h = image.size
-        original_np = np.array(image)
+        orig_w, orig_h = input_image.size
+        original_np = np.array(input_image)
         original_bgr = cv2.cvtColor(original_np, cv2.COLOR_RGB2BGR)
-        logger.info(f"Processing
-
-        MAX_PROCESS_DIM = 2048
+        logger.info(f"Processing: {orig_w}x{orig_h}")
+
+        MAX_PROCESS_DIM = 2048
         scale_factor = 1.0
         working_np = original_np
         working_bgr = original_bgr
         working_h, working_w = orig_h, orig_w
+
         if max(orig_w, orig_h) > MAX_PROCESS_DIM:
-            logger.info(f"
+            logger.info(f"Downscaling to max {MAX_PROCESS_DIM}px")
             scale_factor = MAX_PROCESS_DIM / max(orig_w, orig_h)
             working_w = int(orig_w * scale_factor)
             working_h = int(orig_h * scale_factor)
-            working_np = cv2.resize(original_np, (working_w, working_h),
+            working_np = cv2.resize(original_np, (working_w, working_h), interpolation=cv2.INTER_AREA)
             working_bgr = cv2.cvtColor(working_np, cv2.COLOR_RGB2BGR)
-
+
         pil_working = Image.fromarray(working_np)
         inputs = processor(images=pil_working, return_tensors="pt").to(device)
         with torch.no_grad():
             outputs = model(**inputs)
             logits = outputs.logits
+
         upsampled_logits = torch.nn.functional.interpolate(
             logits, size=(working_h, working_w), mode="bilinear", align_corners=False
         )
         parsing = upsampled_logits.argmax(dim=1).squeeze(0).cpu().numpy()
+
         hair_mask = (parsing == hair_class_id).astype(np.uint8)
-
+
         ears_mask = np.zeros_like(hair_mask)
         for cls in ear_class_ids:
             ears_mask[parsing == cls] = 1
+
         ear_y, ear_x = np.where(ears_mask)
         if len(ear_y) > 0:
             ear_top_y = ear_y.min()
             ear_height = ear_y.max() - ear_top_y + 1
             kernel_v = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 30))
             ears_protected = cv2.dilate(ears_mask, kernel_v, iterations=2)
+
             top_margin = max(8, int(ear_height * 0.12))
             top_start = max(0, ear_top_y - top_margin)
+
             ear_x_min, ear_x_max = ear_x.min(), ear_x.max()
             ear_width = ear_x_max - ear_x_min + 1
             x_margin = int(ear_width * 0.35)
             protected_left = max(0, ear_x_min - x_margin)
             protected_right = min(working_w, ear_x_max + x_margin)
+
             limited_top_mask = np.zeros_like(ears_mask)
             limited_top_mask[top_start:ear_top_y + 8, protected_left:protected_right] = 1
             kernel_h = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (17, 5))
             limited_top_mask = cv2.dilate(limited_top_mask, kernel_h, iterations=1)
+
             ears_protected = np.logical_or(ears_protected, limited_top_mask).astype(np.uint8)
+
             hair_above_ears = np.zeros_like(hair_mask)
             above_ear_line = max(0, ear_top_y - int(ear_height * 0.65))
             hair_above_ears[:above_ear_line, :] = hair_mask[:above_ear_line, :]
             ears_protected[hair_above_ears == 1] = 0
         else:
             ears_protected = np.zeros_like(hair_mask)
+
         hair_mask_final = hair_mask.copy()
         hair_mask_final[ears_protected == 1] = 0
+
         if hair_mask[:int(working_h * 0.25), :].sum() > 60:
             hair_mask_final[:int(working_h * 0.25), :] = np.maximum(
                 hair_mask_final[:int(working_h * 0.25), :], hair_mask[:int(working_h * 0.25), :]
             )
-
+
         kernel_s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (13, 13))
         hair_mask_final = cv2.morphologyEx(hair_mask_final, cv2.MORPH_CLOSE, kernel_s, iterations=2)
        hair_mask_final = cv2.dilate(hair_mask_final, kernel_s, iterations=1)
+
         blurred = cv2.GaussianBlur(hair_mask_final.astype(np.float32), (9, 9), 3)
         hair_mask_final = (blurred > 0.28).astype(np.uint8)
+
         kernel_edge = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
         hair_mask_final = cv2.dilate(hair_mask_final, kernel_edge, iterations=1)
+
         hair_pixels = np.sum(hair_mask_final)
-        logger.info(f"
-
+        logger.info(f"Hair pixels (resized): {hair_pixels:,}")
+
         final_mask = hair_mask_final.copy()
         use_extended_mask = False
         if hair_pixels > 380000:
-            logger.info("
+            logger.info("Large hair -> extended mask")
             use_extended_mask = True
             big_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25, 25))
             extended = cv2.dilate(hair_mask_final, big_kernel, iterations=1)
@@ -125,50 +124,44 @@ def make_realistic_bald(image_bytes: bytes) -> bytes:
             upper[:upper_end, :] = 1
             extended = np.logical_or(extended, upper).astype(np.uint8)
             extended[ears_protected == 1] = 0
+
             if np.mean(working_np) < 110:
                 hsv = cv2.cvtColor(working_np, cv2.COLOR_RGB2HSV)
                 dark_lower = np.array([0, 0, 0])
                 dark_upper = np.array([180, 70, 90])
                 dark_mask = cv2.inRange(hsv, dark_lower, dark_upper)
                 extended = np.logical_or(extended, (dark_mask > 127)).astype(np.uint8)
+
             extended = cv2.morphologyEx(extended, cv2.MORPH_CLOSE, kernel_s, iterations=1)
             extended[int(working_h * 0.75):, :] = 0
-            extra_pixels = np.sum(extended)
-            logger.info(f" Extended mask size: {extra_pixels:,} (+{extra_pixels - hair_pixels:,})")
             final_mask = extended
-
+
         if use_extended_mask or hair_pixels > 420000:
-            radius = 18
-            inpaint_flag = cv2.INPAINT_TELEA
+            radius, flag = 18, cv2.INPAINT_TELEA
         elif hair_pixels > 220000:
-            radius = 15
-            inpaint_flag = cv2.INPAINT_TELEA
+            radius, flag = 15, cv2.INPAINT_TELEA
         else:
-            radius = 10
-
-        logger.info(f"
-        inpainted_bgr = cv2.inpaint(working_bgr, final_mask * 255,
-                                    inpaintRadius=radius, flags=inpaint_flag)
+            radius, flag = 10, cv2.INPAINT_NS
+
+        logger.info(f"Inpainting radius={radius}")
+        inpainted_bgr = cv2.inpaint(working_bgr, final_mask * 255, inpaintRadius=radius, flags=flag)
         inpainted_rgb = cv2.cvtColor(inpainted_bgr, cv2.COLOR_BGR2RGB)
+
         result_small = working_np.copy()
         result_small[final_mask == 1] = inpainted_rgb[final_mask == 1]
-
+
         if use_extended_mask or hair_pixels > 280000:
-            logger.info("
-            regions = [
-                (0.18, 0.30, 0.34, 0.66),
-                (0.32, 0.47, 0.35, 0.65)
-            ]
+            logger.info("Skin color correction")
+            regions = [(0.18, 0.30, 0.34, 0.66), (0.32, 0.47, 0.35, 0.65)]
             colors = []
             for y1r, y2r, x1r, x2r in regions:
-                y1 = int(working_h * y1r)
-
-                x1 = int(working_w * x1r)
-                x2 = int(working_w * x2r)
+                y1, y2 = int(working_h * y1r), int(working_h * y2r)
+                x1, x2 = int(working_w * x1r), int(working_w * x2r)
                 if y2 > y1 + 40 and x2 > x1 + 80:
                     crop = working_np[y1:y2, x1:x2]
                     if crop.size > 0:
                         colors.append(np.median(crop, axis=(0,1)).astype(np.float32))
+
             if colors:
                 target_color = np.mean(colors, axis=0)
                 brightness = np.mean(target_color)
@@ -179,71 +172,68 @@ def make_realistic_bald(image_bytes: bytes) -> bytes:
                 diff = target_color - current_mean
                 corrected = np.clip(bald_area + diff * strength, 0, 255).astype(np.uint8)
                 result_small[final_mask == 1] = corrected
-
+
         if hair_pixels > 90000 or use_extended_mask:
             blurred_bald = cv2.GaussianBlur(result_small, (5, 5), 0.8)
             result_small[final_mask == 1] = cv2.addWeighted(
-                result_small[final_mask == 1], 0.65,
-                blurred_bald[final_mask == 1], 0.35, 0
+                result_small[final_mask == 1], 0.65, blurred_bald[final_mask == 1], 0.35, 0
             )
-
+
         if scale_factor < 1.0:
-            logger.info("Upscaling
+            logger.info("Upscaling to original size")
             result = cv2.resize(result_small, (orig_w, orig_h), interpolation=cv2.INTER_LANCZOS4)
         else:
            result = result_small
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        logger.info(f"Image read successful, size: {len(contents) / 1024:.2f} KB")
-        bald_bytes = make_realistic_bald(contents)
-        logger.info(f"Bald processing done, output size: {len(bald_bytes) / 1024:.2f} KB")
-        bald_io = io.BytesIO(bald_bytes)
-        bald_io.seek(0)
-        return StreamingResponse(
-            bald_io,
-            media_type="image/jpeg",
-            headers={"Content-Disposition": "attachment; filename=bald_version.jpg"}
-        )
-    except ValueError as ve:
-        error_detail = str(ve).strip()
-        logger.warning(f"ValueError: {error_detail}")
-        if "NO_HAIR" in error_detail.upper() or "NO_HAIR_DETECTED" in error_detail.upper():
-            raise HTTPException(status_code=400, detail="NO_HAIR_DETECTED")
-        raise HTTPException(status_code=400, detail=error_detail or "Something went wrong during processing")
+
+        result_pil = Image.fromarray(result)
+
+        comparison = np.hstack((original_np, result))
+        comparison_pil = Image.fromarray(comparison)
+
+        final_mask_big = cv2.resize(final_mask.astype(np.uint8) * 255, (orig_w, orig_h), interpolation=cv2.INTER_NEAREST) > 127
+        mask_vis = np.zeros_like(original_np)
+        mask_vis[final_mask_big] = [255, 70, 70]
+        mask_overlay = cv2.addWeighted(original_np, 0.78, mask_vis, 0.22, 0)
+        mask_pil = Image.fromarray(mask_overlay)
+
+        return result_pil, comparison_pil, mask_pil
+
     except Exception as e:
-        logger.error(f"
-        raise
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        logger.error(f"Error: {str(e)}", exc_info=True)
+        raise gr.Error(f"Processing failed: {str(e)}. Try a smaller image.")
+
+with gr.Blocks(title="Make Me Bald 🧑‍🦲", theme=gr.themes.Soft()) as demo:
+    gr.Markdown("# Realistic Bald Maker 🔥")
+    gr.Markdown("Upload a face photo → get a bald version with natural skin blending. Ears protected, no weird halos!")
+
+    with gr.Row():
+        input_img = gr.Image(type="pil", label="Your Photo", sources=["upload", "webcam"])
+        output_bald = gr.Image(label="Bald Version")
+
+    with gr.Row():
+        comparison = gr.Image(label="Before vs After")
+        mask_overlay = gr.Image(label="Hair Mask Overlay (red = removed area)")
+
+    btn = gr.Button("Make Bald", variant="primary")
+
+    btn.click(
+        fn=make_realistic_bald,
+        inputs=input_img,
+        outputs=[output_bald, comparison, mask_overlay],
+        api_name="make_bald"
+    )
+
+    gr.Examples(
+        examples=[["example1.jpg"], ["example2.jpg"]],  # if you add example images to the repo
+        inputs=input_img,
+        label="Try these examples"
+    )
+
+    gr.Markdown("""
+    **Tips:**
+    - Best results on clear front-facing photos.
+    - Large images are auto-resized for speed (then upscaled).
+    - If no hair is detected → try another photo.
+    """)
+
+# NO demo.launch() here - HF Spaces handles it automatically!
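For reviewers: since the PR changes the signature from bytes-in/bytes-out to PIL-in/three-PIL-out, here is a minimal local smoke test of the new function. This is a sketch, not part of the PR: `face.jpg` is a placeholder path, and importing `app` will build the Blocks UI and download the model weights from the Hub on first run.

```python
# Hypothetical smoke test: PIL image in, three PIL images out
# (bald result, before/after strip, mask overlay).
from PIL import Image

from app import make_realistic_bald  # assumes this file is saved as app.py

img = Image.open("face.jpg").convert("RGB")  # placeholder test photo
bald, comparison, overlay = make_realistic_bald(img)
bald.save("bald.jpg")
comparison.save("before_after.jpg")
overlay.save("mask_overlay.jpg")
```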
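Because the click handler sets `api_name="make_bald"`, the Space should also expose a named API endpoint once this is merged. A sketch of calling it remotely with a recent `gradio_client` (the Space id below is a placeholder, not something this PR pins down):

```python
# Hedged sketch: call the Space's /make_bald endpoint from Python.
from gradio_client import Client, handle_file

client = Client("user/make-me-bald")  # placeholder Space id
bald_path, comparison_path, overlay_path = client.predict(
    handle_file("face.jpg"),  # local photo to upload
    api_name="/make_bald",    # endpoint name set in btn.click(...)
)
print(bald_path)  # local filepath of the downloaded bald image
```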