Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -8,34 +8,26 @@ import math
|
|
| 8 |
# --- Asset Setup ---
|
| 9 |
# Initialize MediaPipe Face Mesh for landmark detection
|
| 10 |
mp_face_mesh = mp.solutions.face_mesh
|
| 11 |
-
# Use a with block for resource management, though for a long-running app, a global instance is fine.
|
| 12 |
-
# For simplicity in Gradio, we'll keep the global instance.
|
| 13 |
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, min_detection_confidence=0.5)
|
| 14 |
|
| 15 |
|
| 16 |
-
# ---
|
| 17 |
-
# These functions are designed to be fast by using NumPy and OpenCV operations.
|
| 18 |
-
|
| 19 |
def apply_grayscale(img_np):
    """Convert an RGB image to grayscale, returned as a 3-channel RGB array."""
    if img_np is None:
        return None
    # Round-trip through single-channel gray so the output stays 3-channel
    # and remains compatible with the RGB display pipeline.
    gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
    return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
|
| 23 |
|
| 24 |
def apply_sepia(img_np):
    """Apply a classic sepia tone using a 3x3 color-mixing matrix.

    Args:
        img_np: RGB image as a uint8 NumPy array of shape (H, W, 3), or None.

    Returns:
        Sepia-toned uint8 RGB array, or None if the input was None.
    """
    if img_np is None:
        return None
    # Standard sepia weights; transposed so we can right-multiply the
    # (H, W, 3) pixel array with a single np.dot.
    sepia_matrix = np.array([[0.393, 0.769, 0.189],
                             [0.349, 0.686, 0.168],
                             [0.272, 0.534, 0.131]]).T
    # Slice to 3 channels defensively in case an RGBA frame slips through.
    sepia_img = np.dot(img_np[..., :3], sepia_matrix)
    # The matrix can push values past 255, so clip before casting back.
    return np.clip(sepia_img, 0, 255).astype(np.uint8)
|
| 30 |
|
| 31 |
def apply_invert(img_np):
    """Return the color-negative of the image."""
    if img_np is None:
        return None
    # bitwise_not flips every bit, which equals 255 - value for uint8 data.
    return cv2.bitwise_not(img_np)
|
| 35 |
|
| 36 |
def apply_posterize(img_np, bits=4):
    """Reduce each color channel to `bits` bits of precision (posterize).

    Args:
        img_np: uint8 RGB image array, or None.
        bits: number of significant bits to keep per channel (1-8).
              Defaults to 4, matching the original hard-coded behavior.

    Returns:
        Posterized uint8 array, or None if the input was None.
    """
    if img_np is None:
        return None
    # Zero the low-order bits by shifting them out and back in.
    shift = 8 - bits
    return ((img_np >> shift) << shift).astype(np.uint8)
|
|
@@ -43,248 +35,207 @@ def apply_posterize(img_np):
|
|
| 43 |
def apply_solarize(img_np, threshold=128):
    """Invert all pixel values above `threshold` (classic solarize effect).

    Args:
        img_np: uint8 RGB image array, or None.
        threshold: values strictly greater than this are inverted.
                   Defaults to 128, matching the original hard-coded behavior.

    Returns:
        Solarized uint8 array, or None if the input was None.
    """
    if img_np is None:
        return None
    # np.where applies the conditional inversion element-wise in one pass.
    return np.where(img_np > threshold, 255 - img_np, img_np).astype(np.uint8)
|
| 48 |
|
| 49 |
def apply_vignette(img_np):
    """Darken the image toward its edges with a Gaussian falloff."""
    if img_np is None:
        return None
    height, width = img_np.shape[:2]
    # Outer product of two 1-D Gaussians produces a 2-D brightness falloff.
    col_kernel = cv2.getGaussianKernel(width, int(width * 0.5))
    row_kernel = cv2.getGaussianKernel(height, int(height * 0.5))
    falloff = row_kernel * col_kernel.T
    # Scale so the brightest point of the mask is exactly 255.
    mask = 255 * falloff / np.max(falloff)
    # Broadcast the mask over all three channels and re-quantize.
    weighted = img_np * (mask[:, :, np.newaxis] / 255.0)
    return np.clip(weighted, 0, 255).astype(np.uint8)
|
| 60 |
|
| 61 |
def apply_contour(img_np):
    """Render Canny edges as dark lines on a white background."""
    if img_np is None:
        return None
    grayscale = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
    edge_map = cv2.Canny(grayscale, 100, 200)
    # Invert so edges appear black on white, then restore 3 channels.
    inverted = 255 - edge_map
    return cv2.cvtColor(inverted, cv2.COLOR_GRAY2RGB)
|
| 67 |
|
| 68 |
def apply_sharpen(img_np):
    """Sharpen the image with a standard 3x3 high-boost kernel."""
    if img_np is None:
        return None
    # Center weight 9 with -1 neighbors sums to 1, preserving brightness.
    sharpen_kernel = np.array([[-1, -1, -1],
                               [-1,  9, -1],
                               [-1, -1, -1]])
    return cv2.filter2D(img_np, -1, sharpen_kernel)
|
| 73 |
|
| 74 |
-
# --- New Artistic Filters ---
|
| 75 |
-
|
| 76 |
def apply_cartoon(img_np):
    """Cartoonize by combining smoothed colors with bold edge lines."""
    if img_np is None:
        return None
    # Edge mask: median blur suppresses noise before adaptive thresholding.
    smoothed_gray = cv2.medianBlur(cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY), 5)
    edge_mask = cv2.adaptiveThreshold(smoothed_gray, 255,
                                      cv2.ADAPTIVE_THRESH_MEAN_C,
                                      cv2.THRESH_BINARY, 9, 9)
    # Bilateral filter flattens color regions while keeping edges crisp.
    flattened = cv2.bilateralFilter(img_np, 9, 250, 250)
    # Keep color only where the edge mask is white.
    return cv2.bitwise_and(flattened, flattened, mask=edge_mask)
|
| 88 |
|
| 89 |
def apply_sketch(img_np):
    """Turn the image into a pencil sketch via color-dodge blending."""
    if img_np is None:
        return None
    gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
    # Color-dodge: divide the grayscale image by the inverse of its
    # blurred negative; flat areas wash out, detail stays as pencil lines.
    blurred_negative = cv2.GaussianBlur(255 - gray, (21, 21), 0)
    dodge = cv2.divide(gray, 255 - blurred_negative, scale=256.0)
    # Back to 3-channel RGB for display.
    return cv2.cvtColor(dodge, cv2.COLOR_GRAY2RGB)
|
| 104 |
|
| 105 |
def apply_pixelate(img_np, pixel_size=16):
    """Pixelate by downscaling then upscaling with nearest-neighbor.

    Args:
        img_np: RGB image array, or None.
        pixel_size: edge length of the resulting "pixels". Defaults to 16,
                    matching the original hard-coded behavior.

    Returns:
        Pixelated array the same size as the input, or None.
    """
    if img_np is None:
        return None
    h, w = img_np.shape[:2]
    # Guard with max(1, ...): images smaller than pixel_size would otherwise
    # produce a zero-sized target and make cv2.resize raise.
    small_size = (max(1, w // pixel_size), max(1, h // pixel_size))
    temp = cv2.resize(img_np, small_size, interpolation=cv2.INTER_NEAREST)
    # Scale back up; nearest-neighbor keeps the hard block edges.
    return cv2.resize(temp, (w, h), interpolation=cv2.INTER_NEAREST)
|
| 114 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 115 |
|
| 116 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
def _create_sunglasses_mask():
    """Build a 300x100 RGBA PIL image of simple cartoon sunglasses."""
    canvas = Image.new('RGBA', (300, 100), (0, 0, 0, 0))
    pen = ImageDraw.Draw(canvas)
    # Two semi-transparent dark lenses with gray rims.
    pen.ellipse((20, 20, 130, 80), fill=(20, 20, 20, 200), outline='gray', width=5)
    pen.ellipse((170, 20, 280, 80), fill=(20, 20, 20, 200), outline='gray', width=5)
    # Bridge connecting the lenses.
    pen.line((130, 50, 170, 50), fill='gray', width=8)
    return canvas
|
| 128 |
|
| 129 |
def apply_sunglasses(img_np):
    """Detect facial landmarks and overlay sunglasses, matching head tilt.

    Args:
        img_np: RGB image array, or None.

    Returns:
        The image with sunglasses pasted over detected eyes; returned
        unchanged when the input is None or no face is found.
    """
    if img_np is None:
        return img_np
    results = face_mesh.process(img_np)
    pil_image = Image.fromarray(img_np)
    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            # Convert normalized landmarks to pixel coordinates.
            landmarks = np.array([(lm.x * img_np.shape[1], lm.y * img_np.shape[0])
                                  for lm in face_landmarks.landmark])
            # Outer eye corners (MediaPipe Face Mesh indices 33 and 263).
            left_eye = landmarks[33]
            right_eye = landmarks[263]
            eye_center = (left_eye + right_eye) / 2
            eye_width = np.linalg.norm(left_eye - right_eye)
            angle = math.degrees(math.atan2(right_eye[1] - left_eye[1],
                                            right_eye[0] - left_eye[0]))
            sunglasses_img = _create_sunglasses_mask()
            # Restore the sizing that the resize call below depends on:
            # 1.6x the eye span, preserving the mask's aspect ratio.
            sunglasses_width = int(eye_width * 1.6)
            sunglasses_height = int(sunglasses_width * sunglasses_img.height
                                    / sunglasses_img.width)
            resized_sunglasses = sunglasses_img.resize(
                (sunglasses_width, sunglasses_height), Image.Resampling.LANCZOS)
            # Rotate to follow head tilt; expand=True keeps corners in frame.
            rotated_sunglasses = resized_sunglasses.rotate(
                angle, expand=True, resample=Image.Resampling.BICUBIC)
            # Center the (possibly expanded) sprite on the eye midpoint.
            pos_x = int(eye_center[0] - rotated_sunglasses.width / 2)
            pos_y = int(eye_center[1] - rotated_sunglasses.height / 2)
            # Third argument uses the RGBA alpha channel as the paste mask.
            pil_image.paste(rotated_sunglasses, (pos_x, pos_y), rotated_sunglasses)
    return np.array(pil_image)
|
| 167 |
|
| 168 |
def _draw_headwear(draw, landmarks, wearable_type):
    """Render the selected hat onto the transparent overlay behind `draw`.

    Args:
        draw: ImageDraw.Draw bound to a transparent RGBA overlay.
        landmarks: (N, 2) array of face landmarks in pixel coordinates.
        wearable_type: one of "Plumber", "Elf Hero", "Cowboy Hat", "Crown".

    Returns:
        A PIL Image of the overlay with the hat drawn on it.
    """
    # Key anchor landmarks (MediaPipe Face Mesh indices).
    chin = landmarks[152]
    forehead_top = landmarks[10]
    left_cheek = landmarks[234]
    right_cheek = landmarks[454]

    # Face dimensions drive the scale of every hat.
    face_width = np.linalg.norm(left_cheek - right_cheek)
    face_height = np.linalg.norm(forehead_top - chin)

    # Convert to a NumPy array so OpenCV drawing primitives can be used.
    overlay_np = np.array(draw.im)

    if wearable_type == "Plumber":
        hat_color = (21, 46, 230)
        # Restore the sizing constants the drawing calls below rely on.
        hat_width = face_width * 1.1
        hat_height = face_height * 0.3
        hat_center_x = forehead_top[0]
        hat_brim_y = forehead_top[1] - face_height * 0.05  # just above forehead
        # Brim
        cv2.ellipse(overlay_np, (int(hat_center_x), int(hat_brim_y)), (int(hat_width/2), int(hat_height*0.2)), 0, 0, 360, hat_color, -1)
        # Top part
        cv2.rectangle(overlay_np, (int(hat_center_x - hat_width/2.2), int(hat_brim_y - hat_height*0.8)), (int(hat_center_x + hat_width/2.2), int(hat_brim_y)), hat_color, -1)

    elif wearable_type == "Elf Hero":
        hat_color = (0, 128, 0)
        hat_tip_x = forehead_top[0]
        hat_tip_y = forehead_top[1] - face_height * 0.8  # tall pointed tip
        hat_base_y = forehead_top[1] + face_height * 0.1
        hat_base_left_x = left_cheek[0]
        hat_base_right_x = right_cheek[0]
        pts = np.array([[hat_tip_x, hat_tip_y], [hat_base_left_x, hat_base_y], [hat_base_right_x, hat_base_y]], np.int32)
        cv2.fillPoly(overlay_np, [pts], hat_color)

    elif wearable_type == "Cowboy Hat":
        hat_color = (25, 69, 99)
        hat_width = face_width * 1.4
        hat_height = face_height * 0.5
        hat_center_x = int(forehead_top[0])
        brim_center_y = int(forehead_top[1])
        # Brim
        cv2.ellipse(overlay_np, (hat_center_x, brim_center_y), (int(hat_width/2), int(hat_height*0.25)), 0, 0, 360, hat_color, -1)
        # Crown of the hat
        cv2.ellipse(overlay_np, (hat_center_x, int(brim_center_y - hat_height*0.3)), (int(hat_width/3), int(hat_height*0.3)), 0, 0, 360, hat_color, -1)

    elif wearable_type == "Crown":
        hat_color = (0, 215, 255)
        hat_width = face_width * 1.1
        hat_height = face_height * 0.25
        base_y = int(forehead_top[1] - face_height * 0.1)
        base_x_start = int(forehead_top[0] - hat_width/2)
        # Base band
        cv2.rectangle(overlay_np, (base_x_start, base_y), (int(base_x_start + hat_width), int(base_y + hat_height * 0.3)), hat_color, -1)
        # Spikes
        for i in range(5):
            spike_base_x = base_x_start + (i * hat_width/4)
            pts = np.array([
                [spike_base_x - hat_width*0.05, base_y],
                [spike_base_x + hat_width*0.05, base_y],
                [spike_base_x, base_y - hat_height]
            ], np.int32)
            cv2.fillPoly(overlay_np, [pts], hat_color)

    # Convert back to a PIL image so the caller can alpha-paste it.
    return Image.fromarray(overlay_np)
|
| 245 |
|
| 246 |
-
|
| 247 |
def apply_headwear(img_np, wearable_type):
    """Detect a face and composite the requested headwear onto the image."""
    if img_np is None:
        return img_np
    detection = face_mesh.process(img_np)
    composed = Image.fromarray(img_np)
    if detection.multi_face_landmarks:
        for face in detection.multi_face_landmarks:
            # Fresh transparent canvas per face for the hat drawing.
            hat_layer = Image.new('RGBA', composed.size, (255, 255, 255, 0))
            layer_draw = ImageDraw.Draw(hat_layer)
            # Normalized landmark coordinates -> pixel coordinates.
            points = np.array([(lm.x * img_np.shape[1], lm.y * img_np.shape[0])
                               for lm in face.landmark])
            rendered = _draw_headwear(layer_draw, points, wearable_type)
            # Alpha-paste the rendered hat over the photo.
            composed.paste(rendered, (0, 0), rendered)
    return np.array(composed)
|
| 269 |
|
| 270 |
-
# --- Main Processing
|
| 271 |
-
def process_image(image, filter_name):
|
| 272 |
-
"""The main function that routes the image and filter name to the correct function."""
|
| 273 |
if image is None: return None
|
|
|
|
| 274 |
|
| 275 |
-
# Ensure image is a NumPy array in RGB format
|
| 276 |
-
if isinstance(image, Image.Image):
|
| 277 |
-
img_np = np.array(image.convert("RGB"))
|
| 278 |
-
else:
|
| 279 |
-
img_np = image
|
| 280 |
-
|
| 281 |
-
# Map of all available filters to their functions
|
| 282 |
filter_map = {
|
| 283 |
"Grayscale": apply_grayscale, "Sepia": apply_sepia, "Invert": apply_invert,
|
| 284 |
-
"Posterize": apply_posterize, "Solarize": apply_solarize,
|
| 285 |
-
"
|
| 286 |
-
"
|
| 287 |
-
"
|
|
|
|
|
|
|
| 288 |
"Plumber": lambda img: apply_headwear(img, "Plumber"),
|
| 289 |
"Elf Hero": lambda img: apply_headwear(img, "Elf Hero"),
|
| 290 |
"Cowboy Hat": lambda img: apply_headwear(img, "Cowboy Hat"),
|
|
@@ -293,7 +244,7 @@ def process_image(image, filter_name):
|
|
| 293 |
}
|
| 294 |
|
| 295 |
filter_function = filter_map.get(filter_name, lambda img: img)
|
| 296 |
-
return filter_function(img_np.copy())
|
| 297 |
|
| 298 |
# --- Gradio UI ---
|
| 299 |
css = """
|
|
@@ -304,87 +255,78 @@ css = """
|
|
| 304 |
|
| 305 |
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
|
| 306 |
gr.Markdown("# Advanced Image & Face Filter Studio", elem_id="title")
|
| 307 |
-
gr.Markdown("Apply classic, artistic, and face-aware effects to your images
|
| 308 |
|
| 309 |
-
# Define filter lists
|
| 310 |
filters_old = ["None", "Grayscale", "Sepia", "Invert", "Posterize", "Solarize"]
|
| 311 |
filters_new = ["None", "Vignette", "Contour", "Sharpen"]
|
| 312 |
filters_artistic = ["None", "Cartoon", "Sketch", "Pixelate"]
|
| 313 |
filters_face = ["None", "Sunglasses", "Plumber", "Elf Hero", "Cowboy Hat", "Crown"]
|
|
|
|
| 314 |
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 324 |
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 341 |
|
| 342 |
-
#
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
# Logic for clearing other radio buttons when one is selected
|
| 358 |
-
def clear_others_factory(selected_radio_index):
|
| 359 |
-
def clear_func():
|
| 360 |
-
outputs = [gr.update(value="None")] * len(radio_buttons)
|
| 361 |
-
return outputs
|
| 362 |
-
return clear_func
|
| 363 |
-
|
| 364 |
-
# Connect the clearing logic
|
| 365 |
-
for i, radio in enumerate(radio_buttons):
|
| 366 |
-
other_radios = [r for j, r in enumerate(radio_buttons) if i != j]
|
| 367 |
-
radio.change(lambda: ("None", "None", "None"), None, other_radios, queue=False)
|
| 368 |
-
|
| 369 |
-
# Connect the filtering logic
|
| 370 |
-
if is_live:
|
| 371 |
-
# For live mode, trigger on input image change
|
| 372 |
-
input_image.stream(apply_filter_func, [input_image] + radio_buttons, output_image)
|
| 373 |
-
else:
|
| 374 |
-
# For static modes, trigger on button click or input change
|
| 375 |
-
apply_button.click(apply_filter_func, [input_image] + radio_buttons, output_image)
|
| 376 |
-
# Also apply filter on radio change if an image is already present
|
| 377 |
-
for radio in radio_buttons:
|
| 378 |
-
radio.change(apply_filter_func, [input_image] + radio_buttons, output_image)
|
| 379 |
-
input_image.change(apply_filter_func, [input_image] + radio_buttons, output_image)
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
with gr.Tabs():
|
| 383 |
-
with gr.TabItem("Static Image (Upload or Capture)"):
|
| 384 |
-
create_ui(is_live=False)
|
| 385 |
-
with gr.TabItem("Live Webcam"):
|
| 386 |
-
create_ui(is_live=True)
|
| 387 |
|
| 388 |
|
| 389 |
if __name__ == "__main__":
|
| 390 |
-
demo.launch(debug=True)
|
|
|
|
| 8 |
# --- Asset Setup ---
|
| 9 |
# Initialize MediaPipe Face Mesh for landmark detection
|
| 10 |
mp_face_mesh = mp.solutions.face_mesh
|
|
|
|
|
|
|
| 11 |
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1, min_detection_confidence=0.5)
|
| 12 |
|
| 13 |
|
| 14 |
+
# --- Standard Filter Functions ---
|
|
|
|
|
|
|
| 15 |
def apply_grayscale(img_np):
|
| 16 |
if img_np is None: return None
|
|
|
|
| 17 |
return cv2.cvtColor(cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
|
| 18 |
|
| 19 |
def apply_sepia(img_np):
|
| 20 |
if img_np is None: return None
|
|
|
|
| 21 |
sepia_matrix = np.array([[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]).T
|
| 22 |
+
sepia_img = np.dot(img_np[...,:3], sepia_matrix)
|
| 23 |
return np.clip(sepia_img, 0, 255).astype(np.uint8)
|
| 24 |
|
| 25 |
def apply_invert(img_np):
|
| 26 |
if img_np is None: return None
|
|
|
|
| 27 |
return cv2.bitwise_not(img_np)
|
| 28 |
|
| 29 |
def apply_posterize(img_np):
|
| 30 |
if img_np is None: return None
|
|
|
|
| 31 |
bits = 4
|
| 32 |
shift = 8 - bits
|
| 33 |
return ((img_np >> shift) << shift).astype(np.uint8)
|
|
|
|
| 35 |
def apply_solarize(img_np):
|
| 36 |
if img_np is None: return None
|
| 37 |
threshold = 128
|
|
|
|
| 38 |
return np.where(img_np > threshold, 255 - img_np, img_np).astype(np.uint8)
|
| 39 |
|
| 40 |
def apply_vignette(img_np):
|
| 41 |
if img_np is None: return None
|
| 42 |
rows, cols = img_np.shape[:2]
|
|
|
|
| 43 |
kernel_x = cv2.getGaussianKernel(cols, int(cols * 0.5))
|
| 44 |
kernel_y = cv2.getGaussianKernel(rows, int(rows * 0.5))
|
| 45 |
kernel = kernel_y * kernel_x.T
|
|
|
|
| 46 |
mask = 255 * kernel / np.max(kernel)
|
|
|
|
| 47 |
return np.clip(img_np * (mask[:, :, np.newaxis] / 255.0), 0, 255).astype(np.uint8)
|
| 48 |
|
| 49 |
def apply_contour(img_np):
|
| 50 |
if img_np is None: return None
|
| 51 |
gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
|
|
|
|
| 52 |
edges = cv2.Canny(gray, 100, 200)
|
| 53 |
return cv2.cvtColor(255 - edges, cv2.COLOR_GRAY2RGB)
|
| 54 |
|
| 55 |
def apply_sharpen(img_np):
|
| 56 |
if img_np is None: return None
|
|
|
|
| 57 |
kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
|
| 58 |
return cv2.filter2D(img_np, -1, kernel)
|
| 59 |
|
|
|
|
|
|
|
| 60 |
def apply_cartoon(img_np):
|
|
|
|
| 61 |
if img_np is None: return None
|
|
|
|
| 62 |
gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
|
| 63 |
gray = cv2.medianBlur(gray, 5)
|
| 64 |
edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
|
|
|
|
| 65 |
color = cv2.bilateralFilter(img_np, 9, 250, 250)
|
| 66 |
+
return cv2.bitwise_and(color, color, mask=edges)
|
|
|
|
|
|
|
| 67 |
|
| 68 |
def apply_sketch(img_np):
|
|
|
|
| 69 |
if img_np is None: return None
|
|
|
|
| 70 |
gray_img = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
|
|
|
|
| 71 |
invert_img = 255 - gray_img
|
|
|
|
| 72 |
blur_img = cv2.GaussianBlur(invert_img, (21, 21), 0)
|
|
|
|
| 73 |
invert_blur_img = 255 - blur_img
|
|
|
|
| 74 |
sketch_img = cv2.divide(gray_img, invert_blur_img, scale=256.0)
|
|
|
|
| 75 |
return cv2.cvtColor(sketch_img, cv2.COLOR_GRAY2RGB)
|
| 76 |
|
| 77 |
def apply_pixelate(img_np):
|
|
|
|
| 78 |
if img_np is None: return None
|
| 79 |
h, w = img_np.shape[:2]
|
| 80 |
pixel_size = 16
|
|
|
|
| 81 |
temp = cv2.resize(img_np, (w // pixel_size, h // pixel_size), interpolation=cv2.INTER_NEAREST)
|
|
|
|
| 82 |
return cv2.resize(temp, (w, h), interpolation=cv2.INTER_NEAREST)
|
| 83 |
|
| 84 |
+
# --- New Advanced Filters ---
|
| 85 |
+
|
| 86 |
+
def apply_hdr_effect(img_np):
    """Give the image an HDR-like look by boosting local detail."""
    if img_np is None:
        return None
    # detailEnhance performs edge-aware detail amplification in one call.
    return cv2.detailEnhance(img_np, sigma_s=12, sigma_r=0.15)
|
| 91 |
|
| 92 |
+
def apply_color_splash(img_np, hex_color):
    """Keep pixels near `hex_color`; render the rest of the image grayscale.

    Args:
        img_np: RGB image array, or None.
        hex_color: '#RRGGBB' string selecting the hue to preserve.

    Returns:
        uint8 RGB array with only the selected hue in color, or None.
    """
    if img_np is None:
        return None
    # Parse '#RRGGBB' into an (R, G, B) tuple.
    h = hex_color.lstrip('#')
    rgb_color = tuple(int(h[i:i+2], 16) for i in (0, 2, 4))

    # Work in HSV so "similar color" reduces to a hue range.
    hsv_img = cv2.cvtColor(img_np, cv2.COLOR_RGB2HSV)
    target_hue = int(cv2.cvtColor(np.uint8([[rgb_color]]), cv2.COLOR_RGB2HSV)[0][0][0])

    # OpenCV hue is cyclic on [0, 179]. Wrap the tolerance band instead of
    # clamping it, so reds (hue near the 0/179 seam) match on both sides —
    # the previous clamp silently dropped half the band for such colors.
    hue_tolerance = 10
    lo = (target_hue - hue_tolerance) % 180
    hi = (target_hue + hue_tolerance) % 180
    if lo <= hi:
        mask = cv2.inRange(hsv_img, np.array([lo, 50, 50]), np.array([hi, 255, 255]))
    else:
        # Band crosses the seam: union of the two straight sub-ranges.
        mask_low = cv2.inRange(hsv_img, np.array([0, 50, 50]), np.array([hi, 255, 255]))
        mask_high = cv2.inRange(hsv_img, np.array([lo, 50, 50]), np.array([179, 255, 255]))
        mask = cv2.bitwise_or(mask_low, mask_high)
    mask_inv = cv2.bitwise_not(mask)

    # Grayscale version expanded back to three channels.
    gray_img = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
    gray_img_3_channel = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2RGB)

    # Composite: colored pixels where the mask hits, grayscale elsewhere.
    colored_part = cv2.bitwise_and(img_np, img_np, mask=mask)
    grayscale_part = cv2.bitwise_and(gray_img_3_channel, gray_img_3_channel, mask=mask_inv)
    return cv2.add(colored_part, grayscale_part)
|
| 122 |
|
| 123 |
+
def apply_sunburst_glow(img_np):
    """Add a sunburst/lens-flare effect radiating from the brightest pixel.

    Args:
        img_np: RGB image array, or None.

    Returns:
        The image blended with star rays and a soft glow, or None.
    """
    if img_np is None:
        return None
    gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(gray)

    overlay = img_np.copy()

    # Ray lengths scale with peak brightness. Guard the upper bound so
    # np.random.randint never sees low >= high (a fully dark image has
    # maxVal == 0, which previously raised ValueError).
    low = int(maxVal / 2)
    high = max(low + 1, int(maxVal * 1.5))

    # Draw star-like rays from the brightest point, one every 30 degrees.
    for i in range(12):
        angle = i * 30 * np.pi / 180
        length = np.random.randint(low, high)
        pt2_x = int(maxLoc[0] + length * np.cos(angle))
        pt2_y = int(maxLoc[1] + length * np.sin(angle))
        cv2.line(overlay, maxLoc, (pt2_x, pt2_y), (255, 255, 220), 1)

    # Heavy blur turns the rays into a diffuse glow.
    glow = cv2.GaussianBlur(overlay, (0, 0), sigmaX=30, sigmaY=30)

    # Blend: mostly the original, plus a strong helping of glow.
    return cv2.addWeighted(img_np, 0.8, glow, 0.4, 0)
|
| 144 |
+
|
| 145 |
+
def apply_dreamy_glow(img_np):
    """Soften the image with a bright, dreamy glow."""
    if img_np is None:
        return None
    # Blend a heavily blurred copy back over the original; the combined
    # weight above 1.0 brightens highlights like a 'Screen' blend mode.
    softened = cv2.GaussianBlur(img_np, (0, 0), sigmaX=15, sigmaY=15)
    return cv2.addWeighted(img_np, 1.0, softened, 0.6, 0)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
# --- Face Effect Functions (Unchanged) ---
|
| 155 |
def _create_sunglasses_mask():
|
|
|
|
| 156 |
sunglasses = Image.new('RGBA', (300, 100), (0, 0, 0, 0))
|
| 157 |
draw = ImageDraw.Draw(sunglasses)
|
|
|
|
| 158 |
draw.ellipse((20, 20, 130, 80), fill=(20, 20, 20, 200), outline='gray', width=5)
|
| 159 |
draw.ellipse((170, 20, 280, 80), fill=(20, 20, 20, 200), outline='gray', width=5)
|
|
|
|
| 160 |
draw.line((130, 50, 170, 50), fill='gray', width=8)
|
| 161 |
return sunglasses
|
| 162 |
|
| 163 |
def apply_sunglasses(img_np):
|
|
|
|
| 164 |
if img_np is None: return img_np
|
|
|
|
| 165 |
results = face_mesh.process(img_np)
|
| 166 |
pil_image = Image.fromarray(img_np)
|
|
|
|
| 167 |
if results.multi_face_landmarks:
|
| 168 |
for face_landmarks in results.multi_face_landmarks:
|
|
|
|
| 169 |
landmarks = np.array([(lm.x * img_np.shape[1], lm.y * img_np.shape[0]) for lm in face_landmarks.landmark])
|
| 170 |
+
left_eye, right_eye = landmarks[33], landmarks[263]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 171 |
eye_center = (left_eye + right_eye) / 2
|
| 172 |
eye_width = np.linalg.norm(left_eye - right_eye)
|
| 173 |
angle = math.degrees(math.atan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0]))
|
|
|
|
|
|
|
| 174 |
sunglasses_img = _create_sunglasses_mask()
|
| 175 |
+
w, h = int(eye_width * 1.6), int(eye_width * 1.6 * sunglasses_img.height / sunglasses_img.width)
|
| 176 |
+
resized_sunglasses = sunglasses_img.resize((w, h), Image.Resampling.LANCZOS)
|
|
|
|
|
|
|
|
|
|
| 177 |
rotated_sunglasses = resized_sunglasses.rotate(angle, expand=True, resample=Image.Resampling.BICUBIC)
|
| 178 |
+
pos_x, pos_y = int(eye_center[0] - rotated_sunglasses.width / 2), int(eye_center[1] - rotated_sunglasses.height / 2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 179 |
pil_image.paste(rotated_sunglasses, (pos_x, pos_y), rotated_sunglasses)
|
|
|
|
| 180 |
return np.array(pil_image)
|
| 181 |
|
| 182 |
def _draw_headwear(draw, landmarks, wearable_type):
|
| 183 |
+
chin, forehead_top, left_cheek, right_cheek = landmarks[152], landmarks[10], landmarks[234], landmarks[454]
|
| 184 |
+
face_width, face_height = np.linalg.norm(left_cheek - right_cheek), np.linalg.norm(forehead_top - chin)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 185 |
overlay_np = np.array(draw.im)
|
|
|
|
| 186 |
if wearable_type == "Plumber":
|
| 187 |
+
hat_color = (21, 46, 230)
|
| 188 |
+
hat_width, hat_height = face_width * 1.1, face_height * 0.3
|
| 189 |
+
hat_center_x, hat_brim_y = forehead_top[0], forehead_top[1] - face_height * 0.05
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 190 |
cv2.ellipse(overlay_np, (int(hat_center_x), int(hat_brim_y)), (int(hat_width/2), int(hat_height*0.2)), 0, 0, 360, hat_color, -1)
|
|
|
|
| 191 |
cv2.rectangle(overlay_np, (int(hat_center_x - hat_width/2.2), int(hat_brim_y - hat_height*0.8)), (int(hat_center_x + hat_width/2.2), int(hat_brim_y)), hat_color, -1)
|
|
|
|
| 192 |
elif wearable_type == "Elf Hero":
|
| 193 |
+
hat_color = (0, 128, 0)
|
| 194 |
+
pts = np.array([[forehead_top[0], forehead_top[1] - face_height*0.8], [left_cheek[0], forehead_top[1] + face_height*0.1], [right_cheek[0], forehead_top[1] + face_height*0.1]], np.int32)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 195 |
cv2.fillPoly(overlay_np, [pts], hat_color)
|
|
|
|
| 196 |
elif wearable_type == "Cowboy Hat":
|
| 197 |
+
hat_color = (25, 69, 99)
|
| 198 |
+
hat_width, hat_height = face_width * 1.4, face_height * 0.5
|
| 199 |
+
hat_center_x, brim_center_y = int(forehead_top[0]), int(forehead_top[1])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 200 |
cv2.ellipse(overlay_np, (hat_center_x, brim_center_y), (int(hat_width/2), int(hat_height*0.25)), 0, 0, 360, hat_color, -1)
|
|
|
|
|
|
|
| 201 |
cv2.ellipse(overlay_np, (hat_center_x, int(brim_center_y - hat_height*0.3)), (int(hat_width/3), int(hat_height*0.3)), 0, 0, 360, hat_color, -1)
|
|
|
|
| 202 |
elif wearable_type == "Crown":
|
| 203 |
+
hat_color = (0, 215, 255)
|
| 204 |
+
hat_width, hat_height = face_width * 1.1, face_height * 0.25
|
| 205 |
+
base_y, base_x_start = int(forehead_top[1] - face_height * 0.1), int(forehead_top[0] - hat_width/2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 206 |
cv2.rectangle(overlay_np, (base_x_start, base_y), (int(base_x_start + hat_width), int(base_y + hat_height * 0.3)), hat_color, -1)
|
|
|
|
| 207 |
for i in range(5):
|
| 208 |
spike_base_x = base_x_start + (i * hat_width/4)
|
| 209 |
+
pts = np.array([[spike_base_x - hat_width*0.05, base_y], [spike_base_x + hat_width*0.05, base_y], [spike_base_x, base_y - hat_height]], np.int32)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 210 |
cv2.fillPoly(overlay_np, [pts], hat_color)
|
|
|
|
|
|
|
| 211 |
return Image.fromarray(overlay_np)
|
| 212 |
|
|
|
|
| 213 |
def apply_headwear(img_np, wearable_type):
|
|
|
|
| 214 |
if img_np is None: return img_np
|
|
|
|
| 215 |
results = face_mesh.process(img_np)
|
|
|
|
| 216 |
pil_image = Image.fromarray(img_np)
|
| 217 |
if results.multi_face_landmarks:
|
| 218 |
for face_landmarks in results.multi_face_landmarks:
|
|
|
|
| 219 |
overlay = Image.new('RGBA', pil_image.size, (255, 255, 255, 0))
|
| 220 |
draw = ImageDraw.Draw(overlay)
|
|
|
|
| 221 |
landmarks = np.array([(lm.x * img_np.shape[1], lm.y * img_np.shape[0]) for lm in face_landmarks.landmark])
|
|
|
|
|
|
|
| 222 |
drawn_overlay = _draw_headwear(draw, landmarks, wearable_type)
|
|
|
|
|
|
|
| 223 |
pil_image.paste(drawn_overlay, (0, 0), drawn_overlay)
|
|
|
|
| 224 |
return np.array(pil_image)
|
| 225 |
|
| 226 |
+
# --- Main Processing Function ---
|
| 227 |
+
def process_image(image, filter_name, splash_color):
|
|
|
|
| 228 |
if image is None: return None
|
| 229 |
+
img_np = np.array(image.convert("RGB")) if isinstance(image, Image.Image) else image
|
| 230 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 231 |
filter_map = {
|
| 232 |
"Grayscale": apply_grayscale, "Sepia": apply_sepia, "Invert": apply_invert,
|
| 233 |
+
"Posterize": apply_posterize, "Solarize": apply_solarize, "Vignette": apply_vignette,
|
| 234 |
+
"Contour": apply_contour, "Sharpen": apply_sharpen, "Cartoon": apply_cartoon,
|
| 235 |
+
"Sketch": apply_sketch, "Pixelate": apply_pixelate, "Sunglasses": apply_sunglasses,
|
| 236 |
+
"HDR Effect": apply_hdr_effect, "Sunburst Glow": apply_sunburst_glow,
|
| 237 |
+
"Dreamy Glow": apply_dreamy_glow,
|
| 238 |
+
"Color Splash": lambda img: apply_color_splash(img, splash_color),
|
| 239 |
"Plumber": lambda img: apply_headwear(img, "Plumber"),
|
| 240 |
"Elf Hero": lambda img: apply_headwear(img, "Elf Hero"),
|
| 241 |
"Cowboy Hat": lambda img: apply_headwear(img, "Cowboy Hat"),
|
|
|
|
| 244 |
}
|
| 245 |
|
| 246 |
filter_function = filter_map.get(filter_name, lambda img: img)
|
| 247 |
+
return filter_function(img_np.copy())
|
| 248 |
|
| 249 |
# --- Gradio UI ---
|
| 250 |
css = """
|
|
|
|
| 255 |
|
| 256 |
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Advanced Image & Face Filter Studio", elem_id="title")
    gr.Markdown("Apply classic, artistic, and face-aware effects to your images.", elem_id="subtitle")

    # Filter choices per category; "None" disables the category.
    filters_old = ["None", "Grayscale", "Sepia", "Invert", "Posterize", "Solarize"]
    filters_new = ["None", "Vignette", "Contour", "Sharpen"]
    filters_artistic = ["None", "Cartoon", "Sketch", "Pixelate"]
    filters_face = ["None", "Sunglasses", "Plumber", "Elf Hero", "Cowboy Hat", "Crown"]
    filters_advanced = ["None", "HDR Effect", "Color Splash", "Sunburst Glow", "Dreamy Glow"]

    with gr.Row(equal_height=False):
        with gr.Column(scale=2):
            input_image = gr.Image(sources=["upload", "webcam"], type="pil", label="Input Image")

        with gr.Column(scale=1):
            gr.Markdown("### Filter Controls")
            with gr.Accordion("Old School", open=True):
                radio_old = gr.Radio(filters_old, label="Filter", value="None")
            with gr.Accordion("New School", open=True):
                radio_new = gr.Radio(filters_new, label="Filter", value="None")
            with gr.Accordion("Artistic", open=True):
                radio_artistic = gr.Radio(filters_artistic, label="Filter", value="None")
            with gr.Accordion("Advanced (May be slow)", open=True):
                radio_advanced = gr.Radio(filters_advanced, label="Filter", value="None")
            with gr.Accordion("Face Effects", open=True):
                radio_face = gr.Radio(filters_face, label="Filter", value="None")

            color_picker = gr.ColorPicker(label="Color to Keep (for Color Splash)", value="#FF0000", visible=False)
            apply_button = gr.Button("Apply Filter", variant="primary")

        with gr.Column(scale=2):
            output_image = gr.Image(label="Filtered Output")

    all_radios = [radio_old, radio_new, radio_artistic, radio_advanced, radio_face]

    def master_update_function(img, r_old, r_new, r_art, r_adv, r_face, splash_color):
        """Single point of truth: apply the one active filter and sync the UI."""
        # The reset logic below ensures at most one group is non-"None".
        active_filter = next((f for f in [r_old, r_new, r_art, r_adv, r_face] if f != "None"), "None")
        processed_img = process_image(img, active_filter, splash_color)
        # The color picker is only relevant for the Color Splash filter.
        color_picker_visibility = gr.update(visible=(active_filter == "Color Splash"))
        return processed_img, color_picker_visibility

    def create_reset_function(radios_to_reset):
        """Build a callback that resets the given radio groups to "None"."""
        def reset_func():
            return [gr.update(value="None") for _ in radios_to_reset]
        return reset_func

    # Selecting a filter first clears the other groups, THEN processes the
    # image. BUGFIX: .then() must be chained off the event dependency that
    # .change() returns — gr.Radio itself has no .then() method, so the
    # previous `radio.then(...)` call raised AttributeError at startup.
    for i, radio in enumerate(all_radios):
        other_radios = [r for j, r in enumerate(all_radios) if i != j]
        radio.change(
            fn=create_reset_function(other_radios),
            inputs=None,
            outputs=other_radios,
            queue=False,
        ).then(
            fn=master_update_function,
            inputs=[input_image] + all_radios + [color_picker],
            outputs=[output_image, color_picker],
        )

    # Button clicks, image uploads, and color changes all re-run the master
    # function directly (no reset needed for these triggers).
    trigger_inputs = [input_image] + all_radios + [color_picker]
    trigger_outputs = [output_image, color_picker]
    apply_button.click(master_update_function, inputs=trigger_inputs, outputs=trigger_outputs)
    input_image.change(master_update_function, inputs=trigger_inputs, outputs=trigger_outputs)
    color_picker.change(master_update_function, inputs=trigger_inputs, outputs=trigger_outputs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 329 |
|
| 330 |
|
| 331 |
if __name__ == "__main__":
    # debug=True surfaces tracebacks in the browser while developing.
    demo.launch(debug=True)
|