walidadebayo commited on
Commit
311bfbf
·
1 Parent(s): b074c3b

Enhance mask handling in image processing: resize and validate dimensions, update demo link, and correct copyright year in footer

Browse files
Files changed (2) hide show
  1. src/core.py +34 -8
  2. templates/index.html +3 -3
src/core.py CHANGED
@@ -400,13 +400,29 @@ def run(image, mask):
400
  return: BGR IMAGE
401
  """
402
  origin_height, origin_width = image.shape[1:]
 
 
 
 
 
 
 
 
 
 
403
  image = pad_img_to_modulo(image, mod=8)
404
  mask = pad_img_to_modulo(mask, mod=8)
405
 
 
 
 
 
406
  mask = (mask > 0) * 1
407
  image = torch.from_numpy(image).unsqueeze(0).to(device)
408
  mask = torch.from_numpy(mask).unsqueeze(0).to(device)
409
 
 
 
410
  start = time.time()
411
  with torch.no_grad():
412
  inpainted_image = model(image, mask)
@@ -432,21 +448,31 @@ def process_inpaint(image, mask):
432
  original_shape = image.shape
433
  interpolation = cv2.INTER_CUBIC
434
 
435
- #size_limit: Union[int, str] = request.form.get("sizeLimit", "1080")
436
- #if size_limit == "Original":
437
  size_limit = max(image.shape)
438
- #else:
439
- # size_limit = int(size_limit)
440
 
441
  print(f"Origin image shape: {original_shape}")
442
  image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
443
  print(f"Resized image shape: {image.shape}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
444
  image = norm_img(image)
445
-
446
- mask = 255-mask[:,:,3]
447
- mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
448
  mask = norm_img(mask)
449
-
 
 
450
  res_np_img = run(image, mask)
451
 
452
  return cv2.cvtColor(res_np_img, cv2.COLOR_BGR2RGB)
 
400
  return: BGR IMAGE
401
  """
402
  origin_height, origin_width = image.shape[1:]
403
+
404
+ # Ensure mask has the same spatial dimensions as the image
405
+ if mask.shape[1:] != image.shape[1:]:
406
+ # Resize mask to match image dimensions
407
+ mask_resized = np.zeros((1, origin_height, origin_width), dtype=mask.dtype)
408
+ mask_resized[0, :, :] = cv2.resize(mask[0], (origin_width, origin_height),
409
+ interpolation=cv2.INTER_NEAREST)
410
+ mask = mask_resized
411
+
412
+ # Make sure both have dimensions divisible by 8
413
  image = pad_img_to_modulo(image, mod=8)
414
  mask = pad_img_to_modulo(mask, mod=8)
415
 
416
+ # Double check dimensions match after padding
417
+ if image.shape[1:] != mask.shape[1:]:
418
+ raise ValueError(f"Image and mask dimensions don't match after padding: {image.shape} vs {mask.shape}")
419
+
420
  mask = (mask > 0) * 1
421
  image = torch.from_numpy(image).unsqueeze(0).to(device)
422
  mask = torch.from_numpy(mask).unsqueeze(0).to(device)
423
 
424
+ print(f"Input shapes - Image: {image.shape}, Mask: {mask.shape}")
425
+
426
  start = time.time()
427
  with torch.no_grad():
428
  inpainted_image = model(image, mask)
 
448
  original_shape = image.shape
449
  interpolation = cv2.INTER_CUBIC
450
 
 
 
451
  size_limit = max(image.shape)
 
 
452
 
453
  print(f"Origin image shape: {original_shape}")
454
  image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
455
  print(f"Resized image shape: {image.shape}")
456
+
457
+ # Get alpha channel from mask
458
+ if mask.ndim == 3 and mask.shape[2] == 4:
459
+ mask = 255-mask[:,:,3]
460
+ elif mask.ndim == 3 and mask.shape[2] == 1:
461
+ mask = mask[:,:,0]
462
+ elif mask.ndim == 2:
463
+ mask = mask
464
+ else:
465
+ raise ValueError(f"Unexpected mask shape: {mask.shape}")
466
+
467
+ # Ensure mask has the same dimensions as the image
468
+ if mask.shape[:2] != image.shape[:2]:
469
+ mask = cv2.resize(mask, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST)
470
+
471
  image = norm_img(image)
 
 
 
472
  mask = norm_img(mask)
473
+
474
+ print(f"Normalized shapes - Image: {image.shape}, Mask: {mask.shape}")
475
+
476
  res_np_img = run(image, mask)
477
 
478
  return cv2.cvtColor(res_np_img, cv2.COLOR_BGR2RGB)
templates/index.html CHANGED
@@ -305,13 +305,13 @@
305
 
306
  <section class="demo-section">
307
  <h2>Try It Out</h2>
308
- <p>Visit our interactive demo frontend:</p>
309
- <a href="https://walidadebayo-magic-eraser.netlify.app" class="btn" target="_blank">Open Demo Frontend</a>
310
  <p><small>Demo frontend connects to this API for processing images.</small></p>
311
  </section>
312
 
313
  <footer>
314
- <p><small>© 2023 Magic Eraser API. MIT License.</small></p>
315
  </footer>
316
  </body>
317
  </html>
 
305
 
306
  <section class="demo-section">
307
  <h2>Try It Out</h2>
308
+ <p>Visit InnoAI's interactive demo frontend:</p>
309
+ <a href="https://huggingface.co/spaces/innoai/Magic" class="btn" target="_blank">Open Demo Frontend</a>
310
  <p><small>Demo frontend connects to this API for processing images.</small></p>
311
  </section>
312
 
313
  <footer>
314
+ <p><small>© 2025 Magic Eraser API. MIT License.</small></p>
315
  </footer>
316
  </body>
317
  </html>