ivanm151 committed on
Commit
2760b2b
·
1 Parent(s): fbb0759

mobilesam v1.5 (test new mask)

Browse files
Files changed (2) hide show
  1. app.py +4 -4
  2. utils.py +14 -4
app.py CHANGED
@@ -6,7 +6,7 @@ import io
6
  import base64
7
  from models import load_sam, load_model2, load_model3
8
  from utils import (
9
- crop_fruit_with_white_bg,
10
  preprocess_for_classifier,
11
  FRUIT_CLASSES,
12
  FRESHNESS_CLASSES
@@ -71,7 +71,7 @@ async def predict_full(
71
  }
72
 
73
  # Обрезка под 100×100 для сорта
74
- cropped_100 = crop_fruit_with_white_bg(orig_np, mask, out_size=100)
75
  input_tensor2 = preprocess_for_classifier(cropped_100).unsqueeze(0).to(DEVICE)
76
  with torch.no_grad():
77
  logits2 = model2(input_tensor2)
@@ -93,7 +93,7 @@ async def predict_full(
93
 
94
  # Свежесть, если подходит
95
  if fruit_name in FRESHNESS_ELIGIBLE:
96
- cropped_224 = crop_fruit_with_white_bg(orig_np, mask, out_size=224)
97
  input_tensor3 = preprocess_for_classifier(cropped_224).unsqueeze(0).to(DEVICE)
98
  with torch.no_grad():
99
  logits3 = model3(input_tensor3)
@@ -108,7 +108,7 @@ async def predict_full(
108
 
109
  # Возвращаем обрезанное изображение
110
  if return_cropped:
111
- cropped_final = crop_fruit_with_white_bg(orig_np, mask, out_size=cropped_size)
112
  pil_img = Image.fromarray(cropped_final)
113
  buffered = io.BytesIO()
114
  pil_img.save(buffered, format="PNG")
 
6
  import base64
7
  from models import load_sam, load_model2, load_model3
8
  from utils import (
9
+ crop_fruit_contour_letterbox,
10
  preprocess_for_classifier,
11
  FRUIT_CLASSES,
12
  FRESHNESS_CLASSES
 
71
  }
72
 
73
  # Обрезка под 100×100 для сорта
74
+ cropped_100 = crop_fruit_contour_letterbox(orig_np, mask, out_size=100)
75
  input_tensor2 = preprocess_for_classifier(cropped_100).unsqueeze(0).to(DEVICE)
76
  with torch.no_grad():
77
  logits2 = model2(input_tensor2)
 
93
 
94
  # Свежесть, если подходит
95
  if fruit_name in FRESHNESS_ELIGIBLE:
96
+ cropped_224 = crop_fruit_contour_letterbox(orig_np, mask, out_size=224)
97
  input_tensor3 = preprocess_for_classifier(cropped_224).unsqueeze(0).to(DEVICE)
98
  with torch.no_grad():
99
  logits3 = model3(input_tensor3)
 
108
 
109
  # Возвращаем обрезанное изображение
110
  if return_cropped:
111
+ cropped_final = crop_fruit_contour_letterbox(orig_np, mask, out_size=cropped_size)
112
  pil_img = Image.fromarray(cropped_final)
113
  buffered = io.BytesIO()
114
  pil_img.save(buffered, format="PNG")
utils.py CHANGED
@@ -39,13 +39,13 @@ def letterbox_any_size(
39
  cv2.BORDER_CONSTANT, value=bg_color)
40
  return padded
41
 
42
- def crop_fruit_with_white_bg(
43
  orig_img: np.ndarray,
44
  mask: np.ndarray,
45
  out_size: int = 224,
46
  bg_color: tuple = (255, 255, 255)
47
  ) -> np.ndarray:
48
- mask_bin = mask.astype(np.uint8)
49
 
50
  ys, xs = np.where(mask_bin == 1)
51
  if len(xs) == 0:
@@ -54,7 +54,17 @@ def crop_fruit_with_white_bg(
54
  y1, y2 = ys.min(), ys.max()
55
  x1, x2 = xs.min(), xs.max()
56
 
57
- cropped = orig_img[y1:y2+1, x1:x2+1].copy()
 
 
 
 
 
 
 
 
 
 
 
58
 
59
- final = letterbox_any_size(cropped, target_size=out_size, bg_color=bg_color)
60
  return final
 
39
  cv2.BORDER_CONSTANT, value=bg_color)
40
  return padded
41
 
42
+ def crop_fruit_contour_letterbox(
43
  orig_img: np.ndarray,
44
  mask: np.ndarray,
45
  out_size: int = 224,
46
  bg_color: tuple = (255, 255, 255)
47
  ) -> np.ndarray:
48
+ mask_bin = (mask > 0.5).astype(np.uint8)
49
 
50
  ys, xs = np.where(mask_bin == 1)
51
  if len(xs) == 0:
 
54
  y1, y2 = ys.min(), ys.max()
55
  x1, x2 = xs.min(), xs.max()
56
 
57
+ cropped_rgb = orig_img[y1:y2+1, x1:x2+1].copy()
58
+ cropped_mask = mask_bin[y1:y2+1, x1:x2+1]
59
+
60
+ # Сначала letterbox
61
+ letterboxed = letterbox_any_size(cropped_rgb, target_size=out_size, bg_color=bg_color)
62
+
63
+ # Масштабируем маску под letterbox (это сложнее, но можно приблизить)
64
+ # Для простоты — применяем маску на cropped перед letterbox
65
+ white_bg = np.full_like(cropped_rgb, bg_color)
66
+ masked_cropped = np.where(cropped_mask[..., None] == 1, cropped_rgb, white_bg)
67
+
68
+ final = letterbox_any_size(masked_cropped, target_size=out_size, bg_color=bg_color)
69
 
 
70
  return final