fitleech committed on
Commit a239cff · verified · 1 Parent(s): b144898

Upload folder using huggingface_hub

Files changed (4)
  1. chute_config.yml +20 -0
  2. class_names.txt +3 -0
  3. miner.py +869 -0
  4. weights.onnx +3 -0
chute_config.yml ADDED
@@ -0,0 +1,20 @@
+ Image:
+   from_base: parachutes/python:3.12
+   run_command:
+     - pip install --upgrade setuptools wheel
+     - pip install 'numpy>=1.23' 'onnxruntime-gpu>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9'
+     - pip install torch==2.8.0 torchvision==0.23.0 torchaudio==2.8.0 --index-url https://download.pytorch.org/whl/cu128
+
+ NodeSelector:
+   gpu_count: 1
+   min_vram_gb_per_gpu: 16
+   include:
+     - pro_6000
+
+ Chute:
+   timeout_seconds: 900
+   concurrency: 4
+   max_instances: 5
+   scaling_threshold: 0.5
+   shutdown_after_seconds: 288000
+   tee: true
class_names.txt ADDED
@@ -0,0 +1,3 @@
+ bottle
+ can
+ cup
miner.py ADDED
@@ -0,0 +1,869 @@
+ from pathlib import Path
+ import math
+
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+ from numpy import ndarray
+ from pydantic import BaseModel
+
+
+ class BoundingBox(BaseModel):
+     x1: int
+     y1: int
+     x2: int
+     y2: int
+     cls_id: int
+     conf: float
+
+
+ class TVFrameResult(BaseModel):
+     frame_id: int
+     boxes: list[BoundingBox]
+     keypoints: list[tuple[int, int]]
+
+
+ class Miner:
+     def __init__(self,
+                  path_hf_repo: Path
+                  ) -> None:
+         model_path = path_hf_repo / "weights.onnx"
+         self.class_names = ['cup', 'bottle', 'can']
+         self._cls_cup = self.class_names.index("cup")
+         self._cls_bottle = self.class_names.index("bottle")
+         self._cls_can = self.class_names.index("can")
+         model_class_order = ["bottle", "can", "cup"]
+         self.cls_remap = np.array(
+             [self.class_names.index(n) for n in model_class_order], dtype=np.int32
+         )
+         print("ORT version:", ort.__version__)
+
+         try:
+             ort.preload_dlls()
+             print("✅ onnxruntime.preload_dlls() success")
+         except Exception as e:
+             print(f"⚠️ preload_dlls failed: {e}")
+
+         print("ORT available providers BEFORE session:", ort.get_available_providers())
+
+         sess_options = ort.SessionOptions()
+         sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+         try:
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
+             )
+             print("✅ Created ORT session with preferred CUDA provider list")
+         except Exception as e:
+             print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CPUExecutionProvider"],
+             )
+
+         print("ORT session providers:", self.session.get_providers())
+
+         for inp in self.session.get_inputs():
+             print("INPUT:", inp.name, inp.shape, inp.type)
+
+         for out in self.session.get_outputs():
+             print("OUTPUT:", out.name, out.shape, out.type)
+
+         self.input_name = self.session.get_inputs()[0].name
+         self.output_names = [output.name for output in self.session.get_outputs()]
+         self.input_shape = self.session.get_inputs()[0].shape
+
+         # The export is fixed-size 1280, but we still read the actual ONNX input shape first.
+         self.input_height = self._safe_dim(self.input_shape[2], default=1280)
+         self.input_width = self._safe_dim(self.input_shape[3], default=1280)
+
+         # Tuned for validator scoring: reduce FP (FALSE_POSITIVE pillar),
+         # preserve recall (MAP50, RECALL), improve precision.
+         self.conf_thres = 0.32  # Higher = fewer FP, slightly lower recall
+         self.iou_thres = 0.5  # Lower = suppress duplicate detections (FP)
+         self.cross_iou_thresh = 0.6
+         self.max_det = 150  # Cap detections per image
+         self.use_tta = True
+
+         # Box sanity: filter tiny/spurious detections (common FP source)
+         self.min_box_area = 100  # minimum box area in px²
+         self.min_side = 6
+         self.max_aspect_ratio = 8.0
+
+         print(f"✅ ONNX model loaded from: {model_path}")
+         print(f"✅ ONNX providers: {self.session.get_providers()}")
+         print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")
+
+     def __repr__(self) -> str:
+         return (
+             f"ONNXRuntime(session={type(self.session).__name__}, "
+             f"providers={self.session.get_providers()})"
+         )
+
+     @staticmethod
+     def _safe_dim(value, default: int) -> int:
+         return value if isinstance(value, int) and value > 0 else default
+
+     def _letterbox(
+         self,
+         image: ndarray,
+         new_shape: tuple[int, int],
+         color=(114, 114, 114),
+     ) -> tuple[ndarray, float, tuple[float, float]]:
+         """
+         Resize with unchanged aspect ratio and pad to target shape.
+         Returns:
+             padded_image,
+             ratio,
+             (pad_w, pad_h)  # half-padding
+         """
+         h, w = image.shape[:2]
+         new_w, new_h = new_shape
+
+         ratio = min(new_w / w, new_h / h)
+         resized_w = int(round(w * ratio))
+         resized_h = int(round(h * ratio))
+
+         if (resized_w, resized_h) != (w, h):
+             interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
+             image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)
+
+         dw = new_w - resized_w
+         dh = new_h - resized_h
+         dw /= 2.0
+         dh /= 2.0
+
+         left = int(round(dw - 0.1))
+         right = int(round(dw + 0.1))
+         top = int(round(dh - 0.1))
+         bottom = int(round(dh + 0.1))
+
+         padded = cv2.copyMakeBorder(
+             image,
+             top,
+             bottom,
+             left,
+             right,
+             borderType=cv2.BORDER_CONSTANT,
+             value=color,
+         )
+         return padded, ratio, (dw, dh)
+
+     def _preprocess(
+         self, image: ndarray
+     ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
+         """
+         Preprocess for fixed-size ONNX export:
+         - letterbox to model input size
+         - pad with constant gray (114, 114, 114)
+         - BGR -> RGB
+         - normalize to [0,1]
+         - HWC -> NCHW float32
+         """
+         orig_h, orig_w = image.shape[:2]
+
+         img, ratio, pad = self._letterbox(
+             image, (self.input_width, self.input_height)
+         )
+         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         img = img.astype(np.float32) / 255.0
+         img = np.transpose(img, (2, 0, 1))[None, ...]
+         img = np.ascontiguousarray(img, dtype=np.float32)
+
+         return img, ratio, pad, (orig_w, orig_h)
+
+     @staticmethod
+     def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
+         w, h = image_size
+         boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
+         boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
+         boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
+         boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
+         return boxes
+
+     @staticmethod
+     def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
+         out = np.empty_like(boxes)
+         out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
+         out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
+         out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
+         out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
+         return out
+
+     def _soft_nms(
+         self,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         sigma: float = 0.5,
+         score_thresh: float = 0.01,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         """
+         Soft-NMS: Gaussian decay of overlapping scores instead of hard removal.
+         Processing order prefers **larger** boxes first (then score), so duplicate
+         detections on one object tend to keep the larger box.
+         Returns (kept_original_indices, updated_scores).
+         """
+         N = len(boxes)
+         if N == 0:
+             return np.array([], dtype=np.intp), np.array([], dtype=np.float32)
+
+         boxes = boxes.astype(np.float32, copy=True)
+         scores = scores.astype(np.float32, copy=True)
+         areas = (
+             np.maximum(0.0, boxes[:, 2] - boxes[:, 0])
+             * np.maximum(0.0, boxes[:, 3] - boxes[:, 1])
+         ).astype(np.float32)
+         order = np.arange(N)
+
+         for i in range(N):
+             max_pos = i + int(np.lexsort((-scores[i:], -areas[i:]))[0])  # [0] = largest area, then highest score
+             boxes[[i, max_pos]] = boxes[[max_pos, i]]
+             scores[[i, max_pos]] = scores[[max_pos, i]]
+             order[[i, max_pos]] = order[[max_pos, i]]
+             areas[[i, max_pos]] = areas[[max_pos, i]]
+
+             if i + 1 >= N:
+                 break
+
+             xx1 = np.maximum(boxes[i, 0], boxes[i + 1:, 0])
+             yy1 = np.maximum(boxes[i, 1], boxes[i + 1:, 1])
+             xx2 = np.minimum(boxes[i, 2], boxes[i + 1:, 2])
+             yy2 = np.minimum(boxes[i, 3], boxes[i + 1:, 3])
+             inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+
+             area_i = max(0.0, float(
+                 (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
+             ))
+             areas_j = (
+                 np.maximum(0.0, boxes[i + 1:, 2] - boxes[i + 1:, 0])
+                 * np.maximum(0.0, boxes[i + 1:, 3] - boxes[i + 1:, 1])
+             )
+             iou = inter / (area_i + areas_j - inter + 1e-7)
+             scores[i + 1:] *= np.exp(-(iou ** 2) / sigma)
+
+         mask = scores > score_thresh
+         return order[mask], scores[mask]
+
+     @staticmethod
+     def _hard_nms(
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         iou_thresh: float,
+     ) -> np.ndarray:
+         """
+         Hard NMS: keep one box per overlapping cluster.
+         When two boxes cover the same object, keep the **larger** box (area),
+         breaking ties with higher score.
+         """
+         N = len(boxes)
+         if N == 0:
+             return np.array([], dtype=np.intp)
+         boxes = np.asarray(boxes, dtype=np.float32)
+         scores = np.asarray(scores, dtype=np.float32)
+         areas = np.maximum(0.0, boxes[:, 2] - boxes[:, 0]) * np.maximum(
+             0.0, boxes[:, 3] - boxes[:, 1]
+         )
+         order = np.lexsort((-scores, -areas))
+         keep: list[int] = []
+         suppressed = np.zeros(N, dtype=bool)
+         for i in range(N):
+             idx = order[i]
+             if suppressed[idx]:
+                 continue
+             keep.append(idx)
+             bi = boxes[idx]
+             for k in range(i + 1, N):
+                 jdx = order[k]
+                 if suppressed[jdx]:
+                     continue
+                 bj = boxes[jdx]
+                 xx1 = max(bi[0], bj[0])
+                 yy1 = max(bi[1], bj[1])
+                 xx2 = min(bi[2], bj[2])
+                 yy2 = min(bi[3], bj[3])
+                 inter = max(0.0, xx2 - xx1) * max(0.0, yy2 - yy1)
+                 area_i = (bi[2] - bi[0]) * (bi[3] - bi[1])
+                 area_j = (bj[2] - bj[0]) * (bj[3] - bj[1])
+                 iou = inter / (area_i + area_j - inter + 1e-7)
+                 if iou > iou_thresh:
+                     suppressed[jdx] = True
+         return np.array(keep)
+
+     def _per_class_hard_nms(
+         self,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         iou_thresh: float,
+     ) -> np.ndarray:
+         """Hard NMS applied independently per class."""
+         if len(boxes) == 0:
+             return np.array([], dtype=np.intp)
+         all_keep: list[int] = []
+         for c in np.unique(cls_ids):
+             mask = cls_ids == c
+             indices = np.where(mask)[0]
+             keep = self._hard_nms(boxes[mask], scores[mask], iou_thresh)
+             all_keep.extend(indices[keep].tolist())
+         all_keep.sort()
+         return np.array(all_keep, dtype=np.intp)
+
+     def _per_class_soft_nms(
+         self,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         sigma: float = 0.5,
+         score_thresh: float = 0.01,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         """Soft NMS applied independently per class."""
+         if len(boxes) == 0:
+             return np.array([], dtype=np.intp), np.array([], dtype=np.float32)
+         all_keep: list[int] = []
+         all_scores: list[float] = []
+         for c in np.unique(cls_ids):
+             mask = cls_ids == c
+             indices = np.where(mask)[0]
+             keep, updated = self._soft_nms(boxes[mask], scores[mask], sigma, score_thresh)
+             for k, s in zip(keep, updated):
+                 all_keep.append(int(indices[k]))
+                 all_scores.append(float(s))
+         if not all_keep:
+             return np.array([], dtype=np.intp), np.array([], dtype=np.float32)
+         return np.array(all_keep, dtype=np.intp), np.array(all_scores, dtype=np.float32)
+
+     @staticmethod
+     def _cross_class_dedup(
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         iou_thresh: float,
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+         """Suppress high-overlap duplicates regardless of class."""
+         n = len(boxes)
+         if n <= 1:
+             return boxes, scores, cls_ids
+
+         boxes = np.asarray(boxes, dtype=np.float32)
+         scores = np.asarray(scores, dtype=np.float32)
+         cls_ids = np.asarray(cls_ids, dtype=np.int32)
+
+         areas = np.maximum(0.0, boxes[:, 2] - boxes[:, 0]) * np.maximum(
+             0.0, boxes[:, 3] - boxes[:, 1]
+         )
+         # Match dataset-prep behavior: keep larger boxes first, then higher score.
+         order = np.lexsort((-scores, -areas))
+         suppressed = np.zeros(n, dtype=bool)
+         keep: list[int] = []
+
+         for i in order:
+             if suppressed[i]:
+                 continue
+             keep.append(int(i))
+             bi = boxes[i]
+             xx1 = np.maximum(bi[0], boxes[:, 0])
+             yy1 = np.maximum(bi[1], boxes[:, 1])
+             xx2 = np.minimum(bi[2], boxes[:, 2])
+             yy2 = np.minimum(bi[3], boxes[:, 3])
+             inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+             area_i = max(1e-7, float((bi[2] - bi[0]) * (bi[3] - bi[1])))
+             union = area_i + areas - inter + 1e-7
+             iou = inter / union
+             dup = iou > iou_thresh
+             dup[i] = False
+             suppressed |= dup
+
+         keep_idx = np.array(keep, dtype=np.intp)
+         return boxes[keep_idx], scores[keep_idx], cls_ids[keep_idx]
+
+     @staticmethod
+     def _iou_xyxy(a: np.ndarray, b: np.ndarray) -> float:
+         """Intersection-over-union for two xyxy boxes (float arrays length 4)."""
+         ax1, ay1, ax2, ay2 = float(a[0]), float(a[1]), float(a[2]), float(a[3])
+         bx1, by1, bx2, by2 = float(b[0]), float(b[1]), float(b[2]), float(b[3])
+         ix1 = max(ax1, bx1)
+         iy1 = max(ay1, by1)
+         ix2 = min(ax2, bx2)
+         iy2 = min(ay2, by2)
+         iw = max(0.0, ix2 - ix1)
+         ih = max(0.0, iy2 - iy1)
+         inter = iw * ih
+         area_a = max(0.0, ax2 - ax1) * max(0.0, ay2 - ay1)
+         area_b = max(0.0, bx2 - bx1) * max(0.0, by2 - by1)
+         union = area_a + area_b - inter + 1e-7
+         return inter / union
+
+     def _apply_cross_class_precedence(
+         self,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         iou_thresh: float | None = None,
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+         """
+         When one object is detected as multiple classes (high IoU overlap):
+         - bottle wins over cup and can (drop overlapping cup / can)
+         - can wins over cup (drop overlapping cup when no bottle conflict)
+         """
+         thr = self.cross_iou_thresh if iou_thresh is None else iou_thresh
+         if len(boxes) == 0:
+             return boxes, scores, cls_ids
+
+         bottle_boxes = boxes[cls_ids == self._cls_bottle]
+         can_boxes = boxes[cls_ids == self._cls_can]
+         cup_mask = cls_ids == self._cls_cup
+         can_mask = cls_ids == self._cls_can
+
+         keep_row = np.ones(len(boxes), dtype=bool)
+
+         # Can loses to bottle
+         if len(bottle_boxes) > 0 and can_mask.any():
+             for i in np.where(can_mask)[0]:
+                 bi = boxes[i]
+                 for bb in bottle_boxes:
+                     if self._iou_xyxy(bi, bb) >= thr:
+                         keep_row[i] = False
+                         break
+
+         # Cup loses to bottle or can
+         if cup_mask.any():
+             for i in np.where(cup_mask)[0]:
+                 if not keep_row[i]:
+                     continue
+                 bi = boxes[i]
+                 if len(bottle_boxes) > 0:
+                     for bb in bottle_boxes:
+                         if self._iou_xyxy(bi, bb) >= thr:
+                             keep_row[i] = False
+                             break
+                 if keep_row[i] and len(can_boxes) > 0:
+                     for cb in can_boxes:
+                         if self._iou_xyxy(bi, cb) >= thr:
+                             keep_row[i] = False
+                             break
+
+         if keep_row.all():
+             return boxes, scores, cls_ids
+         k = np.where(keep_row)[0]
+         return boxes[k], scores[k], cls_ids[k]
+
+     def _apply_cross_class_precedence_list(
+         self, boxes: list[BoundingBox]
+     ) -> list[BoundingBox]:
+         """Same precedence as _apply_cross_class_precedence for post-TTA lists."""
+         if len(boxes) < 2:
+             return boxes
+         thr = self.cross_iou_thresh
+         bottles = [b for b in boxes if b.cls_id == self._cls_bottle]
+         cans = [b for b in boxes if b.cls_id == self._cls_can]
+
+         def overlaps_any(ba: np.ndarray, others: list[BoundingBox]) -> bool:
+             for o in others:
+                 oa = np.array([o.x1, o.y1, o.x2, o.y2], dtype=np.float32)
+                 if self._iou_xyxy(ba, oa) >= thr:
+                     return True
+             return False
+
+         out: list[BoundingBox] = []
+         for b in boxes:
+             ba = np.array([b.x1, b.y1, b.x2, b.y2], dtype=np.float32)
+             if b.cls_id == self._cls_can:
+                 if bottles and overlaps_any(ba, bottles):
+                     continue
+             elif b.cls_id == self._cls_cup:
+                 if bottles and overlaps_any(ba, bottles):
+                     continue
+                 if cans and overlaps_any(ba, cans):
+                     continue
+             out.append(b)
+         return out
+
+     def _filter_sane_boxes(
+         self,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         orig_size: tuple[int, int],
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+         """Filter out tiny, degenerate, or implausible boxes (common FP)."""
+         if len(boxes) == 0:
+             return boxes, scores, cls_ids
+         orig_w, orig_h = orig_size
+         image_area = float(orig_w * orig_h)
+         keep = []
+         for i, box in enumerate(boxes):
+             x1, y1, x2, y2 = box.tolist()
+             bw = x2 - x1
+             bh = y2 - y1
+             if bw <= 0 or bh <= 0:
+                 continue
+             if bw < self.min_side or bh < self.min_side:
+                 continue
+             area = bw * bh
+             if area < self.min_box_area:
+                 continue
+             if area > 0.95 * image_area:
+                 continue
+             ar = max(bw / max(bh, 1e-6), bh / max(bw, 1e-6))
+             if ar > self.max_aspect_ratio:
+                 continue
+             keep.append(i)
+         if not keep:
+             return (
+                 np.empty((0, 4), dtype=np.float32),
+                 np.empty((0,), dtype=np.float32),
+                 np.empty((0,), dtype=np.int32),
+             )
+         k = np.array(keep, dtype=np.intp)
+         return boxes[k], scores[k], cls_ids[k]
+
+     @staticmethod
+     def _max_score_per_cluster(
+         coords: np.ndarray,
+         scores: np.ndarray,
+         keep_indices: np.ndarray,
+         iou_thresh: float,
+     ) -> np.ndarray:
+         """
+         For each kept box, return the max original score among itself and any
+         box that overlaps it with IOU >= iou_thresh (so TTA cluster keeps best conf).
+         """
+         n_keep = len(keep_indices)
+         if n_keep == 0:
+             return np.array([], dtype=np.float32)
+         out = np.empty(n_keep, dtype=np.float32)
+         coords = np.asarray(coords, dtype=np.float32)
+         scores = np.asarray(scores, dtype=np.float32)
+         for i in range(n_keep):
+             idx = keep_indices[i]
+             bi = coords[idx]
+             xx1 = np.maximum(bi[0], coords[:, 0])
+             yy1 = np.maximum(bi[1], coords[:, 1])
+             xx2 = np.minimum(bi[2], coords[:, 2])
+             yy2 = np.minimum(bi[3], coords[:, 3])
+             inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+             area_i = (bi[2] - bi[0]) * (bi[3] - bi[1])
+             areas_j = (coords[:, 2] - coords[:, 0]) * (coords[:, 3] - coords[:, 1])
+             iou = inter / (area_i + areas_j - inter + 1e-7)
+             in_cluster = iou >= iou_thresh
+             out[i] = float(np.max(scores[in_cluster]))
+         return out
+
+     def _decode_final_dets(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+         apply_optional_dedup: bool = False,
+     ) -> list[BoundingBox]:
+         """
+         Primary path:
+             expected output rows like [x1, y1, x2, y2, conf, cls_id]
+             in letterboxed input coordinates.
+         """
+         if preds.ndim == 3 and preds.shape[0] == 1:
+             preds = preds[0]
+
+         if preds.ndim != 2 or preds.shape[1] < 6:
+             raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")
+
+         boxes = preds[:, :4].astype(np.float32)
+         scores = preds[:, 4].astype(np.float32)
+         cls_ids = preds[:, 5].astype(np.int32)
+         cls_ids = self.cls_remap[cls_ids]
+
+         keep = scores >= self.conf_thres
+         boxes = boxes[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         if len(boxes) == 0:
+             return []
+
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+
+         # reverse letterbox
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+
+         # Box sanity filter (reduces FP)
+         boxes, scores, cls_ids = self._filter_sane_boxes(
+             boxes, scores, cls_ids, orig_size
+         )
+         if len(boxes) == 0:
+             return []
+
+         # Per-class NMS to remove duplicates without suppressing across classes
+         if len(boxes) > 1:
+             if apply_optional_dedup:
+                 keep_idx, scores = self._per_class_soft_nms(boxes, scores, cls_ids)
+                 boxes = boxes[keep_idx]
+                 cls_ids = cls_ids[keep_idx]
+             else:
+                 keep_idx = self._per_class_hard_nms(boxes, scores, cls_ids, self.iou_thres)
+                 keep_idx = keep_idx[: self.max_det]
+                 boxes = boxes[keep_idx]
+                 scores = scores[keep_idx]
+                 cls_ids = cls_ids[keep_idx]
+             boxes, scores, cls_ids = self._cross_class_dedup(
+                 boxes, scores, cls_ids, self.cross_iou_thresh
+             )
+
+         if len(boxes) > 0:
+             boxes, scores, cls_ids = self._apply_cross_class_precedence(
+                 boxes, scores, cls_ids
+             )
+
+         results: list[BoundingBox] = []
+         for box, conf, cls_id in zip(boxes, scores, cls_ids):
+             x1, y1, x2, y2 = box.tolist()
+
+             if x2 <= x1 or y2 <= y1:
+                 continue
+
+             results.append(
+                 BoundingBox(
+                     x1=int(math.floor(x1)),
+                     y1=int(math.floor(y1)),
+                     x2=int(math.ceil(x2)),
+                     y2=int(math.ceil(y2)),
+                     cls_id=int(cls_id),
+                     conf=float(conf),
+                 )
+             )
+
+         return results
+
+     def _decode_raw_yolo(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         """
+         Fallback path for raw YOLO predictions.
+         Supports common layouts:
+         - [1, C, N]
+         - [1, N, C]
+         """
+         if preds.ndim != 3:
+             raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")
+
+         if preds.shape[0] != 1:
+             raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")
+
+         preds = preds[0]
+
+         # Normalize to [N, C]
+         if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
+             preds = preds.T
+
+         if preds.ndim != 2 or preds.shape[1] < 5:
+             raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")
+
+         boxes_xywh = preds[:, :4].astype(np.float32)
+         cls_part = preds[:, 4:].astype(np.float32)
+
+         if cls_part.shape[1] == 1:
+             scores = cls_part[:, 0]
+             cls_ids = np.zeros(len(scores), dtype=np.int32)
+         else:
+             cls_ids = np.argmax(cls_part, axis=1).astype(np.int32)
+             scores = cls_part[np.arange(len(cls_part)), cls_ids]
+             cls_ids = self.cls_remap[cls_ids]
+
+         keep = scores >= self.conf_thres
+         boxes_xywh = boxes_xywh[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         if len(boxes_xywh) == 0:
+             return []
+
+         boxes = self._xywh_to_xyxy(boxes_xywh)
+
+         keep_idx = self._per_class_hard_nms(boxes, scores, cls_ids, self.iou_thres)
+         keep_idx = keep_idx[: self.max_det]
+         boxes = boxes[keep_idx]
+         scores = scores[keep_idx]
+         cls_ids = cls_ids[keep_idx]
+         boxes, scores, cls_ids = self._cross_class_dedup(
+             boxes, scores, cls_ids, self.cross_iou_thresh
+         )
+         boxes, scores, cls_ids = self._apply_cross_class_precedence(
+             boxes, scores, cls_ids
+         )
+
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+
+         boxes, scores, cls_ids = self._filter_sane_boxes(
+             boxes, scores, cls_ids, (orig_w, orig_h)
+         )
+         if len(boxes) == 0:
+             return []
+
+         results: list[BoundingBox] = []
+         for box, conf, cls_id in zip(boxes, scores, cls_ids):
+             x1, y1, x2, y2 = box.tolist()
+
+             if x2 <= x1 or y2 <= y1:
+                 continue
+
+             results.append(
+                 BoundingBox(
+                     x1=int(math.floor(x1)),
+                     y1=int(math.floor(y1)),
+                     x2=int(math.ceil(x2)),
+                     y2=int(math.ceil(y2)),
+                     cls_id=int(cls_id),
+                     conf=float(conf),
+                 )
+             )
+
+         return results
+
+     def _postprocess(
+         self,
+         output: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         """
+         Prefer final detections first.
+         Fallback to raw decode only if needed.
+         """
+         # final detections: [N,6]
+         if output.ndim == 2 and output.shape[1] >= 6:
+             return self._decode_final_dets(output, ratio, pad, orig_size)
+
+         # final detections: [1,N,6]
+         if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] == 6:
+             return self._decode_final_dets(output, ratio, pad, orig_size)
+
+         # fallback raw decode
+         return self._decode_raw_yolo(output, ratio, pad, orig_size)
+
+     def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
+         if image is None:
+             raise ValueError("Input image is None")
+         if not isinstance(image, np.ndarray):
+             raise TypeError(f"Input is not numpy array: {type(image)}")
+         if image.ndim != 3:
+             raise ValueError(f"Expected HWC image, got shape={image.shape}")
+         if image.shape[0] <= 0 or image.shape[1] <= 0:
+             raise ValueError(f"Invalid image shape={image.shape}")
+         if image.shape[2] != 3:
+             raise ValueError(f"Expected 3 channels, got shape={image.shape}")
+
+         if image.dtype != np.uint8:
+             image = image.astype(np.uint8)
+
+         input_tensor, ratio, pad, orig_size = self._preprocess(image)
+
+         expected_shape = (1, 3, self.input_height, self.input_width)
+         if input_tensor.shape != expected_shape:
+             raise ValueError(
+                 f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
+             )
+
+         outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
+         det_output = outputs[0]
+         return self._postprocess(det_output, ratio, pad, orig_size)
+
+     def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
+         """
+         Horizontal-flip TTA: merge original + flipped via hard NMS.
+         Boost confidence for consensus detections (both views agree) to improve
+         mAP: validator sorts by confidence, so higher conf for TP helps PR curve.
+         """
+         boxes_orig = self._predict_single(image)
+
+         flipped = cv2.flip(image, 1)
+         boxes_flip = self._predict_single(flipped)
+
+         w = image.shape[1]
+         boxes_flip = [
+             BoundingBox(
+                 x1=w - b.x2, y1=b.y1, x2=w - b.x1, y2=b.y2,
+                 cls_id=b.cls_id, conf=b.conf,
+             )
+             for b in boxes_flip
+         ]
+
+         all_boxes = boxes_orig + boxes_flip
+         if len(all_boxes) == 0:
+             return []
+
+         coords = np.array(
+             [[b.x1, b.y1, b.x2, b.y2] for b in all_boxes], dtype=np.float32
+         )
+         scores = np.array([b.conf for b in all_boxes], dtype=np.float32)
+         cls_ids = np.array([b.cls_id for b in all_boxes], dtype=np.int32)
+
+         hard_keep = self._per_class_hard_nms(coords, scores, cls_ids, self.iou_thres)
+         if len(hard_keep) == 0:
+             return []
+
+         hard_keep = hard_keep[: self.max_det]
+
+         # Boost confidence when both views agree (overlapping detections)
+         boosted = self._max_score_per_cluster(
+             coords, scores, hard_keep, self.iou_thres
+         )
+
+         return self._apply_cross_class_precedence_list(
+             [
+                 BoundingBox(
+                     x1=all_boxes[i].x1,
+                     y1=all_boxes[i].y1,
+                     x2=all_boxes[i].x2,
+                     y2=all_boxes[i].y2,
+                     cls_id=all_boxes[i].cls_id,
+                     conf=float(boosted[j]),
+                 )
+                 for j, i in enumerate(hard_keep)
+             ]
+         )
+
+     def predict_batch(
+         self,
+         batch_images: list[ndarray],
+         offset: int,
+         n_keypoints: int,
+     ) -> list[TVFrameResult]:
+         results: list[TVFrameResult] = []
+
+         for frame_number_in_batch, image in enumerate(batch_images):
+             try:
+                 if self.use_tta:
+                     boxes = self._predict_tta(image)
+                 else:
+                     boxes = self._predict_single(image)
+             except Exception as e:
+                 print(f"⚠️ Inference failed for frame {offset + frame_number_in_batch}: {e}")
+                 boxes = []
+
+             results.append(
+                 TVFrameResult(
+                     frame_id=offset + frame_number_in_batch,
+                     boxes=boxes,
+                     keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
+                 )
+             )
+
+         return results
weights.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c0831618841953e228d3e19a3ce1726a7ea2a8650c0bda1600eea1c4ea94da8
+ size 19407230