fitleech committed
Commit d7df1ae · verified · parent: bc973f8

Upload folder using huggingface_hub

Files changed (3):
  1. chute_config.yml +23 -0
  2. miner.py +599 -0
  3. weights.onnx +3 -0
chute_config.yml ADDED
@@ -0,0 +1,23 @@
Image:
  from_base: parachutes/python:3.12
  run_command:
    - pip install --upgrade setuptools wheel
    - pip install 'numpy>=1.23' 'onnxruntime-gpu[cuda,cudnn]>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9'
    - pip install torch torchvision

NodeSelector:
  gpu_count: 1
  min_vram_gb_per_gpu: 16
  max_hourly_price_per_gpu: 0.8
  exclude:
    - b200
    - h200
    - h20
    - mi300x

Chute:
  timeout_seconds: 900
  concurrency: 4
  max_instances: 5
  scaling_threshold: 0.5
  shutdown_after_seconds: 288000
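
The block above is plain YAML (Image build steps, NodeSelector constraints, Chute runtime limits), so it can be sanity-checked locally before upload. A minimal sketch using pyyaml, which the image already installs; the filename and the checks are illustrative assumptions, not part of this commit:

check_config.py:
import yaml  # pyyaml, pinned in run_command above

with open("chute_config.yml") as f:
    cfg = yaml.safe_load(f)

# The deployment expects these three top-level sections.
for section in ("Image", "NodeSelector", "Chute"):
    assert section in cfg, f"missing section: {section}"

node = cfg["NodeSelector"]
print(f"{node['gpu_count']} GPU(s), >= {node['min_vram_gb_per_gpu']} GB VRAM, "
      f"<= ${node['max_hourly_price_per_gpu']}/hr, excluding {node['exclude']}")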
miner.py ADDED
@@ -0,0 +1,599 @@
from pathlib import Path
import math

import cv2
import numpy as np
import onnxruntime as ort
from numpy import ndarray
from pydantic import BaseModel


class BoundingBox(BaseModel):
    x1: int
    y1: int
    x2: int
    y2: int
    cls_id: int
    conf: float


class TVFrameResult(BaseModel):
    frame_id: int
    boxes: list[BoundingBox]
    keypoints: list[tuple[int, int]]


class Miner:
    def __init__(self, path_hf_repo: Path) -> None:
        model_path = path_hf_repo / "weights.onnx"
        self.class_names = ["numberplate"]
        print("ORT version:", ort.__version__)

        try:
            ort.preload_dlls()
            print("✅ onnxruntime.preload_dlls() success")
        except Exception as e:
            print(f"⚠️ preload_dlls failed: {e}")

        print("ORT available providers BEFORE session:", ort.get_available_providers())

        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        try:
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
            )
            print("✅ Created ORT session with preferred CUDA provider list")
        except Exception as e:
            print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CPUExecutionProvider"],
            )

        print("ORT session providers:", self.session.get_providers())

        for inp in self.session.get_inputs():
            print("INPUT:", inp.name, inp.shape, inp.type)

        for out in self.session.get_outputs():
            print("OUTPUT:", out.name, out.shape, out.type)

        self.input_name = self.session.get_inputs()[0].name
        self.output_names = [output.name for output in self.session.get_outputs()]
        self.input_shape = self.session.get_inputs()[0].shape

        self.input_height = self._safe_dim(self.input_shape[2], default=1280)
        self.input_width = self._safe_dim(self.input_shape[3], default=1280)

        # ---------- Scoring-oriented thresholds (single-class number plates) ----------
        # Aligned with miner_script/002/miner.py so non-sweep predictions match 002.
        self.conf_thres = 0.082

        # Below this on orig view, a box needs a flip-view IoU match (TTA) to survive.
        self.conf_high = 0.66

        self.iou_thres = 0.62

        # Plates are small; allow a bit of shift between orig / flipped view.
        self.tta_match_iou = 0.62

        self.max_det = 26
        self.use_tta = True

        # Box sanity filters (plates can be small and very wide)
        self.min_box_area = 13
        self.min_w = 5
        self.min_h = 3
        self.max_aspect_ratio = 8.5
        self.max_box_area_ratio = 0.8

        print(f"✅ ONNX model loaded from: {model_path}")
        print(f"✅ ONNX providers: {self.session.get_providers()}")
        print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")

    def __repr__(self) -> str:
        return (
            f"ONNXRuntime(session={type(self.session).__name__}, "
            f"providers={self.session.get_providers()})"
        )

    @staticmethod
    def _safe_dim(value, default: int) -> int:
        return value if isinstance(value, int) and value > 0 else default

    def _letterbox(
        self,
        image: ndarray,
        new_shape: tuple[int, int],
        color=(114, 114, 114),
    ) -> tuple[ndarray, float, tuple[float, float]]:
        h, w = image.shape[:2]
        new_w, new_h = new_shape

        ratio = min(new_w / w, new_h / h)
        resized_w = int(round(w * ratio))
        resized_h = int(round(h * ratio))

        if (resized_w, resized_h) != (w, h):
            interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
            image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)

        dw = new_w - resized_w
        dh = new_h - resized_h
        dw /= 2.0
        dh /= 2.0

        left = int(round(dw - 0.1))
        right = int(round(dw + 0.1))
        top = int(round(dh - 0.1))
        bottom = int(round(dh + 0.1))

        padded = cv2.copyMakeBorder(
            image,
            top,
            bottom,
            left,
            right,
            borderType=cv2.BORDER_CONSTANT,
            value=color,
        )
        return padded, ratio, (dw, dh)
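
    # Worked example of the letterbox math above (illustrative comment, not in
    # the original source): a 1920x1080 frame targeted at 1280x1280 gives
    # ratio = min(1280/1920, 1280/1080) ≈ 0.6667, so the frame is resized to
    # 1280x720 and dh = (1280 - 720) / 2 = 280 px of gray padding on the top
    # and bottom edges (dw = 0). The _decode_* methods later subtract (dw, dh)
    # and divide by ratio to map boxes back to original-frame coordinates.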

    def _preprocess(
        self, image: ndarray
    ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
        orig_h, orig_w = image.shape[:2]

        img, ratio, pad = self._letterbox(
            image, (self.input_width, self.input_height)
        )
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255.0
        img = np.transpose(img, (2, 0, 1))[None, ...]
        img = np.ascontiguousarray(img, dtype=np.float32)

        return img, ratio, pad, (orig_w, orig_h)

    @staticmethod
    def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
        w, h = image_size
        boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
        boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
        boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
        boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
        return boxes

    @staticmethod
    def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
        out = np.empty_like(boxes)
        out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
        out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
        out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
        out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
        return out

    @staticmethod
    def _hard_nms(
        boxes: np.ndarray,
        scores: np.ndarray,
        iou_thresh: float,
    ) -> np.ndarray:
        if len(boxes) == 0:
            return np.array([], dtype=np.intp)

        boxes = np.asarray(boxes, dtype=np.float32)
        scores = np.asarray(scores, dtype=np.float32)
        order = np.argsort(scores)[::-1]
        keep = []

        while len(order) > 0:
            i = order[0]
            keep.append(i)
            if len(order) == 1:
                break

            rest = order[1:]

            xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
            yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
            xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
            yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])

            inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)

            area_i = np.maximum(0.0, boxes[i, 2] - boxes[i, 0]) * np.maximum(0.0, boxes[i, 3] - boxes[i, 1])
            area_r = np.maximum(0.0, boxes[rest, 2] - boxes[rest, 0]) * np.maximum(0.0, boxes[rest, 3] - boxes[rest, 1])

            iou = inter / (area_i + area_r - inter + 1e-7)
            order = rest[iou <= iou_thresh]

        return np.array(keep, dtype=np.intp)
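
    # Quick illustration of the greedy NMS above (comment added for clarity,
    # not in the original source): with iou_thresh = 0.62, boxes
    # [0, 0, 10, 10] (conf 0.9) and [1, 0, 11, 10] (conf 0.5) overlap with
    # IoU = 90 / 110 ≈ 0.82 > 0.62, so only the higher-confidence box survives.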

    @staticmethod
    def _box_iou_one_to_many(box: np.ndarray, boxes: np.ndarray) -> np.ndarray:
        xx1 = np.maximum(box[0], boxes[:, 0])
        yy1 = np.maximum(box[1], boxes[:, 1])
        xx2 = np.minimum(box[2], boxes[:, 2])
        yy2 = np.minimum(box[3], boxes[:, 3])

        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)

        area_a = max(0.0, (box[2] - box[0]) * (box[3] - box[1]))
        area_b = np.maximum(0.0, boxes[:, 2] - boxes[:, 0]) * np.maximum(0.0, boxes[:, 3] - boxes[:, 1])

        return inter / (area_a + area_b - inter + 1e-7)

    def _filter_sane_boxes(
        self,
        boxes: np.ndarray,
        scores: np.ndarray,
        cls_ids: np.ndarray,
        orig_size: tuple[int, int],
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
        if len(boxes) == 0:
            return boxes, scores, cls_ids

        orig_w, orig_h = orig_size
        image_area = float(orig_w * orig_h)

        keep = []
        for i, box in enumerate(boxes):
            x1, y1, x2, y2 = box.tolist()
            bw = x2 - x1
            bh = y2 - y1

            if bw <= 0 or bh <= 0:
                continue
            if bw < self.min_w or bh < self.min_h:
                continue

            area = bw * bh
            if area < self.min_box_area:
                continue
            if area > self.max_box_area_ratio * image_area:
                continue

            ar = max(bw / max(bh, 1e-6), bh / max(bw, 1e-6))
            if ar > self.max_aspect_ratio:
                continue

            keep.append(i)

        if not keep:
            return (
                np.empty((0, 4), dtype=np.float32),
                np.empty((0,), dtype=np.float32),
                np.empty((0,), dtype=np.int32),
            )

        keep = np.array(keep, dtype=np.intp)
        return boxes[keep], scores[keep], cls_ids[keep]

    def _decode_final_dets(
        self,
        preds: np.ndarray,
        ratio: float,
        pad: tuple[float, float],
        orig_size: tuple[int, int],
    ) -> list[BoundingBox]:
        if preds.ndim == 3 and preds.shape[0] == 1:
            preds = preds[0]

        if preds.ndim != 2 or preds.shape[1] < 6:
            raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")

        boxes = preds[:, :4].astype(np.float32)
        scores = preds[:, 4].astype(np.float32)
        cls_ids = preds[:, 5].astype(np.int32)

        # single-class model: keep only class 0 ("numberplate")
        keep = cls_ids == 0
        boxes = boxes[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]

        # candidate threshold
        keep = scores >= self.conf_thres
        boxes = boxes[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]

        if len(boxes) == 0:
            return []

        pad_w, pad_h = pad
        orig_w, orig_h = orig_size

        boxes[:, [0, 2]] -= pad_w
        boxes[:, [1, 3]] -= pad_h
        boxes /= ratio
        boxes = self._clip_boxes(boxes, (orig_w, orig_h))

        boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
        if len(boxes) == 0:
            return []

        keep_idx = self._hard_nms(boxes, scores, self.iou_thres)
        keep_idx = keep_idx[: self.max_det]

        boxes = boxes[keep_idx]
        scores = scores[keep_idx]
        cls_ids = cls_ids[keep_idx]

        return [
            BoundingBox(
                x1=int(math.floor(box[0])),
                y1=int(math.floor(box[1])),
                x2=int(math.ceil(box[2])),
                y2=int(math.ceil(box[3])),
                cls_id=int(cls_id),
                conf=float(conf),
            )
            for box, conf, cls_id in zip(boxes, scores, cls_ids)
            if box[2] > box[0] and box[3] > box[1]
        ]

    def _decode_raw_yolo(
        self,
        preds: np.ndarray,
        ratio: float,
        pad: tuple[float, float],
        orig_size: tuple[int, int],
    ) -> list[BoundingBox]:
        if preds.ndim != 3:
            raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")
        if preds.shape[0] != 1:
            raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")

        preds = preds[0]

        # Normalize to [N, C]
        if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
            preds = preds.T

        if preds.ndim != 2 or preds.shape[1] < 5:
            raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")

        boxes_xywh = preds[:, :4].astype(np.float32)
        tail = preds[:, 4:].astype(np.float32)

        # Supports:
        #   [x,y,w,h,score]              single-class
        #   [x,y,w,h,obj,cls]            YOLO standard single-class
        #   [x,y,w,h,obj,cls1,cls2,...]  multi-class
        if tail.shape[1] == 1:
            scores = tail[:, 0]
            cls_ids = np.zeros(len(scores), dtype=np.int32)
        elif tail.shape[1] == 2:
            obj = tail[:, 0]
            cls_prob = tail[:, 1]
            scores = obj * cls_prob
            cls_ids = np.zeros(len(scores), dtype=np.int32)
        else:
            obj = tail[:, 0]
            class_probs = tail[:, 1:]
            cls_ids = np.argmax(class_probs, axis=1).astype(np.int32)
            cls_scores = class_probs[np.arange(len(class_probs)), cls_ids]
            scores = obj * cls_scores

        # single-class model: keep only class 0 ("numberplate")
        keep = cls_ids == 0
        boxes_xywh = boxes_xywh[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]

        keep = scores >= self.conf_thres
        boxes_xywh = boxes_xywh[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]

        if len(boxes_xywh) == 0:
            return []

        boxes = self._xywh_to_xyxy(boxes_xywh)

        pad_w, pad_h = pad
        orig_w, orig_h = orig_size

        boxes[:, [0, 2]] -= pad_w
        boxes[:, [1, 3]] -= pad_h
        boxes /= ratio
        boxes = self._clip_boxes(boxes, (orig_w, orig_h))

        boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
        if len(boxes) == 0:
            return []

        keep_idx = self._hard_nms(boxes, scores, self.iou_thres)
        keep_idx = keep_idx[: self.max_det]

        boxes = boxes[keep_idx]
        scores = scores[keep_idx]
        cls_ids = cls_ids[keep_idx]

        return [
            BoundingBox(
                x1=int(math.floor(box[0])),
                y1=int(math.floor(box[1])),
                x2=int(math.ceil(box[2])),
                y2=int(math.ceil(box[3])),
                cls_id=int(cls_id),
                conf=float(conf),
            )
            for box, conf, cls_id in zip(boxes, scores, cls_ids)
            if box[2] > box[0] and box[3] > box[1]
        ]
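
    # Layout note (illustrative comment, not in the original source): YOLO-style
    # exports commonly emit raw output as (1, C, N), with C = 4 box coords plus
    # score/class channels and N candidate anchors; the "shape[0] <= 16"
    # heuristic above transposes such outputs to (N, C) so that each row is one
    # candidate box before thresholding and NMS.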

    def _postprocess(
        self,
        output: np.ndarray,
        ratio: float,
        pad: tuple[float, float],
        orig_size: tuple[int, int],
    ) -> list[BoundingBox]:
        if output.ndim == 2 and output.shape[1] >= 6:
            return self._decode_final_dets(output, ratio, pad, orig_size)

        if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] >= 6:
            return self._decode_final_dets(output, ratio, pad, orig_size)

        return self._decode_raw_yolo(output, ratio, pad, orig_size)

    def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
        if image is None:
            raise ValueError("Input image is None")
        if not isinstance(image, np.ndarray):
            raise TypeError(f"Input is not a numpy array: {type(image)}")
        if image.ndim != 3:
            raise ValueError(f"Expected HWC image, got shape={image.shape}")
        if image.shape[0] <= 0 or image.shape[1] <= 0:
            raise ValueError(f"Invalid image shape={image.shape}")
        if image.shape[2] != 3:
            raise ValueError(f"Expected 3 channels, got shape={image.shape}")

        if image.dtype != np.uint8:
            image = image.astype(np.uint8)

        input_tensor, ratio, pad, orig_size = self._preprocess(image)

        expected_shape = (1, 3, self.input_height, self.input_width)
        if input_tensor.shape != expected_shape:
            raise ValueError(
                f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
            )

        outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
        det_output = outputs[0]
        return self._postprocess(det_output, ratio, pad, orig_size)

    def _merge_tta_consensus(
        self,
        boxes_orig: list[BoundingBox],
        boxes_flip: list[BoundingBox],
    ) -> list[BoundingBox]:
        """
        Keep:
          - any box with conf >= conf_high
          - low/medium-conf boxes only if confirmed across TTA views
        Then run a final hard NMS.
        """
        if not boxes_orig and not boxes_flip:
            return []

        if boxes_orig:
            coords_o = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_orig], dtype=np.float32)
            scores_o = np.array([b.conf for b in boxes_orig], dtype=np.float32)
        else:
            coords_o = np.empty((0, 4), dtype=np.float32)
            scores_o = np.empty((0,), dtype=np.float32)

        if boxes_flip:
            coords_f = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_flip], dtype=np.float32)
            scores_f = np.array([b.conf for b in boxes_flip], dtype=np.float32)
        else:
            coords_f = np.empty((0, 4), dtype=np.float32)
            scores_f = np.empty((0,), dtype=np.float32)

        accepted_boxes = []
        accepted_scores = []

        # Original-view candidates
        for i in range(len(coords_o)):
            score = scores_o[i]
            if score >= self.conf_high:
                accepted_boxes.append(coords_o[i])
                accepted_scores.append(score)
            elif len(coords_f) > 0:
                ious = self._box_iou_one_to_many(coords_o[i], coords_f)
                j = int(np.argmax(ious))
                if ious[j] >= self.tta_match_iou:
                    fused_score = max(score, scores_f[j])
                    accepted_boxes.append(coords_o[i])
                    accepted_scores.append(fused_score)

        # Flipped-view high-confidence boxes that the original view missed
        for i in range(len(coords_f)):
            score = scores_f[i]
            if score < self.conf_high:
                continue

            if len(coords_o) == 0:
                accepted_boxes.append(coords_f[i])
                accepted_scores.append(score)
                continue

            ious = self._box_iou_one_to_many(coords_f[i], coords_o)
            if np.max(ious) < self.tta_match_iou:
                accepted_boxes.append(coords_f[i])
                accepted_scores.append(score)

        if not accepted_boxes:
            return []

        boxes = np.array(accepted_boxes, dtype=np.float32)
        scores = np.array(accepted_scores, dtype=np.float32)

        keep = self._hard_nms(boxes, scores, self.iou_thres)
        keep = keep[: self.max_det]

        out = []
        for idx in keep:
            x1, y1, x2, y2 = boxes[idx].tolist()
            out.append(
                BoundingBox(
                    x1=int(math.floor(x1)),
                    y1=int(math.floor(y1)),
                    x2=int(math.ceil(x2)),
                    y2=int(math.ceil(y2)),
                    cls_id=0,
                    conf=float(scores[idx]),
                )
            )
        return out

    def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
        boxes_orig = self._predict_single(image)

        flipped = cv2.flip(image, 1)
        boxes_flip_raw = self._predict_single(flipped)

        # Map flip-view boxes back into original-image coordinates.
        w = image.shape[1]
        boxes_flip = [
            BoundingBox(
                x1=w - b.x2,
                y1=b.y1,
                x2=w - b.x1,
                y2=b.y2,
                cls_id=b.cls_id,
                conf=b.conf,
            )
            for b in boxes_flip_raw
        ]

        return self._merge_tta_consensus(boxes_orig, boxes_flip)
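
    # Flip-mapping check (illustrative comment, not in the original source):
    # in a frame of width w = 1920, a flipped-view box with x1 = 100, x2 = 220
    # maps back to x1 = 1920 - 220 = 1700, x2 = 1920 - 100 = 1820, so the
    # horizontal order of the corners is preserved.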

    def predict_batch(
        self,
        batch_images: list[ndarray],
        offset: int,
        n_keypoints: int,
    ) -> list[TVFrameResult]:
        results: list[TVFrameResult] = []

        for frame_number_in_batch, image in enumerate(batch_images):
            try:
                if self.use_tta:
                    boxes = self._predict_tta(image)
                else:
                    boxes = self._predict_single(image)
            except Exception as e:
                print(f"⚠️ Inference failed for frame {offset + frame_number_in_batch}: {e}")
                boxes = []

            results.append(
                TVFrameResult(
                    frame_id=offset + frame_number_in_batch,
                    boxes=boxes,
                    keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
                )
            )

        return results
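
For local smoke-testing, the Miner class can be driven directly through predict_batch. A minimal sketch, assuming the repo has been downloaded next to the script; the directory name and the test image are placeholders, not part of this commit:

smoke_test.py:
from pathlib import Path

import cv2

from miner import Miner

# Assumes weights.onnx sits in ./repo (e.g. fetched via huggingface_hub).
miner = Miner(path_hf_repo=Path("./repo"))

frame = cv2.imread("test_frame.jpg")  # BGR HWC uint8, as _predict_single expects
results = miner.predict_batch([frame], offset=0, n_keypoints=0)

for r in results:
    for b in r.boxes:
        print(r.frame_id, b.cls_id, round(b.conf, 3), (b.x1, b.y1, b.x2, b.y2))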
weights.onnx ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac86065b9d2b73ee302124f3fdaae9144d09d756ab8290900146e98375bba724
size 19892467