fitleech committed on
Commit
e8cee04
·
verified ·
1 Parent(s): 6df817d

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. chute_config.yml +23 -0
  2. miner.py +600 -0
  3. weights.onnx +3 -0
chute_config.yml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Image:
2
+ from_base: parachutes/python:3.12
3
+ run_command:
4
+ - pip install --upgrade setuptools wheel
5
+ - pip install 'numpy>=1.23' 'onnxruntime-gpu[cuda,cudnn]>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9'
6
+ - pip install torch torchvision
7
+
8
+ NodeSelector:
9
+ gpu_count: 1
10
+ min_vram_gb_per_gpu: 16
11
+ max_hourly_price_per_gpu: 1
12
+ exclude:
13
+ - b200
14
+ - h200
15
+ - h20
16
+ - mi300x
17
+
18
+ Chute:
19
+ timeout_seconds: 900
20
+ concurrency: 4
21
+ max_instances: 5
22
+ scaling_threshold: 0.5
23
+ shutdown_after_seconds: 288000
miner.py ADDED
@@ -0,0 +1,600 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ import math
3
+
4
+ import cv2
5
+ import numpy as np
6
+ import onnxruntime as ort
7
+ from numpy import ndarray
8
+ from pydantic import BaseModel
9
+
10
+
11
class BoundingBox(BaseModel):
    """Axis-aligned detection box in original-image pixel coordinates."""

    # Top-left corner (x1, y1) and bottom-right corner (x2, y2), integer pixels.
    x1: int
    y1: int
    x2: int
    y2: int
    # Class index into Miner.class_names (0 == "person" — the only class kept).
    cls_id: int
    # Detection confidence score.
    conf: float
18
+
19
+
20
class TVFrameResult(BaseModel):
    """Per-frame detection result returned by Miner.predict_batch."""

    # Absolute frame index (batch offset + position within the batch).
    frame_id: int
    # Person detections for this frame; empty when inference found none or failed.
    boxes: list[BoundingBox]
    # Keypoint placeholders; predict_batch fills these with (0, 0) tuples.
    keypoints: list[tuple[int, int]]
24
+
25
+
26
class Miner:
    """Person detector serving an ONNX model through onnxruntime.

    Loads ``weights.onnx`` from the given repository directory, preferring the
    CUDA execution provider with a CPU fallback, and exposes ``predict_batch``
    for frame-by-frame inference with optional horizontal-flip TTA.
    """

    def __init__(self,
                 path_hf_repo: Path
                 ) -> None:
        """Create the ONNX session and configure detection thresholds.

        Args:
            path_hf_repo: Directory that contains ``weights.onnx``.
        """
        model_path = path_hf_repo / "weights.onnx"
        self.class_names = ["person"]
        print("ORT version:", ort.__version__)

        # Best-effort: failure (e.g. builds without preload_dlls) is logged
        # and ignored; session creation below has its own fallback.
        try:
            ort.preload_dlls()
            print("✅ onnxruntime.preload_dlls() success")
        except Exception as e:
            print(f"⚠️ preload_dlls failed: {e}")

        print("ORT available providers BEFORE session:", ort.get_available_providers())

        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        # Prefer CUDA; fall back to a pure-CPU session if creation fails.
        try:
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
            )
            print("✅ Created ORT session with preferred CUDA provider list")
        except Exception as e:
            print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CPUExecutionProvider"],
            )

        print("ORT session providers:", self.session.get_providers())

        for inp in self.session.get_inputs():
            print("INPUT:", inp.name, inp.shape, inp.type)

        for out in self.session.get_outputs():
            print("OUTPUT:", out.name, out.shape, out.type)

        self.input_name = self.session.get_inputs()[0].name
        self.output_names = [output.name for output in self.session.get_outputs()]
        self.input_shape = self.session.get_inputs()[0].shape

        # Dynamic ONNX axes may be symbolic/None; fall back to 1280x1280.
        self.input_height = self._safe_dim(self.input_shape[2], default=1280)
        self.input_width = self._safe_dim(self.input_shape[3], default=1280)

        # ---------- Scoring-oriented thresholds ----------
        # Candidate-generation confidence floor used by the decoders.
        # NOTE(review): at 0.68 this is ABOVE conf_high (0.30), so every
        # surviving candidate already counts as "high confidence" in
        # _merge_tta_consensus and cross-view TTA confirmation never gates
        # anything — confirm these two values were not meant to be swapped.
        self.conf_thres = 0.68

        # Boxes at/above this confidence skip TTA confirmation.
        self.conf_high = 0.30

        # NMS threshold
        self.iou_thres = 0.35

        # TTA confirmation IoU
        self.tta_match_iou = 0.68

        self.max_det = 150
        self.use_tta = True

        # Box sanity filters (see _filter_sane_boxes).
        self.min_box_area = 14 * 14
        self.min_w = 8
        self.min_h = 8
        self.max_aspect_ratio = 8.0
        self.max_box_area_ratio = 0.8

        print(f"✅ ONNX model loaded from: {model_path}")
        print(f"✅ ONNX providers: {self.session.get_providers()}")
        print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")
101
+
102
+ def __repr__(self) -> str:
103
+ return (
104
+ f"ONNXRuntime(session={type(self.session).__name__}, "
105
+ f"providers={self.session.get_providers()})"
106
+ )
107
+
108
+ @staticmethod
109
+ def _safe_dim(value, default: int) -> int:
110
+ return value if isinstance(value, int) and value > 0 else default
111
+
112
+ def _letterbox(
113
+ self,
114
+ image: ndarray,
115
+ new_shape: tuple[int, int],
116
+ color=(114, 114, 114),
117
+ ) -> tuple[ndarray, float, tuple[float, float]]:
118
+ h, w = image.shape[:2]
119
+ new_w, new_h = new_shape
120
+
121
+ ratio = min(new_w / w, new_h / h)
122
+ resized_w = int(round(w * ratio))
123
+ resized_h = int(round(h * ratio))
124
+
125
+ if (resized_w, resized_h) != (w, h):
126
+ interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
127
+ image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)
128
+
129
+ dw = new_w - resized_w
130
+ dh = new_h - resized_h
131
+ dw /= 2.0
132
+ dh /= 2.0
133
+
134
+ left = int(round(dw - 0.1))
135
+ right = int(round(dw + 0.1))
136
+ top = int(round(dh - 0.1))
137
+ bottom = int(round(dh + 0.1))
138
+
139
+ padded = cv2.copyMakeBorder(
140
+ image,
141
+ top,
142
+ bottom,
143
+ left,
144
+ right,
145
+ borderType=cv2.BORDER_CONSTANT,
146
+ value=color,
147
+ )
148
+ return padded, ratio, (dw, dh)
149
+
150
+ def _preprocess(
151
+ self, image: ndarray
152
+ ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
153
+ orig_h, orig_w = image.shape[:2]
154
+
155
+ img, ratio, pad = self._letterbox(
156
+ image, (self.input_width, self.input_height)
157
+ )
158
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
159
+ img = img.astype(np.float32) / 255.0
160
+ img = np.transpose(img, (2, 0, 1))[None, ...]
161
+ img = np.ascontiguousarray(img, dtype=np.float32)
162
+
163
+ return img, ratio, pad, (orig_w, orig_h)
164
+
165
+ @staticmethod
166
+ def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
167
+ w, h = image_size
168
+ boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
169
+ boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
170
+ boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
171
+ boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
172
+ return boxes
173
+
174
+ @staticmethod
175
+ def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
176
+ out = np.empty_like(boxes)
177
+ out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
178
+ out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
179
+ out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
180
+ out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
181
+ return out
182
+
183
+ @staticmethod
184
+ def _hard_nms(
185
+ boxes: np.ndarray,
186
+ scores: np.ndarray,
187
+ iou_thresh: float,
188
+ ) -> np.ndarray:
189
+ if len(boxes) == 0:
190
+ return np.array([], dtype=np.intp)
191
+
192
+ boxes = np.asarray(boxes, dtype=np.float32)
193
+ scores = np.asarray(scores, dtype=np.float32)
194
+ order = np.argsort(scores)[::-1]
195
+ keep = []
196
+
197
+ while len(order) > 0:
198
+ i = order[0]
199
+ keep.append(i)
200
+ if len(order) == 1:
201
+ break
202
+
203
+ rest = order[1:]
204
+
205
+ xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
206
+ yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
207
+ xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
208
+ yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
209
+
210
+ inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
211
+
212
+ area_i = np.maximum(0.0, (boxes[i, 2] - boxes[i, 0])) * np.maximum(0.0, (boxes[i, 3] - boxes[i, 1]))
213
+ area_r = np.maximum(0.0, (boxes[rest, 2] - boxes[rest, 0])) * np.maximum(0.0, (boxes[rest, 3] - boxes[rest, 1]))
214
+
215
+ iou = inter / (area_i + area_r - inter + 1e-7)
216
+ order = rest[iou <= iou_thresh]
217
+
218
+ return np.array(keep, dtype=np.intp)
219
+
220
+ @staticmethod
221
+ def _box_iou_one_to_many(box: np.ndarray, boxes: np.ndarray) -> np.ndarray:
222
+ xx1 = np.maximum(box[0], boxes[:, 0])
223
+ yy1 = np.maximum(box[1], boxes[:, 1])
224
+ xx2 = np.minimum(box[2], boxes[:, 2])
225
+ yy2 = np.minimum(box[3], boxes[:, 3])
226
+
227
+ inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
228
+
229
+ area_a = max(0.0, (box[2] - box[0]) * (box[3] - box[1]))
230
+ area_b = np.maximum(0.0, boxes[:, 2] - boxes[:, 0]) * np.maximum(0.0, boxes[:, 3] - boxes[:, 1])
231
+
232
+ return inter / (area_a + area_b - inter + 1e-7)
233
+
234
+ def _filter_sane_boxes(
235
+ self,
236
+ boxes: np.ndarray,
237
+ scores: np.ndarray,
238
+ cls_ids: np.ndarray,
239
+ orig_size: tuple[int, int],
240
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
241
+ if len(boxes) == 0:
242
+ return boxes, scores, cls_ids
243
+
244
+ orig_w, orig_h = orig_size
245
+ image_area = float(orig_w * orig_h)
246
+
247
+ keep = []
248
+ for i, box in enumerate(boxes):
249
+ x1, y1, x2, y2 = box.tolist()
250
+ bw = x2 - x1
251
+ bh = y2 - y1
252
+
253
+ if bw <= 0 or bh <= 0:
254
+ continue
255
+ if bw < self.min_w or bh < self.min_h:
256
+ continue
257
+
258
+ area = bw * bh
259
+ if area < self.min_box_area:
260
+ continue
261
+ if area > self.max_box_area_ratio * image_area:
262
+ continue
263
+
264
+ ar = max(bw / max(bh, 1e-6), bh / max(bw, 1e-6))
265
+ if ar > self.max_aspect_ratio:
266
+ continue
267
+
268
+ keep.append(i)
269
+
270
+ if not keep:
271
+ return (
272
+ np.empty((0, 4), dtype=np.float32),
273
+ np.empty((0,), dtype=np.float32),
274
+ np.empty((0,), dtype=np.int32),
275
+ )
276
+
277
+ keep = np.array(keep, dtype=np.intp)
278
+ return boxes[keep], scores[keep], cls_ids[keep]
279
+
280
+ def _decode_final_dets(
281
+ self,
282
+ preds: np.ndarray,
283
+ ratio: float,
284
+ pad: tuple[float, float],
285
+ orig_size: tuple[int, int],
286
+ ) -> list[BoundingBox]:
287
+ if preds.ndim == 3 and preds.shape[0] == 1:
288
+ preds = preds[0]
289
+
290
+ if preds.ndim != 2 or preds.shape[1] < 6:
291
+ raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")
292
+
293
+ boxes = preds[:, :4].astype(np.float32)
294
+ scores = preds[:, 4].astype(np.float32)
295
+ cls_ids = preds[:, 5].astype(np.int32)
296
+
297
+ # person only
298
+ keep = cls_ids == 0
299
+ boxes = boxes[keep]
300
+ scores = scores[keep]
301
+ cls_ids = cls_ids[keep]
302
+
303
+ # candidate threshold
304
+ keep = scores >= self.conf_thres
305
+ boxes = boxes[keep]
306
+ scores = scores[keep]
307
+ cls_ids = cls_ids[keep]
308
+
309
+ if len(boxes) == 0:
310
+ return []
311
+
312
+ pad_w, pad_h = pad
313
+ orig_w, orig_h = orig_size
314
+
315
+ boxes[:, [0, 2]] -= pad_w
316
+ boxes[:, [1, 3]] -= pad_h
317
+ boxes /= ratio
318
+ boxes = self._clip_boxes(boxes, (orig_w, orig_h))
319
+
320
+ boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
321
+ if len(boxes) == 0:
322
+ return []
323
+
324
+ keep_idx = self._hard_nms(boxes, scores, self.iou_thres)
325
+ keep_idx = keep_idx[: self.max_det]
326
+
327
+ boxes = boxes[keep_idx]
328
+ scores = scores[keep_idx]
329
+ cls_ids = cls_ids[keep_idx]
330
+
331
+ return [
332
+ BoundingBox(
333
+ x1=int(math.floor(box[0])),
334
+ y1=int(math.floor(box[1])),
335
+ x2=int(math.ceil(box[2])),
336
+ y2=int(math.ceil(box[3])),
337
+ cls_id=int(cls_id),
338
+ conf=float(conf),
339
+ )
340
+ for box, conf, cls_id in zip(boxes, scores, cls_ids)
341
+ if box[2] > box[0] and box[3] > box[1]
342
+ ]
343
+
344
+ def _decode_raw_yolo(
345
+ self,
346
+ preds: np.ndarray,
347
+ ratio: float,
348
+ pad: tuple[float, float],
349
+ orig_size: tuple[int, int],
350
+ ) -> list[BoundingBox]:
351
+ if preds.ndim != 3:
352
+ raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")
353
+ if preds.shape[0] != 1:
354
+ raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")
355
+
356
+ preds = preds[0]
357
+
358
+ # Normalize to [N, C]
359
+ if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
360
+ preds = preds.T
361
+
362
+ if preds.ndim != 2 or preds.shape[1] < 5:
363
+ raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")
364
+
365
+ boxes_xywh = preds[:, :4].astype(np.float32)
366
+ tail = preds[:, 4:].astype(np.float32)
367
+
368
+ # Supports:
369
+ # [x,y,w,h,score] single-class
370
+ # [x,y,w,h,obj,cls] YOLO standard single-class
371
+ # [x,y,w,h,obj,cls1,cls2,...] multi-class
372
+ if tail.shape[1] == 1:
373
+ scores = tail[:, 0]
374
+ cls_ids = np.zeros(len(scores), dtype=np.int32)
375
+ elif tail.shape[1] == 2:
376
+ obj = tail[:, 0]
377
+ cls_prob = tail[:, 1]
378
+ scores = obj * cls_prob
379
+ cls_ids = np.zeros(len(scores), dtype=np.int32)
380
+ else:
381
+ obj = tail[:, 0]
382
+ class_probs = tail[:, 1:]
383
+ cls_ids = np.argmax(class_probs, axis=1).astype(np.int32)
384
+ cls_scores = class_probs[np.arange(len(class_probs)), cls_ids]
385
+ scores = obj * cls_scores
386
+
387
+ keep = cls_ids == 0
388
+ boxes_xywh = boxes_xywh[keep]
389
+ scores = scores[keep]
390
+ cls_ids = cls_ids[keep]
391
+
392
+ keep = scores >= self.conf_thres
393
+ boxes_xywh = boxes_xywh[keep]
394
+ scores = scores[keep]
395
+ cls_ids = cls_ids[keep]
396
+
397
+ if len(boxes_xywh) == 0:
398
+ return []
399
+
400
+ boxes = self._xywh_to_xyxy(boxes_xywh)
401
+
402
+ pad_w, pad_h = pad
403
+ orig_w, orig_h = orig_size
404
+
405
+ boxes[:, [0, 2]] -= pad_w
406
+ boxes[:, [1, 3]] -= pad_h
407
+ boxes /= ratio
408
+ boxes = self._clip_boxes(boxes, (orig_w, orig_h))
409
+
410
+ boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
411
+ if len(boxes) == 0:
412
+ return []
413
+
414
+ keep_idx = self._hard_nms(boxes, scores, self.iou_thres)
415
+ keep_idx = keep_idx[: self.max_det]
416
+
417
+ boxes = boxes[keep_idx]
418
+ scores = scores[keep_idx]
419
+ cls_ids = cls_ids[keep_idx]
420
+
421
+ return [
422
+ BoundingBox(
423
+ x1=int(math.floor(box[0])),
424
+ y1=int(math.floor(box[1])),
425
+ x2=int(math.ceil(box[2])),
426
+ y2=int(math.ceil(box[3])),
427
+ cls_id=int(cls_id),
428
+ conf=float(conf),
429
+ )
430
+ for box, conf, cls_id in zip(boxes, scores, cls_ids)
431
+ if box[2] > box[0] and box[3] > box[1]
432
+ ]
433
+
434
+ def _postprocess(
435
+ self,
436
+ output: np.ndarray,
437
+ ratio: float,
438
+ pad: tuple[float, float],
439
+ orig_size: tuple[int, int],
440
+ ) -> list[BoundingBox]:
441
+ if output.ndim == 2 and output.shape[1] >= 6:
442
+ return self._decode_final_dets(output, ratio, pad, orig_size)
443
+
444
+ if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] >= 6:
445
+ return self._decode_final_dets(output, ratio, pad, orig_size)
446
+
447
+ return self._decode_raw_yolo(output, ratio, pad, orig_size)
448
+
449
+ def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
450
+ if image is None:
451
+ raise ValueError("Input image is None")
452
+ if not isinstance(image, np.ndarray):
453
+ raise TypeError(f"Input is not numpy array: {type(image)}")
454
+ if image.ndim != 3:
455
+ raise ValueError(f"Expected HWC image, got shape={image.shape}")
456
+ if image.shape[0] <= 0 or image.shape[1] <= 0:
457
+ raise ValueError(f"Invalid image shape={image.shape}")
458
+ if image.shape[2] != 3:
459
+ raise ValueError(f"Expected 3 channels, got shape={image.shape}")
460
+
461
+ if image.dtype != np.uint8:
462
+ image = image.astype(np.uint8)
463
+
464
+ input_tensor, ratio, pad, orig_size = self._preprocess(image)
465
+
466
+ expected_shape = (1, 3, self.input_height, self.input_width)
467
+ if input_tensor.shape != expected_shape:
468
+ raise ValueError(
469
+ f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
470
+ )
471
+
472
+ outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
473
+ det_output = outputs[0]
474
+ return self._postprocess(det_output, ratio, pad, orig_size)
475
+
476
    def _merge_tta_consensus(
        self,
        boxes_orig: list[BoundingBox],
        boxes_flip: list[BoundingBox],
    ) -> list[BoundingBox]:
        """
        Merge detections from the original and horizontally-flipped views.

        Keep:
          - any box with conf >= conf_high
          - low/medium-conf boxes only if confirmed across TTA views
        Then run final hard NMS.

        NOTE(review): with the current __init__ values (conf_thres=0.68,
        conf_high=0.30) every candidate reaching this method already has
        conf >= conf_high, so the cross-view confirmation branch is
        effectively dead — confirm the two thresholds were not meant to be
        swapped.
        """
        if not boxes_orig and not boxes_flip:
            return []

        # Pack the pydantic boxes into flat arrays for vectorized IoU tests.
        coords_o = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_orig], dtype=np.float32) if boxes_orig else np.empty((0, 4), dtype=np.float32)
        scores_o = np.array([b.conf for b in boxes_orig], dtype=np.float32) if boxes_orig else np.empty((0,), dtype=np.float32)

        coords_f = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_flip], dtype=np.float32) if boxes_flip else np.empty((0, 4), dtype=np.float32)
        scores_f = np.array([b.conf for b in boxes_flip], dtype=np.float32) if boxes_flip else np.empty((0,), dtype=np.float32)

        accepted_boxes = []
        accepted_scores = []

        # Original view candidates
        for i in range(len(coords_o)):
            score = scores_o[i]
            if score >= self.conf_high:
                # High confidence: keep without cross-view confirmation.
                accepted_boxes.append(coords_o[i])
                accepted_scores.append(score)
            elif len(coords_f) > 0:
                # Lower confidence: keep only if the flipped view found a
                # well-overlapping box; fuse to the higher of the two scores.
                ious = self._box_iou_one_to_many(coords_o[i], coords_f)
                j = int(np.argmax(ious))
                if ious[j] >= self.tta_match_iou:
                    fused_score = max(score, scores_f[j])
                    accepted_boxes.append(coords_o[i])
                    accepted_scores.append(fused_score)

        # Flipped-view high-confidence boxes that original missed
        for i in range(len(coords_f)):
            score = scores_f[i]
            if score < self.conf_high:
                continue

            if len(coords_o) == 0:
                accepted_boxes.append(coords_f[i])
                accepted_scores.append(score)
                continue

            # Add only when no original-view box already covers this region.
            ious = self._box_iou_one_to_many(coords_f[i], coords_o)
            if np.max(ious) < self.tta_match_iou:
                accepted_boxes.append(coords_f[i])
                accepted_scores.append(score)

        if not accepted_boxes:
            return []

        boxes = np.array(accepted_boxes, dtype=np.float32)
        scores = np.array(accepted_scores, dtype=np.float32)

        # Final suppression across the merged set (person-only: cls_id == 0).
        keep = self._hard_nms(boxes, scores, self.iou_thres)
        keep = keep[: self.max_det]

        out = []
        for idx in keep:
            x1, y1, x2, y2 = boxes[idx].tolist()
            out.append(
                BoundingBox(
                    x1=int(math.floor(x1)),
                    y1=int(math.floor(y1)),
                    x2=int(math.ceil(x2)),
                    y2=int(math.ceil(y2)),
                    cls_id=0,
                    conf=float(scores[idx]),
                )
            )
        return out
552
+
553
+ def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
554
+ boxes_orig = self._predict_single(image)
555
+
556
+ flipped = cv2.flip(image, 1)
557
+ boxes_flip_raw = self._predict_single(flipped)
558
+
559
+ w = image.shape[1]
560
+ boxes_flip = [
561
+ BoundingBox(
562
+ x1=w - b.x2,
563
+ y1=b.y1,
564
+ x2=w - b.x1,
565
+ y2=b.y2,
566
+ cls_id=b.cls_id,
567
+ conf=b.conf,
568
+ )
569
+ for b in boxes_flip_raw
570
+ ]
571
+
572
+ return self._merge_tta_consensus(boxes_orig, boxes_flip)
573
+
574
+ def predict_batch(
575
+ self,
576
+ batch_images: list[ndarray],
577
+ offset: int,
578
+ n_keypoints: int,
579
+ ) -> list[TVFrameResult]:
580
+ results: list[TVFrameResult] = []
581
+
582
+ for frame_number_in_batch, image in enumerate(batch_images):
583
+ try:
584
+ if self.use_tta:
585
+ boxes = self._predict_tta(image)
586
+ else:
587
+ boxes = self._predict_single(image)
588
+ except Exception as e:
589
+ print(f"⚠️ Inference failed for frame {offset + frame_number_in_batch}: {e}")
590
+ boxes = []
591
+
592
+ results.append(
593
+ TVFrameResult(
594
+ frame_id=offset + frame_number_in_batch,
595
+ boxes=boxes,
596
+ keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
597
+ )
598
+ )
599
+
600
+ return results
weights.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8603eec59969cdd368ec3651ff4ed01700c6d3700dad1328349e41f3f09efaf5
3
+ size 20431015