alfred8995 committed
Commit d87c096 · verified · 1 Parent(s): b9e222a

Upload folder using huggingface_hub

Files changed (4):
  1. README.md +23 -0
  2. chute_config.yml +24 -0
  3. miner.py +644 -0
  4. weights.onnx +3 -0
README.md ADDED
@@ -0,0 +1,23 @@
+ ---
+ tags:
+   - element_type:detect
+   - model:yolov11-nano
+   - object:person
+ manako:
+   description: Roboflow - generated by element_trainer service to detect person
+   source: element_trainer/800e961b-eb64-4380-880c-f1ed67abd563
+   prompt_hints: null
+   input_payload:
+     - name: frame
+       type: image
+       description: RGB frame
+   output_payload:
+     - name: detections
+       type: detections
+       description: List of detections
+   evaluation_score: null
+   last_benchmark:
+     type: synthetic_fixed
+     ran_at: '2026-03-06T02:20:51.927289Z'
+     result_path: benchmark/synthetic/1ada5b1e-38b8-4bdc-967a-d8a27b0e6afb.json
+ ---
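
The front matter above declares the model's contract: one RGB image in (`frame`), one list of detections out. A minimal local sketch of that contract, assuming the `Miner` class from `miner.py` below and a local download of this repo (the repo id placeholder is hypothetical):

```python
from pathlib import Path

import cv2
from huggingface_hub import snapshot_download

from miner import Miner  # miner.py from this commit

# Fetch weights.onnx alongside the rest of the repo.
repo_dir = Path(snapshot_download("alfred8995/<repo-id>"))  # placeholder id
miner = Miner(path_hf_repo=repo_dir)

frame = cv2.imread("frame.jpg")  # BGR uint8; Miner converts to RGB internally
(result,) = miner.predict_batch([frame], offset=0, n_keypoints=0)
for box in result.boxes:
    print(box.x1, box.y1, box.x2, box.y2, round(box.conf, 3))
```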
chute_config.yml ADDED
@@ -0,0 +1,24 @@
+ Image:
+   from_base: parachutes/python:3.12
+   run_command:
+     - pip install --upgrade setuptools wheel
+     - pip install 'numpy>=1.23' 'onnxruntime-gpu[cuda,cudnn]>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9'
+     - pip install torch torchvision
+
+ NodeSelector:
+   gpu_count: 1
+   min_vram_gb_per_gpu: 16
+   max_hourly_price_per_gpu: 0.5
+
+   exclude:
+     - "5090"
+     - b200
+     - h200
+     - mi300x
+
+ Chute:
+   timeout_seconds: 900
+   concurrency: 4
+   max_instances: 5
+   scaling_threshold: 0.5
+   shutdown_after_seconds: 288000
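
Taken together, the NodeSelector and Chute blocks bound worst-case spend; a quick illustrative check of the numbers above (not part of the config):

```python
# All values copied from chute_config.yml above.
max_hourly_price_per_gpu = 0.5    # USD
gpu_count = 1
max_instances = 5
shutdown_after_seconds = 288_000  # 80 hours

idle_hours = shutdown_after_seconds / 3600  # 80.0
# Upper bound if every instance ran one full idle window:
print(max_hourly_price_per_gpu * gpu_count * max_instances * idle_hours)  # 200.0
```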
miner.py ADDED
@@ -0,0 +1,644 @@
+ from pathlib import Path
+ import math
+
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+ from numpy import ndarray
+ from pydantic import BaseModel
+
+
+ class BoundingBox(BaseModel):
+     x1: int
+     y1: int
+     x2: int
+     y2: int
+     cls_id: int
+     conf: float
+
+
+ class TVFrameResult(BaseModel):
+     frame_id: int
+     boxes: list[BoundingBox]
+     keypoints: list[tuple[int, int]]
+
+
+ class Miner:
+     def __init__(self,
+         path_hf_repo: Path,
+         conf_thres: float = 0.3109,
+         iou_thres: float = 0.5181,
+         max_det: int = 150,
+         conf_high: float = 0.6062,
+         tta_match_iou: float = 0.6772,
+         conf_adapt_low: float = 0.4076,
+         conf_adapt_high: float = 0.6375,
+         count_low: int = 5,
+         count_high: int = 23,
+         use_tta: bool = True,
+         min_box_area: int = 14 * 14,
+         min_w: int = 8,
+         min_h: int = 22,
+         max_aspect_ratio: float = 6.5,
+         max_box_area_ratio: float = 0.8,
+     ) -> None:
+         model_path = path_hf_repo / "weights.onnx"
+         self.class_names = ["person"]
+         print("ORT version:", ort.__version__)
+
+         try:
+             ort.preload_dlls()
+             print("✅ onnxruntime.preload_dlls() success")
+         except Exception as e:
+             print(f"⚠️ preload_dlls failed: {e}")
+
+         print("ORT available providers BEFORE session:", ort.get_available_providers())
+
+         sess_options = ort.SessionOptions()
+         sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+         try:
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
+             )
+             print("✅ Created ORT session with preferred CUDA provider list")
+         except Exception as e:
+             print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CPUExecutionProvider"],
+             )
+
+         print("ORT session providers:", self.session.get_providers())
+
+         for inp in self.session.get_inputs():
+             print("INPUT:", inp.name, inp.shape, inp.type)
+
+         for out in self.session.get_outputs():
+             print("OUTPUT:", out.name, out.shape, out.type)
+
+         self.input_name = self.session.get_inputs()[0].name
+         self.output_names = [output.name for output in self.session.get_outputs()]
+         self.input_shape = self.session.get_inputs()[0].shape
+
+         self.input_height = self._safe_dim(self.input_shape[2], default=1280)
+         self.input_width = self._safe_dim(self.input_shape[3], default=1280)
+
+         # --- Scoring-aware adaptive confidence ---
+         # total_score = mAP50 * 0.65 + FP_score * 0.35
+         # FP_score = max(0, 1 - n_FP / n_images), typically n_images ≈ 10
+         #
+         # mAP50 weight is higher for person detection → favor recall slightly more
+         # Crossover at ~1.9 GT/image: below → recall wins, above → precision wins
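+         # Back-of-the-envelope check of that crossover (illustrative, assuming
+         # per-image AP averaging and n_images = 10): a marginal TP in an image
+         # with g ground-truth persons raises that image's recall by 1/g, adding
+         # roughly 0.65 * (1/10) * (1/g) = 0.065/g to total_score, while each FP
+         # costs a flat 0.35 / 10 = 0.035. They break even at 0.065/g = 0.035,
+         # i.e. g ≈ 1.9 GT/image.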
+         self.conf_thres = conf_thres  # Base threshold for candidate generation (wide net)
+         self.iou_thres = iou_thres  # NMS threshold
+         self.max_det = max_det
+
+         # TTA consensus thresholds
+         self.conf_high = conf_high  # Boxes above this survive without TTA confirmation
+         self.tta_match_iou = tta_match_iou  # TTA cross-view match IoU
+
+         # Adaptive conf curve: lerp between low/high based on raw detection count
+         self.conf_adapt_low = conf_adapt_low  # Few objects: favor recall, each TP ≈ 0.065+ of total
+         self.conf_adapt_high = conf_adapt_high  # Many objects: favor precision, FP costs 0.035 each
+         self.count_low = count_low  # Raw count below this → use conf_adapt_low
+         self.count_high = count_high  # Raw count above this → use conf_adapt_high
+
+         self.use_tta = use_tta
+
+         # Box sanity filters
+         self.min_box_area = min_box_area
+         self.min_w = min_w
+         self.min_h = min_h
+         self.max_aspect_ratio = max_aspect_ratio
+         self.max_box_area_ratio = max_box_area_ratio
+
+         print(f"✅ ONNX model loaded from: {model_path}")
+         print(f"✅ ONNX providers: {self.session.get_providers()}")
+         print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")
+
+     def __repr__(self) -> str:
+         return (
+             f"ONNXRuntime(session={type(self.session).__name__}, "
+             f"providers={self.session.get_providers()})"
+         )
+
+     @staticmethod
+     def _safe_dim(value, default: int) -> int:
+         return value if isinstance(value, int) and value > 0 else default
+
+     def _letterbox(
+         self,
+         image: ndarray,
+         new_shape: tuple[int, int],
+         color=(114, 114, 114),
+     ) -> tuple[ndarray, float, tuple[float, float]]:
+         h, w = image.shape[:2]
+         new_w, new_h = new_shape
+
+         ratio = min(new_w / w, new_h / h)
+         resized_w = int(round(w * ratio))
+         resized_h = int(round(h * ratio))
+
+         if (resized_w, resized_h) != (w, h):
+             interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
+             image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)
+
+         dw = new_w - resized_w
+         dh = new_h - resized_h
+         dw /= 2.0
+         dh /= 2.0
+
+         left = int(round(dw - 0.1))
+         right = int(round(dw + 0.1))
+         top = int(round(dh - 0.1))
+         bottom = int(round(dh + 0.1))
+
+         padded = cv2.copyMakeBorder(
+             image,
+             top,
+             bottom,
+             left,
+             right,
+             borderType=cv2.BORDER_CONSTANT,
+             value=color,
+         )
+         return padded, ratio, (dw, dh)
+
+     def _preprocess(
+         self, image: ndarray
+     ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
+         orig_h, orig_w = image.shape[:2]
+
+         img, ratio, pad = self._letterbox(
+             image, (self.input_width, self.input_height)
+         )
+         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         img = img.astype(np.float32) / 255.0
+         img = np.transpose(img, (2, 0, 1))[None, ...]
+         img = np.ascontiguousarray(img, dtype=np.float32)
+
+         return img, ratio, pad, (orig_w, orig_h)
+
+     @staticmethod
+     def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
+         w, h = image_size
+         boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
+         boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
+         boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
+         boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
+         return boxes
+
+     @staticmethod
+     def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
+         out = np.empty_like(boxes)
+         out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
+         out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
+         out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
+         out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
+         return out
+
+     @staticmethod
+     def _hard_nms(
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         iou_thresh: float,
+     ) -> np.ndarray:
+         if len(boxes) == 0:
+             return np.array([], dtype=np.intp)
+
+         boxes = np.asarray(boxes, dtype=np.float32)
+         scores = np.asarray(scores, dtype=np.float32)
+         order = np.argsort(scores)[::-1]
+         keep = []
+
+         while len(order) > 0:
+             i = order[0]
+             keep.append(i)
+             if len(order) == 1:
+                 break
+
+             rest = order[1:]
+
+             xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
+             yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
+             xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
+             yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
+
+             inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+
+             area_i = np.maximum(0.0, (boxes[i, 2] - boxes[i, 0])) * np.maximum(0.0, (boxes[i, 3] - boxes[i, 1]))
+             area_r = np.maximum(0.0, (boxes[rest, 2] - boxes[rest, 0])) * np.maximum(0.0, (boxes[rest, 3] - boxes[rest, 1]))
+
+             iou = inter / (area_i + area_r - inter + 1e-7)
+             order = rest[iou <= iou_thresh]
+
+         return np.array(keep, dtype=np.intp)
+
+     @staticmethod
+     def _box_iou_one_to_many(box: np.ndarray, boxes: np.ndarray) -> np.ndarray:
+         xx1 = np.maximum(box[0], boxes[:, 0])
+         yy1 = np.maximum(box[1], boxes[:, 1])
+         xx2 = np.minimum(box[2], boxes[:, 2])
+         yy2 = np.minimum(box[3], boxes[:, 3])
+
+         inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+
+         area_a = max(0.0, (box[2] - box[0]) * (box[3] - box[1]))
+         area_b = np.maximum(0.0, boxes[:, 2] - boxes[:, 0]) * np.maximum(0.0, boxes[:, 3] - boxes[:, 1])
+
+         return inter / (area_a + area_b - inter + 1e-7)
+
+     def _filter_sane_boxes(
+         self,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         orig_size: tuple[int, int],
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+         if len(boxes) == 0:
+             return boxes, scores, cls_ids
+
+         orig_w, orig_h = orig_size
+         image_area = float(orig_w * orig_h)
+
+         keep = []
+         for i, box in enumerate(boxes):
+             x1, y1, x2, y2 = box.tolist()
+             bw = x2 - x1
+             bh = y2 - y1
+
+             if bw <= 0 or bh <= 0:
+                 continue
+             if bw < self.min_w or bh < self.min_h:
+                 continue
+
+             area = bw * bh
+             if area < self.min_box_area:
+                 continue
+             if area > self.max_box_area_ratio * image_area:
+                 continue
+
+             ar = max(bw / max(bh, 1e-6), bh / max(bw, 1e-6))
+             if ar > self.max_aspect_ratio:
+                 continue
+
+             keep.append(i)
+
+         if not keep:
+             return (
+                 np.empty((0, 4), dtype=np.float32),
+                 np.empty((0,), dtype=np.float32),
+                 np.empty((0,), dtype=np.int32),
+             )
+
+         keep = np.array(keep, dtype=np.intp)
+         return boxes[keep], scores[keep], cls_ids[keep]
+
+     def _decode_final_dets(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         if preds.ndim == 3 and preds.shape[0] == 1:
+             preds = preds[0]
+
+         if preds.ndim != 2 or preds.shape[1] < 6:
+             raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")
+
+         boxes = preds[:, :4].astype(np.float32)
+         scores = preds[:, 4].astype(np.float32)
+         cls_ids = preds[:, 5].astype(np.int32)
+
+         # person only
+         keep = cls_ids == 0
+         boxes = boxes[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         # candidate threshold
+         keep = scores >= self.conf_thres
+         boxes = boxes[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         if len(boxes) == 0:
+             return []
+
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+
+         boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
+         if len(boxes) == 0:
+             return []
+
+         keep_idx = self._hard_nms(boxes, scores, self.iou_thres)
+         keep_idx = keep_idx[: self.max_det]
+
+         boxes = boxes[keep_idx]
+         scores = scores[keep_idx]
+         cls_ids = cls_ids[keep_idx]
+
+         return [
+             BoundingBox(
+                 x1=int(math.floor(box[0])),
+                 y1=int(math.floor(box[1])),
+                 x2=int(math.ceil(box[2])),
+                 y2=int(math.ceil(box[3])),
+                 cls_id=int(cls_id),
+                 conf=float(conf),
+             )
+             for box, conf, cls_id in zip(boxes, scores, cls_ids)
+             if box[2] > box[0] and box[3] > box[1]
+         ]
+
+     def _decode_raw_yolo(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         if preds.ndim != 3:
+             raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")
+         if preds.shape[0] != 1:
+             raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")
+
+         preds = preds[0]
+
+         # Normalize to [N, C]
+         if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
+             preds = preds.T
+
+         if preds.ndim != 2 or preds.shape[1] < 5:
+             raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")
+
+         boxes_xywh = preds[:, :4].astype(np.float32)
+         tail = preds[:, 4:].astype(np.float32)
+
+         # Supports:
+         #   [x,y,w,h,score]            single-class
+         #   [x,y,w,h,obj,cls]          YOLO standard single-class
+         #   [x,y,w,h,obj,cls1,cls2,...] multi-class
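+         # For reference (illustrative numbers): a single-class YOLOv11 export at
+         # 1280×1280 typically emits [1, 5, 33600], where 33600 = 160² + 80² + 40²
+         # anchor points for strides 8/16/32; the transpose above turns that into
+         # the [33600, 5] layout handled here.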
+         if tail.shape[1] == 1:
+             scores = tail[:, 0]
+             cls_ids = np.zeros(len(scores), dtype=np.int32)
+         elif tail.shape[1] == 2:
+             obj = tail[:, 0]
+             cls_prob = tail[:, 1]
+             scores = obj * cls_prob
+             cls_ids = np.zeros(len(scores), dtype=np.int32)
+         else:
+             obj = tail[:, 0]
+             class_probs = tail[:, 1:]
+             cls_ids = np.argmax(class_probs, axis=1).astype(np.int32)
+             cls_scores = class_probs[np.arange(len(class_probs)), cls_ids]
+             scores = obj * cls_scores
+
+         keep = cls_ids == 0
+         boxes_xywh = boxes_xywh[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         keep = scores >= self.conf_thres
+         boxes_xywh = boxes_xywh[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         if len(boxes_xywh) == 0:
+             return []
+
+         boxes = self._xywh_to_xyxy(boxes_xywh)
+
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+
+         boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
+         if len(boxes) == 0:
+             return []
+
+         keep_idx = self._hard_nms(boxes, scores, self.iou_thres)
+         keep_idx = keep_idx[: self.max_det]
+
+         boxes = boxes[keep_idx]
+         scores = scores[keep_idx]
+         cls_ids = cls_ids[keep_idx]
+
+         return [
+             BoundingBox(
+                 x1=int(math.floor(box[0])),
+                 y1=int(math.floor(box[1])),
+                 x2=int(math.ceil(box[2])),
+                 y2=int(math.ceil(box[3])),
+                 cls_id=int(cls_id),
+                 conf=float(conf),
+             )
+             for box, conf, cls_id in zip(boxes, scores, cls_ids)
+             if box[2] > box[0] and box[3] > box[1]
+         ]
+
+     def _postprocess(
+         self,
+         output: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         if output.ndim == 2 and output.shape[1] >= 6:
+             return self._decode_final_dets(output, ratio, pad, orig_size)
+
+         if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] >= 6:
+             return self._decode_final_dets(output, ratio, pad, orig_size)
+
+         return self._decode_raw_yolo(output, ratio, pad, orig_size)
+
+     def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
+         if image is None:
+             raise ValueError("Input image is None")
+         if not isinstance(image, np.ndarray):
+             raise TypeError(f"Input is not numpy array: {type(image)}")
+         if image.ndim != 3:
+             raise ValueError(f"Expected HWC image, got shape={image.shape}")
+         if image.shape[0] <= 0 or image.shape[1] <= 0:
+             raise ValueError(f"Invalid image shape={image.shape}")
+         if image.shape[2] != 3:
+             raise ValueError(f"Expected 3 channels, got shape={image.shape}")
+
+         if image.dtype != np.uint8:
+             image = image.astype(np.uint8)
+
+         input_tensor, ratio, pad, orig_size = self._preprocess(image)
+
+         expected_shape = (1, 3, self.input_height, self.input_width)
+         if input_tensor.shape != expected_shape:
+             raise ValueError(
+                 f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
+             )
+
+         outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
+         det_output = outputs[0]
+         return self._postprocess(det_output, ratio, pad, orig_size)
+
+     def _merge_tta_consensus(
+         self,
+         boxes_orig: list[BoundingBox],
+         boxes_flip: list[BoundingBox],
+     ) -> list[BoundingBox]:
+         """
+         Keep:
+           - any box with conf >= conf_high
+           - low/medium-conf boxes only if confirmed across TTA views
+         Then run final hard NMS.
+         """
+         if not boxes_orig and not boxes_flip:
+             return []
+
+         coords_o = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_orig], dtype=np.float32) if boxes_orig else np.empty((0, 4), dtype=np.float32)
+         scores_o = np.array([b.conf for b in boxes_orig], dtype=np.float32) if boxes_orig else np.empty((0,), dtype=np.float32)
+
+         coords_f = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_flip], dtype=np.float32) if boxes_flip else np.empty((0, 4), dtype=np.float32)
+         scores_f = np.array([b.conf for b in boxes_flip], dtype=np.float32) if boxes_flip else np.empty((0,), dtype=np.float32)
+
+         accepted_boxes = []
+         accepted_scores = []
+
+         # Original view candidates
+         for i in range(len(coords_o)):
+             score = scores_o[i]
+             if score >= self.conf_high:
+                 accepted_boxes.append(coords_o[i])
+                 accepted_scores.append(score)
+             elif len(coords_f) > 0:
+                 ious = self._box_iou_one_to_many(coords_o[i], coords_f)
+                 j = int(np.argmax(ious))
+                 if ious[j] >= self.tta_match_iou:
+                     fused_score = max(score, scores_f[j])
+                     accepted_boxes.append(coords_o[i])
+                     accepted_scores.append(fused_score)
+
+         # Flipped-view high-confidence boxes that original missed
+         for i in range(len(coords_f)):
+             score = scores_f[i]
+             if score < self.conf_high:
+                 continue
+
+             if len(coords_o) == 0:
+                 accepted_boxes.append(coords_f[i])
+                 accepted_scores.append(score)
+                 continue
+
+             ious = self._box_iou_one_to_many(coords_f[i], coords_o)
+             if np.max(ious) < self.tta_match_iou:
+                 accepted_boxes.append(coords_f[i])
+                 accepted_scores.append(score)
+
+         if not accepted_boxes:
+             return []
+
+         boxes = np.array(accepted_boxes, dtype=np.float32)
+         scores = np.array(accepted_scores, dtype=np.float32)
+
+         keep = self._hard_nms(boxes, scores, self.iou_thres)
+         keep = keep[: self.max_det]
+
+         out = []
+         for idx in keep:
+             x1, y1, x2, y2 = boxes[idx].tolist()
+             out.append(
+                 BoundingBox(
+                     x1=int(math.floor(x1)),
+                     y1=int(math.floor(y1)),
+                     x2=int(math.ceil(x2)),
+                     y2=int(math.ceil(y2)),
+                     cls_id=0,
+                     conf=float(scores[idx]),
+                 )
+             )
+         return out
+
+     def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
+         boxes_orig = self._predict_single(image)
+
+         flipped = cv2.flip(image, 1)
+         boxes_flip_raw = self._predict_single(flipped)
+
+         w = image.shape[1]
+         boxes_flip = [
+             BoundingBox(
+                 x1=w - b.x2,
+                 y1=b.y1,
+                 x2=w - b.x1,
+                 y2=b.y2,
+                 cls_id=b.cls_id,
+                 conf=b.conf,
+             )
+             for b in boxes_flip_raw
+         ]
+
+         return self._merge_tta_consensus(boxes_orig, boxes_flip)
+
+     def _adaptive_conf_threshold(self, n_raw: int) -> float:
+         """
+         Dynamic confidence threshold based on raw detection count.
+
+         total_score = mAP50 * 0.65 + FP_score * 0.35
+           - Few objects → each TP worth ~0.065/n for mAP50 → keep low conf (maximize recall)
+           - Many objects → each TP worth little, FPs dominate → raise conf (minimize FP)
+         """
+         if n_raw <= self.count_low:
+             return self.conf_adapt_low
+         if n_raw >= self.count_high:
+             return self.conf_adapt_high
+         t = (n_raw - self.count_low) / (self.count_high - self.count_low)
+         return self.conf_adapt_low + t * (self.conf_adapt_high - self.conf_adapt_low)
+
+     def _apply_adaptive_filter(self, boxes: list[BoundingBox]) -> list[BoundingBox]:
+         if not boxes:
+             return boxes
+         n_raw = len(boxes)
+         thresh = self._adaptive_conf_threshold(n_raw)
+         return [b for b in boxes if b.conf >= thresh]
+
+     def predict_batch(
+         self,
+         batch_images: list[ndarray],
+         offset: int,
+         n_keypoints: int,
+     ) -> list[TVFrameResult]:
+         results: list[TVFrameResult] = []
+
+         for frame_number_in_batch, image in enumerate(batch_images):
+             try:
+                 if self.use_tta:
+                     boxes = self._predict_tta(image)
+                 else:
+                     boxes = self._predict_single(image)
+                 boxes = self._apply_adaptive_filter(boxes)
+             except Exception as e:
+                 print(f"⚠️ Inference failed for frame {offset + frame_number_in_batch}: {e}")
+                 boxes = []
+
+             results.append(
+                 TVFrameResult(
+                     frame_id=offset + frame_number_in_batch,
+                     boxes=boxes,
+                     keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
+                 )
+             )
+
+         return results
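
As a quick smoke test of the batch path (a sketch, assuming weights.onnx sits in the working directory, e.g. a local clone of this repo; the CPU fallback makes it runnable without a GPU):

```python
from pathlib import Path

import numpy as np

from miner import Miner

miner = Miner(path_hf_repo=Path("."), use_tta=False)  # CPU provider if CUDA is absent

frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # synthetic blank BGR frame
results = miner.predict_batch([frame], offset=0, n_keypoints=2)

assert len(results) == 1 and results[0].frame_id == 0
print(results[0].boxes)      # expect no detections on a blank frame
print(results[0].keypoints)  # [(0, 0), (0, 0)]: zero-filled placeholders
```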
weights.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07de8ad467ec6807d2833815ff4d02855ecfd35915609b2f0734f58aeb0ceb25
+ size 19405530