SuperBitDev committed
Commit 2f35295 · verified · 1 parent: f93849f

Upload folder using huggingface_hub

Files changed (3)
  1. chute_config.yml +25 -0
  2. miner.py +715 -0
  3. weights.onnx +3 -0
chute_config.yml ADDED
@@ -0,0 +1,25 @@
Image:
  from_base: parachutes/python:3.12
  run_command:
    - pip install --upgrade setuptools wheel
    - pip install torch==2.8.0+cu128 torchvision==0.23.0+cu128 --extra-index-url https://download.pytorch.org/whl/cu128
    - pip install 'numpy>=1.23' 'onnxruntime-gpu[cuda,cudnn]>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9' tqdm ultralytics supervision

NodeSelector:
  gpu_count: 1
  min_vram_gb_per_gpu: 16
  max_hourly_price_per_gpu: 0.5

  exclude:
    - "5090"
    - b200
    - h200
    - a100
    - mi300x

Chute:
  timeout_seconds: 900
  concurrency: 4
  max_instances: 5
  scaling_threshold: 0.5
  shutdown_after_seconds: 288000
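For a quick local sanity check of this config before pushing, a minimal sketch using the pyyaml dependency pinned above (the file name chute_config.yml matches the upload; the nesting of exclude under NodeSelector is an assumption of this reconstruction):

import yaml  # pyyaml, installed by run_command above

with open("chute_config.yml", "r", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

# Spot-check the fields the scheduler acts on.
assert cfg["NodeSelector"]["gpu_count"] == 1
assert cfg["NodeSelector"]["min_vram_gb_per_gpu"] >= 16
assert cfg["Chute"]["concurrency"] == 4
print("config OK:", ", ".join(cfg))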
miner.py ADDED
@@ -0,0 +1,715 @@
from pathlib import Path
import math

import cv2
import numpy as np
import onnxruntime as ort
from numpy import ndarray
from pydantic import BaseModel


class BoundingBox(BaseModel):
    x1: int
    y1: int
    x2: int
    y2: int
    cls_id: int
    conf: float


class TVFrameResult(BaseModel):
    frame_id: int
    boxes: list[BoundingBox]
    keypoints: list[tuple[int, int]]


SIZE = 1280

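# Illustrative (not part of the upload): the pydantic models validate and
# serialize plainly, e.g.
#   BoundingBox(x1=10, y1=20, x2=110, y2=220, cls_id=1, conf=0.9).model_dump()
#   -> {'x1': 10, 'y1': 20, 'x2': 110, 'y2': 220, 'cls_id': 1, 'conf': 0.9}
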
class Miner:
    def __init__(self, path_hf_repo: Path) -> None:
        model_path = path_hf_repo / "weights.onnx"
        cn_path = model_path.with_name("class_names.txt")
        if cn_path.is_file():
            lines = cn_path.read_text(encoding="utf-8").splitlines()
            self.class_names = [
                ln.strip()
                for ln in lines
                if ln.strip() and not ln.strip().startswith("#")
            ]
        else:
            self.class_names = ["petrol_hose", "petrol_pump", "price board", "roof canopy"]
        print("ORT version:", ort.__version__)

        try:
            ort.preload_dlls()
            print("✅ onnxruntime.preload_dlls() success")
        except Exception as e:
            print(f"⚠️ preload_dlls failed: {e}")

        print("ORT available providers BEFORE session:", ort.get_available_providers())

        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        try:
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
            )
            print("✅ Created ORT session with preferred CUDA provider list")
        except Exception as e:
            print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CPUExecutionProvider"],
            )

        print("ORT session providers:", self.session.get_providers())

        for inp in self.session.get_inputs():
            print("INPUT:", inp.name, inp.shape, inp.type)

        for out in self.session.get_outputs():
            print("OUTPUT:", out.name, out.shape, out.type)

        self.input_name = self.session.get_inputs()[0].name
        self.output_names = [output.name for output in self.session.get_outputs()]
        self.input_shape = self.session.get_inputs()[0].shape

        self.input_height = self._safe_dim(self.input_shape[2], default=SIZE)
        self.input_width = self._safe_dim(self.input_shape[3], default=SIZE)

        self.conf_thres = 0.36
        self.iou_thres = 0.5
        self.max_det = 100
        self.use_tta = True
        self.tile_size = SIZE
        self.overlap = 0.5
        self.use_slicer = True
        self.use_full_image_merge = True

        print(f"✅ ONNX model loaded from: {model_path}")
        print(f"✅ ONNX providers: {self.session.get_providers()}")
        print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")

    def __repr__(self) -> str:
        return (
            f"ONNXRuntime(session={type(self.session).__name__}, "
            f"providers={self.session.get_providers()})"
        )

    @staticmethod
    def _safe_dim(value, default: int) -> int:
        return value if isinstance(value, int) and value > 0 else default

    def _letterbox(
        self,
        image: ndarray,
        new_shape: tuple[int, int],
        color=(114, 114, 114),
    ) -> tuple[ndarray, float, tuple[float, float]]:
        """
        Resize with unchanged aspect ratio and pad to target shape.
        Returns:
            padded_image,
            ratio,
            (pad_w, pad_h)  # half-padding
        """
        h, w = image.shape[:2]
        new_w, new_h = new_shape

        ratio = min(new_w / w, new_h / h)
        resized_w = int(round(w * ratio))
        resized_h = int(round(h * ratio))

        if (resized_w, resized_h) != (w, h):
            interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
            image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)

        dw = new_w - resized_w
        dh = new_h - resized_h
        dw /= 2.0
        dh /= 2.0

        left = int(round(dw - 0.1))
        right = int(round(dw + 0.1))
        top = int(round(dh - 0.1))
        bottom = int(round(dh + 0.1))

        padded = cv2.copyMakeBorder(
            image,
            top,
            bottom,
            left,
            right,
            borderType=cv2.BORDER_CONSTANT,
            value=color,
        )
        return padded, ratio, (dw, dh)

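    # Worked example (illustrative, not part of the uploaded logic): a
    # 1920x1080 frame letterboxed to 1280x1280 gives
    # ratio = min(1280/1920, 1280/1080) = 2/3, a 1280x720 resize, and
    # (dw, dh) = (0.0, 280.0), i.e. 280 px of gray padding above and below.
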
    def _preprocess(
        self, image: ndarray
    ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
        """
        Preprocess for fixed-size ONNX export:
        - letterbox to model input size
        - BGR -> RGB
        - normalize to [0, 1]
        - HWC -> NCHW float32
        """
        orig_h, orig_w = image.shape[:2]

        img, ratio, pad = self._letterbox(
            image, (self.input_width, self.input_height)
        )
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255.0
        img = np.transpose(img, (2, 0, 1))[None, ...]
        img = np.ascontiguousarray(img, dtype=np.float32)

        return img, ratio, pad, (orig_w, orig_h)

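    # Shape check (illustrative): a 1920x1080 BGR frame comes back as a
    # (1, 3, 1280, 1280) float32 tensor in [0, 1], together with ratio 2/3 and
    # pad (0.0, 280.0) for undoing the letterbox in postprocessing.
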
    @staticmethod
    def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
        w, h = image_size
        boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
        boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
        boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
        boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
        return boxes

    @staticmethod
    def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
        out = np.empty_like(boxes)
        out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
        out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
        out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
        out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
        return out

    def _slice_image(
        self, image: np.ndarray
    ) -> list[tuple[np.ndarray, tuple[int, int], tuple[int, int]]]:
        h, w = image.shape[:2]
        t = self.tile_size
        st = max(1, int(t * (1.0 - self.overlap)))

        xs = []
        x = 0
        while True:
            if x + t >= w:
                xs.append(max(0, w - t))
                break
            xs.append(x)
            x += st

        ys = []
        y = 0
        while True:
            if y + t >= h:
                ys.append(max(0, h - t))
                break
            ys.append(y)
            y += st

        xs = list(dict.fromkeys(xs))
        ys = list(dict.fromkeys(ys))

        out = []
        for y0 in ys:
            for x0 in xs:
                x1 = min(x0 + t, w)
                y1 = min(y0 + t, h)
                crop = image[y0:y1, x0:x1]
                vh, vw = crop.shape[:2]
                out.append((crop, (x0, y0), (vw, vh)))
        return out

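    # Worked example (illustrative): with tile_size=1280 and overlap=0.5 the
    # stride is 640, so a 1920x1080 frame yields x offsets [0, 640] and y
    # offsets [0]: two 1280x1080 tiles overlapping by 640 px horizontally.
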
    def _soft_nms(
        self,
        boxes: np.ndarray,
        scores: np.ndarray,
        sigma: float = 0.5,
        score_thresh: float = 0.01,
    ) -> tuple[np.ndarray, np.ndarray]:
        """
        Soft-NMS: Gaussian decay of overlapping scores instead of hard removal.
        Returns (kept_original_indices, updated_scores).
        """
        N = len(boxes)
        if N == 0:
            return np.array([], dtype=np.intp), np.array([], dtype=np.float32)

        boxes = boxes.astype(np.float32, copy=True)
        scores = scores.astype(np.float32, copy=True)
        order = np.arange(N)

        for i in range(N):
            max_pos = i + int(np.argmax(scores[i:]))
            boxes[[i, max_pos]] = boxes[[max_pos, i]]
            scores[[i, max_pos]] = scores[[max_pos, i]]
            order[[i, max_pos]] = order[[max_pos, i]]

            if i + 1 >= N:
                break

            xx1 = np.maximum(boxes[i, 0], boxes[i + 1:, 0])
            yy1 = np.maximum(boxes[i, 1], boxes[i + 1:, 1])
            xx2 = np.minimum(boxes[i, 2], boxes[i + 1:, 2])
            yy2 = np.minimum(boxes[i, 3], boxes[i + 1:, 3])
            inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)

            area_i = max(0.0, float(
                (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
            ))
            areas_j = (
                np.maximum(0.0, boxes[i + 1:, 2] - boxes[i + 1:, 0])
                * np.maximum(0.0, boxes[i + 1:, 3] - boxes[i + 1:, 1])
            )
            iou = inter / (area_i + areas_j - inter + 1e-7)
            scores[i + 1:] *= np.exp(-(iou ** 2) / sigma)

        mask = scores > score_thresh
        return order[mask], scores[mask]

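    # Decay example (illustrative): with sigma=0.5 a box overlapping the
    # current maximum at IoU 0.7 keeps exp(-0.49/0.5) ≈ 0.38 of its score,
    # while one at IoU 0.3 keeps exp(-0.09/0.5) ≈ 0.84; overlaps fade rather
    # than vanish, and only scores below score_thresh are dropped.
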
    @staticmethod
    def _hard_nms(
        boxes: np.ndarray,
        scores: np.ndarray,
        iou_thresh: float,
    ) -> np.ndarray:
        """
        Standard NMS: keep one box per overlapping cluster (the one with highest score).
        Returns indices of kept boxes (into the boxes/scores arrays).
        """
        N = len(boxes)
        if N == 0:
            return np.array([], dtype=np.intp)
        boxes = np.asarray(boxes, dtype=np.float32)
        scores = np.asarray(scores, dtype=np.float32)
        order = np.argsort(scores)[::-1]
        keep: list[int] = []
        suppressed = np.zeros(N, dtype=bool)
        for i in range(N):
            idx = order[i]
            if suppressed[idx]:
                continue
            keep.append(idx)
            bi = boxes[idx]
            for k in range(i + 1, N):
                jdx = order[k]
                if suppressed[jdx]:
                    continue
                bj = boxes[jdx]
                xx1 = max(bi[0], bj[0])
                yy1 = max(bi[1], bj[1])
                xx2 = min(bi[2], bj[2])
                yy2 = min(bi[3], bj[3])
                inter = max(0.0, xx2 - xx1) * max(0.0, yy2 - yy1)
                area_i = (bi[2] - bi[0]) * (bi[3] - bi[1])
                area_j = (bj[2] - bj[0]) * (bj[3] - bj[1])
                iou = inter / (area_i + area_j - inter + 1e-7)
                if iou > iou_thresh:
                    suppressed[jdx] = True
        return np.array(keep)

    def _hard_nms_by_class(
        self,
        boxes: np.ndarray,
        scores: np.ndarray,
        cls_ids: np.ndarray,
        iou_thresh: float,
    ) -> np.ndarray:
        if len(boxes) == 0:
            return np.array([], dtype=np.intp)
        keep_all: list[int] = []
        for c in np.unique(cls_ids):
            m = cls_ids == c
            inds = np.flatnonzero(m)
            sub_keep = self._hard_nms(boxes[m], scores[m], iou_thresh)
            keep_all.extend(int(inds[i]) for i in sub_keep)
        keep_all = np.asarray(keep_all, dtype=np.intp)
        order = np.argsort(scores[keep_all])[::-1][: self.max_det]
        return keep_all[order]

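    # Note: suppression above is per class, so an overlapping petrol_pump box
    # and petrol_hose box can both survive; only same-class duplicates past
    # iou_thresh are merged, and the merged list is capped at max_det.
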
    @staticmethod
    def _max_score_per_cluster(
        coords: np.ndarray,
        scores: np.ndarray,
        keep_indices: np.ndarray,
        iou_thresh: float,
    ) -> np.ndarray:
        """
        For each kept box, return the max original score among itself and any
        box that overlaps it with IOU >= iou_thresh (so TTA cluster keeps best conf).
        """
        n_keep = len(keep_indices)
        if n_keep == 0:
            return np.array([], dtype=np.float32)
        out = np.empty(n_keep, dtype=np.float32)
        coords = np.asarray(coords, dtype=np.float32)
        scores = np.asarray(scores, dtype=np.float32)
        for i in range(n_keep):
            idx = keep_indices[i]
            bi = coords[idx]
            xx1 = np.maximum(bi[0], coords[:, 0])
            yy1 = np.maximum(bi[1], coords[:, 1])
            xx2 = np.minimum(bi[2], coords[:, 2])
            yy2 = np.minimum(bi[3], coords[:, 3])
            inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
            area_i = (bi[2] - bi[0]) * (bi[3] - bi[1])
            areas_j = (coords[:, 2] - coords[:, 0]) * (coords[:, 3] - coords[:, 1])
            iou = inter / (area_i + areas_j - inter + 1e-7)
            in_cluster = iou >= iou_thresh
            out[i] = float(np.max(scores[in_cluster]))
        return out

    def _decode_final_dets(
        self,
        preds: np.ndarray,
        ratio: float,
        pad: tuple[float, float],
        orig_size: tuple[int, int],
        apply_optional_dedup: bool = False,
    ) -> list[BoundingBox]:
        """
        Primary path:
            expected output rows like [x1, y1, x2, y2, conf, cls_id]
            in letterboxed input coordinates.
        """
        if preds.ndim == 3 and preds.shape[0] == 1:
            preds = preds[0]

        if preds.ndim != 2 or preds.shape[1] < 6:
            raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")

        boxes = preds[:, :4].astype(np.float32)
        scores = preds[:, 4].astype(np.float32)
        cls_ids = preds[:, 5].astype(np.int32)

        keep = scores >= self.conf_thres
        boxes = boxes[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]

        if len(boxes) == 0:
            return []

        pad_w, pad_h = pad
        orig_w, orig_h = orig_size

        # reverse letterbox
        boxes[:, [0, 2]] -= pad_w
        boxes[:, [1, 3]] -= pad_h
        boxes /= ratio
        boxes = self._clip_boxes(boxes, (orig_w, orig_h))

        if apply_optional_dedup and len(boxes) > 1:
            keep_idx, scores = self._soft_nms(boxes, scores)
            boxes = boxes[keep_idx]
            cls_ids = cls_ids[keep_idx]

        results: list[BoundingBox] = []
        for box, conf, cls_id in zip(boxes, scores, cls_ids):
            x1, y1, x2, y2 = box.tolist()

            if x2 <= x1 or y2 <= y1:
                continue

            results.append(
                BoundingBox(
                    x1=int(math.floor(x1)),
                    y1=int(math.floor(y1)),
                    x2=int(math.ceil(x2)),
                    y2=int(math.ceil(y2)),
                    cls_id=int(cls_id),
                    conf=float(conf),
                )
            )

        return results

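    # Mapping example (illustrative): with ratio 2/3 and pad (0.0, 280.0), a
    # letterboxed detection at (x1, y1) = (400, 480) maps back to
    # ((400 - 0) / (2/3), (480 - 280) / (2/3)) = (600, 300) in the original
    # 1920x1080 frame.
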
    def _decode_raw_yolo(
        self,
        preds: np.ndarray,
        ratio: float,
        pad: tuple[float, float],
        orig_size: tuple[int, int],
    ) -> list[BoundingBox]:
        """
        Fallback path for raw YOLO predictions.
        Supports common layouts:
            - [1, C, N]
            - [1, N, C]
        """
        if preds.ndim != 3:
            raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")

        if preds.shape[0] != 1:
            raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")

        preds = preds[0]

        # Normalize to [N, C]
        if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
            preds = preds.T

        if preds.ndim != 2 or preds.shape[1] < 5:
            raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")

        boxes_xywh = preds[:, :4].astype(np.float32)
        cls_part = preds[:, 4:].astype(np.float32)

        if cls_part.shape[1] == 1:
            scores = cls_part[:, 0]
            cls_ids = np.zeros(len(scores), dtype=np.int32)
        else:
            cls_ids = np.argmax(cls_part, axis=1).astype(np.int32)
            scores = cls_part[np.arange(len(cls_part)), cls_ids]

        keep = scores >= self.conf_thres
        boxes_xywh = boxes_xywh[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]

        if len(boxes_xywh) == 0:
            return []

        boxes = self._xywh_to_xyxy(boxes_xywh)
        keep_idx, scores = self._soft_nms(boxes, scores)
        keep_idx = keep_idx[: self.max_det]
        scores = scores[: self.max_det]

        boxes = boxes[keep_idx]
        cls_ids = cls_ids[keep_idx]

        pad_w, pad_h = pad
        orig_w, orig_h = orig_size

        boxes[:, [0, 2]] -= pad_w
        boxes[:, [1, 3]] -= pad_h
        boxes /= ratio
        boxes = self._clip_boxes(boxes, (orig_w, orig_h))

        results: list[BoundingBox] = []
        for box, conf, cls_id in zip(boxes, scores, cls_ids):
            x1, y1, x2, y2 = box.tolist()

            if x2 <= x1 or y2 <= y1:
                continue

            results.append(
                BoundingBox(
                    x1=int(math.floor(x1)),
                    y1=int(math.floor(y1)),
                    x2=int(math.ceil(x2)),
                    y2=int(math.ceil(y2)),
                    cls_id=int(cls_id),
                    conf=float(conf),
                )
            )

        return results

    def _postprocess(
        self,
        output: np.ndarray,
        ratio: float,
        pad: tuple[float, float],
        orig_size: tuple[int, int],
    ) -> list[BoundingBox]:
        """
        Prefer final detections first.
        Fallback to raw decode only if needed.
        """
        # final detections: [N, 6]
        if output.ndim == 2 and output.shape[1] >= 6:
            return self._decode_final_dets(output, ratio, pad, orig_size)

        # final detections: [1, N, 6]
        if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] == 6:
            return self._decode_final_dets(output, ratio, pad, orig_size)

        # fallback raw decode
        return self._decode_raw_yolo(output, ratio, pad, orig_size)

    def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
        if image is None:
            raise ValueError("Input image is None")
        if not isinstance(image, np.ndarray):
            raise TypeError(f"Input is not numpy array: {type(image)}")
        if image.ndim != 3:
            raise ValueError(f"Expected HWC image, got shape={image.shape}")
        if image.shape[0] <= 0 or image.shape[1] <= 0:
            raise ValueError(f"Invalid image shape={image.shape}")
        if image.shape[2] != 3:
            raise ValueError(f"Expected 3 channels, got shape={image.shape}")

        if image.dtype != np.uint8:
            image = image.astype(np.uint8)

        input_tensor, ratio, pad, orig_size = self._preprocess(image)

        expected_shape = (1, 3, self.input_height, self.input_width)
        if input_tensor.shape != expected_shape:
            raise ValueError(
                f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
            )

        outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
        det_output = outputs[0]
        return self._postprocess(det_output, ratio, pad, orig_size)

    def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
        """Horizontal-flip TTA: merge original + flipped via hard NMS."""
        boxes_orig = self._predict_single(image)

        flipped = cv2.flip(image, 1)
        boxes_flip = self._predict_single(flipped)

        w = image.shape[1]
        boxes_flip = [
            BoundingBox(
                x1=w - b.x2, y1=b.y1, x2=w - b.x1, y2=b.y2,
                cls_id=b.cls_id, conf=b.conf,
            )
            for b in boxes_flip
        ]

        all_boxes = boxes_orig + boxes_flip
        if len(all_boxes) == 0:
            return []

        coords = np.array(
            [[b.x1, b.y1, b.x2, b.y2] for b in all_boxes], dtype=np.float32
        )
        scores = np.array([b.conf for b in all_boxes], dtype=np.float32)

        hard_keep = self._hard_nms(coords, scores, self.iou_thres)
        if len(hard_keep) == 0:
            return []

        # _hard_nms already orders kept indices by descending score.
        hard_keep = hard_keep[: self.max_det]

        return [
            BoundingBox(
                x1=all_boxes[i].x1,
                y1=all_boxes[i].y1,
                x2=all_boxes[i].x2,
                y2=all_boxes[i].y2,
                cls_id=all_boxes[i].cls_id,
                conf=float(scores[i]),
            )
            for i in hard_keep
        ]

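    # Flip mapping example (illustrative): on a 1920-wide frame, a detection at
    # (x1=100, x2=300) in the mirrored image corresponds to
    # (x1=1920-300, x2=1920-100) = (1620, 1820) in the original.
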
    def predict_image(self, image: np.ndarray) -> list[BoundingBox]:
        if image is None:
            raise ValueError("Input image is None")
        if not isinstance(image, np.ndarray):
            raise TypeError(f"Input is not numpy array: {type(image)}")
        if image.ndim != 3 or image.shape[2] != 3:
            raise ValueError(f"Expected HWC image with 3 channels, got shape={image.shape}")

        H, W = image.shape[:2]
        all_boxes: list[list[float]] = []
        all_scores: list[float] = []
        all_cls: list[int] = []

        if self.use_slicer:
            tiles = self._slice_image(image)
            for tile_img, (ox, oy), (vw, vh) in tiles:
                try:
                    dets = self._predict_tta(tile_img) if self.use_tta else self._predict_single(tile_img)
                except Exception as e:
                    print(f"⚠️ Tile inference failed at ({ox}, {oy}): {e}")
                    continue

                left_edge = ox == 0
                top_edge = oy == 0
                right_edge = (ox + vw) >= W
                bottom_edge = (oy + vh) >= H

                for b in dets:
                    bw = b.x2 - b.x1
                    bh = b.y2 - b.y1
                    m = max(8, int(min(bw, bh) * 0.2))
                    if not left_edge and b.x1 < m:
                        continue
                    if not top_edge and b.y1 < m:
                        continue
                    if not right_edge and b.x2 > (vw - m):
                        continue
                    if not bottom_edge and b.y2 > (vh - m):
                        continue

                    x1 = max(0, min(W - 1, int(b.x1 + ox)))
                    y1 = max(0, min(H - 1, int(b.y1 + oy)))
                    x2 = max(0, min(W - 1, int(b.x2 + ox)))
                    y2 = max(0, min(H - 1, int(b.y2 + oy)))
                    if x2 > x1 and y2 > y1:
                        all_boxes.append([x1, y1, x2, y2])
                        all_scores.append(float(b.conf))
                        all_cls.append(int(b.cls_id))

        if self.use_full_image_merge or not self.use_slicer:
            full_dets = self._predict_tta(image) if self.use_tta else self._predict_single(image)
            for b in full_dets:
                if b.x2 > b.x1 and b.y2 > b.y1:
                    all_boxes.append([b.x1, b.y1, b.x2, b.y2])
                    all_scores.append(float(b.conf))
                    all_cls.append(int(b.cls_id))

        if not all_boxes:
            return []

        boxes = np.asarray(all_boxes, dtype=np.float32)
        scores = np.asarray(all_scores, dtype=np.float32)
        cls_ids = np.asarray(all_cls, dtype=np.int32)
        keep = self._hard_nms_by_class(boxes, scores, cls_ids, self.iou_thres)

        out: list[BoundingBox] = []
        for i in keep:
            b = boxes[i]
            out.append(
                BoundingBox(
                    x1=int(math.floor(b[0])),
                    y1=int(math.floor(b[1])),
                    x2=int(math.ceil(b[2])),
                    y2=int(math.ceil(b[3])),
                    cls_id=int(cls_ids[i]),
                    conf=float(scores[i]),
                )
            )
        return out

    def predict_batch(
        self,
        batch_images: list[ndarray],
        offset: int,
        n_keypoints: int,
    ) -> list[TVFrameResult]:
        results: list[TVFrameResult] = []

        for frame_number_in_batch, image in enumerate(batch_images):
            try:
                boxes = self.predict_image(image)
            except Exception as e:
                print(f"⚠️ Inference failed for frame {offset + frame_number_in_batch}: {e}")
                boxes = []

            results.append(
                TVFrameResult(
                    frame_id=offset + frame_number_in_batch,
                    boxes=boxes,
                    keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
                )
            )

        return results
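A minimal local smoke test for this module (a sketch, not part of the upload: the repo id and image path are placeholders, and it assumes the downloaded snapshot contains weights.onnx next to an optional class_names.txt, as Miner.__init__ expects):

from pathlib import Path

import cv2
from huggingface_hub import snapshot_download

from miner import Miner

repo_dir = Path(snapshot_download("SuperBitDev/<repo-id>"))  # placeholder repo id
miner = Miner(repo_dir)

frame = cv2.imread("test_frame.jpg")  # placeholder path; BGR HWC uint8, as the validators expect
for result in miner.predict_batch([frame], offset=0, n_keypoints=0):
    for box in result.boxes:
        print(result.frame_id, miner.class_names[box.cls_id],
              round(box.conf, 3), (box.x1, box.y1, box.x2, box.y2))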
weights.onnx ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:73bafd7fd46bc604398e16eca609244474a177d229762da5fab4dfa61e5d259b
size 29210566