fitleech committed 7234045 (verified · 1 parent: 5cd1feb)

Upload folder using huggingface_hub

Files changed (4):
  1. chute_config.yml +22 -0
  2. class_names.txt +4 -0
  3. miner.py +632 -0
  4. weights.onnx +3 -0
chute_config.yml ADDED
@@ -0,0 +1,22 @@
Image:
  from_base: parachutes/python:3.12
  run_command:
    - pip install --upgrade setuptools wheel
    - pip install huggingface_hub==0.19.4 ultralytics==8.2.40 'torch<2.6' opencv-python-headless onnxruntime-gpu
  set_workdir: /app

NodeSelector:
  gpu_count: 1
  min_vram_gb_per_gpu: 16
  max_hourly_price_per_gpu: 0.8
  exclude:
    - b200
    - h200
    - h100
    - mi300x

Chute:
  shutdown_after_seconds: 300000
  concurrency: 4
  max_instances: 1
  scaling_threshold: 0.5
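
The image pins onnxruntime-gpu and the NodeSelector requests a single GPU with at least 16 GB of VRAM. A minimal startup sanity check for that setup, mirroring the provider probe Miner.__init__ already performs, might look like the sketch below (the helper script name is hypothetical and assumes weights.onnx sits next to it):

# check_providers.py — hypothetical helper, not part of this commit
from pathlib import Path

import onnxruntime as ort

# CUDAExecutionProvider should appear here when onnxruntime-gpu sees a usable GPU.
print("Available providers:", ort.get_available_providers())

session = ort.InferenceSession(
    str(Path(__file__).parent / "weights.onnx"),
    providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
)
# onnxruntime silently falls back to CPU if the CUDA provider cannot initialise.
print("Session providers:", session.get_providers())
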
class_names.txt ADDED
@@ -0,0 +1,4 @@
petrol hose
petrol pump
price board
roof canopy
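
Line order matters: Miner.__init__ reads this file top to bottom, skipping blank lines and lines starting with #, so the zero-based line index is the cls_id the model emits (0 = petrol hose … 3 = roof canopy). A small sketch of that mapping, mirroring the loading code in miner.py:

# Hypothetical snippet showing how class ids map to these names.
from pathlib import Path

lines = Path("class_names.txt").read_text(encoding="utf-8").splitlines()
class_names = [ln.strip() for ln in lines if ln.strip() and not ln.strip().startswith("#")]

for cls_id, name in enumerate(class_names):
    print(cls_id, name)  # 0 petrol hose, 1 petrol pump, 2 price board, 3 roof canopy
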
miner.py ADDED
@@ -0,0 +1,632 @@
from pathlib import Path
import math

import cv2
import numpy as np
import onnxruntime as ort
from numpy import ndarray
from pydantic import BaseModel


class BoundingBox(BaseModel):
    x1: int
    y1: int
    x2: int
    y2: int
    cls_id: int
    conf: float


class TVFrameResult(BaseModel):
    frame_id: int
    boxes: list[BoundingBox]
    keypoints: list[tuple[int, int]]

SIZE = 1280


class Miner:
    def __init__(self, path_hf_repo: Path) -> None:
        model_path = path_hf_repo / "weights.onnx"
        cn_path = model_path.with_name("class_names.txt")
        if cn_path.is_file():
            lines = cn_path.read_text(encoding="utf-8").splitlines()
            self.class_names = [
                ln.strip()
                for ln in lines
                if ln.strip() and not ln.strip().startswith("#")
            ]
        else:
            self.class_names = ["person"]
        print("ORT version:", ort.__version__)

        try:
            ort.preload_dlls()
            print("✅ onnxruntime.preload_dlls() success")
        except Exception as e:
            print(f"⚠️ preload_dlls failed: {e}")

        print("ORT available providers BEFORE session:", ort.get_available_providers())

        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        try:
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
            )
            print("✅ Created ORT session with preferred CUDA provider list")
        except Exception as e:
            print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CPUExecutionProvider"],
            )

        print("ORT session providers:", self.session.get_providers())

        for inp in self.session.get_inputs():
            print("INPUT:", inp.name, inp.shape, inp.type)

        for out in self.session.get_outputs():
            print("OUTPUT:", out.name, out.shape, out.type)

        self.input_name = self.session.get_inputs()[0].name
        self.output_names = [output.name for output in self.session.get_outputs()]
        self.input_shape = self.session.get_inputs()[0].shape

        self.input_height = self._safe_dim(self.input_shape[2], default=SIZE)
        self.input_width = self._safe_dim(self.input_shape[3], default=SIZE)

        self.conf_thres = 0.12
        self.iou_thres = 0.43
        self.max_det = 14
        self.use_tta = True

        print(f"✅ ONNX model loaded from: {model_path}")
        print(f"✅ ONNX providers: {self.session.get_providers()}")
        print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")

    def __repr__(self) -> str:
        return (
            f"ONNXRuntime(session={type(self.session).__name__}, "
            f"providers={self.session.get_providers()})"
        )

    @staticmethod
    def _safe_dim(value, default: int) -> int:
        return value if isinstance(value, int) and value > 0 else default

    def _letterbox(
        self,
        image: ndarray,
        new_shape: tuple[int, int],
        color=(114, 114, 114),
    ) -> tuple[ndarray, float, tuple[float, float]]:
        """
        Resize with unchanged aspect ratio and pad to target shape.
        Returns:
            padded_image,
            ratio,
            (pad_w, pad_h)  # half-padding
        """
        h, w = image.shape[:2]
        new_w, new_h = new_shape

        ratio = min(new_w / w, new_h / h)
        resized_w = int(round(w * ratio))
        resized_h = int(round(h * ratio))

        if (resized_w, resized_h) != (w, h):
            interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
            image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)

        dw = new_w - resized_w
        dh = new_h - resized_h
        dw /= 2.0
        dh /= 2.0

        left = int(round(dw - 0.1))
        right = int(round(dw + 0.1))
        top = int(round(dh - 0.1))
        bottom = int(round(dh + 0.1))

        padded = cv2.copyMakeBorder(
            image,
            top,
            bottom,
            left,
            right,
            borderType=cv2.BORDER_CONSTANT,
            value=color,
        )
        return padded, ratio, (dw, dh)

    def _preprocess(
        self, image: ndarray
    ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
        """
        Preprocess for fixed-size ONNX export:
        - letterbox to model input size
        - BGR -> RGB
        - normalize to [0,1]
        - HWC -> NCHW float32
        """
        orig_h, orig_w = image.shape[:2]

        img, ratio, pad = self._letterbox(
            image, (self.input_width, self.input_height)
        )
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255.0
        img = np.transpose(img, (2, 0, 1))[None, ...]
        img = np.ascontiguousarray(img, dtype=np.float32)

        return img, ratio, pad, (orig_w, orig_h)

    @staticmethod
    def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
        w, h = image_size
        boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
        boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
        boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
        boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
        return boxes

    @staticmethod
    def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
        out = np.empty_like(boxes)
        out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
        out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
        out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
        out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
        return out

    def _soft_nms(
        self,
        boxes: np.ndarray,
        scores: np.ndarray,
        sigma: float = 0.5,
        score_thresh: float = 0.01,
    ) -> tuple[np.ndarray, np.ndarray]:
        """
        Soft-NMS: Gaussian decay of overlapping scores instead of hard removal.
        Returns (kept_original_indices, updated_scores).
        """
        N = len(boxes)
        if N == 0:
            return np.array([], dtype=np.intp), np.array([], dtype=np.float32)

        boxes = boxes.astype(np.float32, copy=True)
        scores = scores.astype(np.float32, copy=True)
        order = np.arange(N)

        for i in range(N):
            max_pos = i + int(np.argmax(scores[i:]))
            boxes[[i, max_pos]] = boxes[[max_pos, i]]
            scores[[i, max_pos]] = scores[[max_pos, i]]
            order[[i, max_pos]] = order[[max_pos, i]]

            if i + 1 >= N:
                break

            xx1 = np.maximum(boxes[i, 0], boxes[i + 1:, 0])
            yy1 = np.maximum(boxes[i, 1], boxes[i + 1:, 1])
            xx2 = np.minimum(boxes[i, 2], boxes[i + 1:, 2])
            yy2 = np.minimum(boxes[i, 3], boxes[i + 1:, 3])
            inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)

            area_i = max(0.0, float(
                (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
            ))
            areas_j = (
                np.maximum(0.0, boxes[i + 1:, 2] - boxes[i + 1:, 0])
                * np.maximum(0.0, boxes[i + 1:, 3] - boxes[i + 1:, 1])
            )
            iou = inter / (area_i + areas_j - inter + 1e-7)
            scores[i + 1:] *= np.exp(-(iou ** 2) / sigma)

        mask = scores > score_thresh
        return order[mask], scores[mask]

    @staticmethod
    def _hard_nms(
        boxes: np.ndarray,
        scores: np.ndarray,
        iou_thresh: float,
    ) -> np.ndarray:
        """
        Standard NMS: keep one box per overlapping cluster (the one with highest score).
        Returns indices of kept boxes (into the boxes/scores arrays).
        """
        N = len(boxes)
        if N == 0:
            return np.array([], dtype=np.intp)
        boxes = np.asarray(boxes, dtype=np.float32)
        scores = np.asarray(scores, dtype=np.float32)
        order = np.argsort(scores)[::-1]
        keep: list[int] = []
        suppressed = np.zeros(N, dtype=bool)
        for i in range(N):
            idx = order[i]
            if suppressed[idx]:
                continue
            keep.append(idx)
            bi = boxes[idx]
            for k in range(i + 1, N):
                jdx = order[k]
                if suppressed[jdx]:
                    continue
                bj = boxes[jdx]
                xx1 = max(bi[0], bj[0])
                yy1 = max(bi[1], bj[1])
                xx2 = min(bi[2], bj[2])
                yy2 = min(bi[3], bj[3])
                inter = max(0.0, xx2 - xx1) * max(0.0, yy2 - yy1)
                area_i = (bi[2] - bi[0]) * (bi[3] - bi[1])
                area_j = (bj[2] - bj[0]) * (bj[3] - bj[1])
                iou = inter / (area_i + area_j - inter + 1e-7)
                if iou > iou_thresh:
                    suppressed[jdx] = True
        return np.array(keep)

    @staticmethod
    def _max_score_per_cluster(
        coords: np.ndarray,
        scores: np.ndarray,
        keep_indices: np.ndarray,
        iou_thresh: float,
    ) -> np.ndarray:
        """
        For each kept box, return the max original score among itself and any
        box that overlaps it with IOU >= iou_thresh (so TTA cluster keeps best conf).
        """
        n_keep = len(keep_indices)
        if n_keep == 0:
            return np.array([], dtype=np.float32)
        out = np.empty(n_keep, dtype=np.float32)
        coords = np.asarray(coords, dtype=np.float32)
        scores = np.asarray(scores, dtype=np.float32)
        for i in range(n_keep):
            idx = keep_indices[i]
            bi = coords[idx]
            xx1 = np.maximum(bi[0], coords[:, 0])
            yy1 = np.maximum(bi[1], coords[:, 1])
            xx2 = np.minimum(bi[2], coords[:, 2])
            yy2 = np.minimum(bi[3], coords[:, 3])
            inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
            area_i = (bi[2] - bi[0]) * (bi[3] - bi[1])
            areas_j = (coords[:, 2] - coords[:, 0]) * (coords[:, 3] - coords[:, 1])
            iou = inter / (area_i + areas_j - inter + 1e-7)
            in_cluster = iou >= iou_thresh
            out[i] = float(np.max(scores[in_cluster]))
        return out

    def _decode_final_dets(
        self,
        preds: np.ndarray,
        ratio: float,
        pad: tuple[float, float],
        orig_size: tuple[int, int],
        apply_optional_dedup: bool = False,
    ) -> list[BoundingBox]:
        """
        Primary path:
            expected output rows like [x1, y1, x2, y2, conf, cls_id]
            in letterboxed input coordinates.
        """
        if preds.ndim == 3 and preds.shape[0] == 1:
            preds = preds[0]

        if preds.ndim != 2 or preds.shape[1] < 6:
            raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")

        boxes = preds[:, :4].astype(np.float32)
        scores = preds[:, 4].astype(np.float32)
        cls_ids = preds[:, 5].astype(np.int32)

        keep = scores >= self.conf_thres
        boxes = boxes[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]

        if len(boxes) == 0:
            return []

        pad_w, pad_h = pad
        orig_w, orig_h = orig_size

        # reverse letterbox
        boxes[:, [0, 2]] -= pad_w
        boxes[:, [1, 3]] -= pad_h
        boxes /= ratio
        boxes = self._clip_boxes(boxes, (orig_w, orig_h))

        if apply_optional_dedup and len(boxes) > 1:
            keep_idx, scores = self._soft_nms(boxes, scores)
            boxes = boxes[keep_idx]
            cls_ids = cls_ids[keep_idx]

        results: list[BoundingBox] = []
        for box, conf, cls_id in zip(boxes, scores, cls_ids):
            x1, y1, x2, y2 = box.tolist()

            if x2 <= x1 or y2 <= y1:
                continue

            results.append(
                BoundingBox(
                    x1=int(math.floor(x1)),
                    y1=int(math.floor(y1)),
                    x2=int(math.ceil(x2)),
                    y2=int(math.ceil(y2)),
                    cls_id=int(cls_id),
                    conf=float(conf),
                )
            )

        return results

    def _decode_raw_yolo(
        self,
        preds: np.ndarray,
        ratio: float,
        pad: tuple[float, float],
        orig_size: tuple[int, int],
    ) -> list[BoundingBox]:
        """
        Fallback path for raw YOLO predictions.
        Supports common layouts:
        - [1, C, N]
        - [1, N, C]
        """
        if preds.ndim != 3:
            raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")

        if preds.shape[0] != 1:
            raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")

        preds = preds[0]

        # Normalize to [N, C]
        if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
            preds = preds.T

        if preds.ndim != 2 or preds.shape[1] < 5:
            raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")

        boxes_xywh = preds[:, :4].astype(np.float32)
        cls_part = preds[:, 4:].astype(np.float32)

        if cls_part.shape[1] == 1:
            scores = cls_part[:, 0]
            cls_ids = np.zeros(len(scores), dtype=np.int32)
        else:
            cls_ids = np.argmax(cls_part, axis=1).astype(np.int32)
            scores = cls_part[np.arange(len(cls_part)), cls_ids]

        keep = scores >= self.conf_thres
        boxes_xywh = boxes_xywh[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]

        if len(boxes_xywh) == 0:
            return []

        boxes = self._xywh_to_xyxy(boxes_xywh)
        keep_idx, scores = self._soft_nms(boxes, scores)
        keep_idx = keep_idx[: self.max_det]
        scores = scores[: self.max_det]

        boxes = boxes[keep_idx]
        cls_ids = cls_ids[keep_idx]

        pad_w, pad_h = pad
        orig_w, orig_h = orig_size

        boxes[:, [0, 2]] -= pad_w
        boxes[:, [1, 3]] -= pad_h
        boxes /= ratio
        boxes = self._clip_boxes(boxes, (orig_w, orig_h))

        results: list[BoundingBox] = []
        for box, conf, cls_id in zip(boxes, scores, cls_ids):
            x1, y1, x2, y2 = box.tolist()

            if x2 <= x1 or y2 <= y1:
                continue

            results.append(
                BoundingBox(
                    x1=int(math.floor(x1)),
                    y1=int(math.floor(y1)),
                    x2=int(math.ceil(x2)),
                    y2=int(math.ceil(y2)),
                    cls_id=int(cls_id),
                    conf=float(conf),
                )
            )

        return results

    def _postprocess(
        self,
        output: np.ndarray,
        ratio: float,
        pad: tuple[float, float],
        orig_size: tuple[int, int],
    ) -> list[BoundingBox]:
        """
        Prefer final detections first.
        Fallback to raw decode only if needed.
        """
        # final detections: [N,6]
        if output.ndim == 2 and output.shape[1] >= 6:
            return self._decode_final_dets(output, ratio, pad, orig_size)

        # final detections: [1,N,6]
        if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] == 6:
            return self._decode_final_dets(output, ratio, pad, orig_size)

        # fallback raw decode
        return self._decode_raw_yolo(output, ratio, pad, orig_size)

    def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
        if image is None:
            raise ValueError("Input image is None")
        if not isinstance(image, np.ndarray):
            raise TypeError(f"Input is not numpy array: {type(image)}")
        if image.ndim != 3:
            raise ValueError(f"Expected HWC image, got shape={image.shape}")
        if image.shape[0] <= 0 or image.shape[1] <= 0:
            raise ValueError(f"Invalid image shape={image.shape}")
        if image.shape[2] != 3:
            raise ValueError(f"Expected 3 channels, got shape={image.shape}")

        if image.dtype != np.uint8:
            image = image.astype(np.uint8)

        input_tensor, ratio, pad, orig_size = self._preprocess(image)

        expected_shape = (1, 3, self.input_height, self.input_width)
        if input_tensor.shape != expected_shape:
            raise ValueError(
                f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
            )

        outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
        det_output = outputs[0]
        return self._postprocess(det_output, ratio, pad, orig_size)

    def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
        """Horizontal-flip TTA: merge original + flipped via hard NMS."""
        boxes_orig = self._predict_single(image)

        flipped = cv2.flip(image, 1)
        boxes_flip = self._predict_single(flipped)

        w = image.shape[1]
        boxes_flip = [
            BoundingBox(
                x1=w - b.x2, y1=b.y1, x2=w - b.x1, y2=b.y2,
                cls_id=b.cls_id, conf=b.conf,
            )
            for b in boxes_flip
        ]

        all_boxes = boxes_orig + boxes_flip
        if len(all_boxes) == 0:
            return []

        coords = np.array(
            [[b.x1, b.y1, b.x2, b.y2] for b in all_boxes], dtype=np.float32
        )
        scores = np.array([b.conf for b in all_boxes], dtype=np.float32)

        hard_keep = self._hard_nms(coords, scores, self.iou_thres)
        if len(hard_keep) == 0:
            return []

        # _hard_nms already orders kept indices by descending score.
        hard_keep = hard_keep[: self.max_det]

        return [
            BoundingBox(
                x1=all_boxes[i].x1,
                y1=all_boxes[i].y1,
                x2=all_boxes[i].x2,
                y2=all_boxes[i].y2,
                cls_id=all_boxes[i].cls_id,
                conf=float(scores[i]),
            )
            for i in hard_keep
        ]

    def predict_batch(
        self,
        batch_images: list[ndarray],
        offset: int,
        n_keypoints: int,
    ) -> list[TVFrameResult]:
        results: list[TVFrameResult] = []

        for frame_number_in_batch, image in enumerate(batch_images):
            try:
                if self.use_tta:
                    boxes = self._predict_tta(image)
                else:
                    boxes = self._predict_single(image)
            except Exception as e:
                print(f"⚠️ Inference failed for frame {offset + frame_number_in_batch}: {e}")
                boxes = []
            # for box in boxes:
            #     if box.cls_id == 2:
            #         box.cls_id = 3
            #     elif box.cls_id == 3:
            #         box.cls_id = 2

            results.append(
                TVFrameResult(
                    frame_id=offset + frame_number_in_batch,
                    boxes=boxes,
                    keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
                )
            )

        return results


if __name__ == "__main__":
    # Simple manual test: load weights.onnx, run on car1.png, and draw bboxes
    repo_dir = Path(__file__).parent
    miner = Miner(repo_dir)

    image_path = repo_dir / "car1.png"
    if not image_path.exists():
        raise FileNotFoundError(f"Test image not found: {image_path}")

    image = cv2.imread(str(image_path), cv2.IMREAD_COLOR)
    if image is None:
        raise RuntimeError(f"Failed to read image: {image_path}")

    results = miner.predict_batch([image], offset=0, n_keypoints=0)
    # Draw bounding boxes on a copy of the image
    vis = image.copy()
    colors = [(0, 255, 0), (0, 0, 255), (255, 0, 0)]
    for frame in results:
        print(f"Frame {frame.frame_id}:")
        for i, box in enumerate(frame.boxes):
            color = colors[i % len(colors)]
            cv2.rectangle(
                vis,
                (box.x1, box.y1),
                (box.x2, box.y2),
                color,
                2,
            )
            name = (
                miner.class_names[box.cls_id]
                if box.cls_id < len(miner.class_names)
                else str(box.cls_id)
            )
            label = f"{box.cls_id}_{name}:{box.conf:.2f}"
            cv2.putText(
                vis,
                label,
                (box.x1, max(0, box.y1 - 5)),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,  # fixed font scale so low-confidence labels stay legible
                color,
                1,
                cv2.LINE_AA,
            )
            print(
                f"  cls={box.cls_id} conf={box.conf:.3f} "
                f"box=({box.x1},{box.y1},{box.x2},{box.y2})"
            )
        print(f"Total boxes: {len(frame.boxes)}")

    out_path = repo_dir / f"1_out_iou{miner.iou_thres:.2f}.png"
    cv2.imwrite(str(out_path), vis)
    print(f"Saved visualization to: {out_path}")
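
Beyond the single-image test above, predict_batch is written for batches of video frames, where offset is the absolute index of the first frame in the batch. A hedged usage sketch of such a caller (the video path and batch size are assumptions, not part of this repo):

# Hypothetical caller: run the miner over a video in fixed-size batches.
from pathlib import Path

import cv2

from miner import Miner

miner = Miner(Path(__file__).parent)
cap = cv2.VideoCapture("input.mp4")  # assumed local video file
batch, offset, batch_size = [], 0, 8

while True:
    ok, frame = cap.read()
    if not ok:
        break
    batch.append(frame)
    if len(batch) == batch_size:
        for result in miner.predict_batch(batch, offset=offset, n_keypoints=0):
            print(result.frame_id, len(result.boxes))
        offset += len(batch)
        batch = []

if batch:  # flush the final partial batch
    for result in miner.predict_batch(batch, offset=offset, n_keypoints=0):
        print(result.frame_id, len(result.boxes))
cap.release()
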
weights.onnx ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cdce24eff7daa6a1413a8fd4647f6b8308bc8d097c6db50ca0cf9664026d9de4
size 29197728