iotaminer committed
Commit 79a04e5 · verified · 1 Parent(s): f5e841d

v2: YOLOv11m INT8 QDQ (21.5MB) + lowered hose threshold 0.22

__pycache__/miner.cpython-312.pyc ADDED
Binary file (15.7 kB).
 
chute_config.yml ADDED
@@ -0,0 +1,23 @@
+ Image:
+   from_base: parachutes/python:3.12
+   run_command:
+     - pip install --upgrade setuptools wheel
+     - pip install huggingface_hub==0.19.4 opencv-python-headless numpy pydantic pyyaml aiohttp
+     - pip install --index-url https://download.pytorch.org/whl/cu128 torch==2.8.0
+     - pip install 'onnxruntime-gpu>=1.20,<1.25'
+   set_workdir: /app
+
+ NodeSelector:
+   gpu_count: 1
+   min_vram_gb_per_gpu: 16
+   exclude:
+     - b200
+     - h200
+     - mi300x
+     - b300
+
+ Chute:
+   shutdown_after_seconds: 604800
+   concurrency: 4
+   max_instances: 1
+   scaling_threshold: 0.5
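
The cu128 torch wheel pulls in the pip-packaged NVIDIA CUDA libraries that onnxruntime-gpu needs, and miner.py additionally calls ort.preload_dlls() and extends LD_LIBRARY_PATH before creating its session. A minimal smoke test one might run inside the built image to confirm the GPU provider is visible (the script is illustrative and not part of this commit; the miner itself falls back to CPUExecutionProvider if CUDA is missing):

# check_providers.py -- hypothetical sanity check, not shipped in this repo
import onnxruntime as ort

providers = ort.get_available_providers()
print('available providers:', providers)
if 'CUDAExecutionProvider' not in providers:
    raise SystemExit('CUDA EP missing: inference would fall back to CPU')
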
class_names.txt ADDED
@@ -0,0 +1,4 @@
+ petrol hose
+ petrol pump
+ price board
+ roof canopy
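
The miner reports integer class ids that index this file in order (0=petrol hose, 1=petrol pump, 2=price board, 3=roof canopy), matching the docstring in miner.py. A small sketch of how a consumer could turn the miner's boxes back into labels; the repo path is an assumption:

# label_boxes.py -- illustrative helper, not part of this commit
from pathlib import Path

repo = Path('/app/model')  # assumed local checkout containing class_names.txt
CLASS_NAMES = repo.joinpath('class_names.txt').read_text().splitlines()

def describe(box) -> str:
    # box is a miner.BoundingBox; cls_id follows the line order of class_names.txt
    return f'{CLASS_NAMES[box.cls_id]} {box.conf:.2f} @ ({box.x1},{box.y1})-({box.x2},{box.y2})'
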
miner.py ADDED
@@ -0,0 +1,217 @@
+ """TurboVision miner for Detect-petrol-station-1-0.
+
+ YOLOv11m static-INT8 QDQ ONNX (21.5MB) + horizontal-flip TTA.
+ 4 classes: 0=petrol hose, 1=petrol pump, 2=price board, 3=roof canopy.
+
+ Competitive tuning notes:
+ - Lower per-class confidence thresholds to capture more petrol hoses (small thin objects
+   that our previous 0.43 threshold was filtering out).
+ - YOLOv11m body (166 Conv) is more capable than YOLOv11s at detecting small objects.
+ - Static QDQ INT8 keeps size <30MB while preserving mAP within a few percent of FP32.
+ """
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import List, Tuple
+
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+ from pydantic import BaseModel
+
+
+ class BoundingBox(BaseModel):
+     x1: int
+     y1: int
+     x2: int
+     y2: int
+     cls_id: int
+     conf: float
+
+
+ class TVFrameResult(BaseModel):
+     frame_id: int
+     boxes: list[BoundingBox]
+     keypoints: list[tuple[int, int]]
+
+
+ class Miner:
+     IMGSZ = 1280
+     CLASS_CONF_THRES = (0.22, 0.35, 0.22, 0.30)
+     CONF_THRES = 0.22
+     IOU_THRES = 0.45
+     NUM_CLASSES = 4
+     MIN_BOX_FRAC = 0.003
+     USE_TTA = True
+     MAX_DETS = 300
+
+     def __init__(self, path_hf_repo: Path) -> None:
+         self.onnx_path = path_hf_repo / 'weights.onnx'
+         if not self.onnx_path.exists():
+             raise FileNotFoundError(f'Model not found at {self.onnx_path}')
+
+         import os as _os
+         import site as _site
+         import glob as _glob
+         cuda_lib_dirs: list[str] = []
+         for sp in _site.getsitepackages() + [_site.getusersitepackages()]:
+             for sub in ('nvidia/cuda_runtime/lib', 'nvidia/cublas/lib', 'nvidia/cudnn/lib',
+                         'nvidia/cufft/lib', 'nvidia/cuda_nvrtc/lib', 'nvidia/curand/lib',
+                         'nvidia/cusparse/lib', 'nvidia/cusolver/lib', 'nvidia/nvjitlink/lib'):
+                 p = f'{sp}/{sub}'
+                 if _glob.glob(f'{p}/*.so*'):
+                     cuda_lib_dirs.append(p)
+         if cuda_lib_dirs:
+             existing = _os.environ.get('LD_LIBRARY_PATH', '')
+             _os.environ['LD_LIBRARY_PATH'] = ':'.join(cuda_lib_dirs + ([existing] if existing else []))
+
+         providers: list = []
+         try:
+             ort.preload_dlls()
+         except Exception as _pe:
+             print(f'[Miner] preload_dlls failed: {_pe}')
+         available = ort.get_available_providers()
+         if 'CUDAExecutionProvider' in available:
+             providers.append(('CUDAExecutionProvider', {'device_id': 0}))
+         providers.append('CPUExecutionProvider')
+         so = ort.SessionOptions()
+         so.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
+         self.session = ort.InferenceSession(str(self.onnx_path), sess_options=so, providers=providers)
+         self.input_name = self.session.get_inputs()[0].name
+         inp = self.session.get_inputs()[0]
+         self.input_shape = inp.shape
+         self.input_dtype = np.float16 if inp.type == 'tensor(float16)' else np.float32
+         self.active_providers = self.session.get_providers()
+         print(f'[Miner] Loaded {self.onnx_path.name} | providers={self.active_providers} | dtype={self.input_dtype}')
+         print(f'[Miner] Thresholds: CLASS_CONF={self.CLASS_CONF_THRES}, TTA={self.USE_TTA}')
+
+     def __repr__(self) -> str:
+         return f'PetrolMiner(yolo11m-qdq-int8, tta={self.USE_TTA}, conf={self.CONF_THRES}, providers={getattr(self, "active_providers", "?")})'
+
+     @staticmethod
+     def _letterbox(img, new_size=1280, color=(114, 114, 114)):
+         h, w = img.shape[:2]
+         r = min(new_size / h, new_size / w)
+         nh, nw = int(round(h * r)), int(round(w * r))
+         resized = cv2.resize(img, (nw, nh), interpolation=cv2.INTER_LINEAR)
+         top = (new_size - nh) // 2
+         bottom = new_size - nh - top
+         left = (new_size - nw) // 2
+         right = new_size - nw - left
+         padded = cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
+         return padded, r, (left, top)
+
+     def _preprocess(self, img):
+         h, w = img.shape[:2]
+         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         padded, r, (lx, ty) = self._letterbox(img_rgb, self.IMGSZ)
+         x = padded.astype(self.input_dtype) / 255.0
+         x = x.transpose(2, 0, 1)[None, ...]
+         return np.ascontiguousarray(x), r, (lx, ty), (w, h)
+
+     def _run_onnx(self, img):
+         x, r, (lx, ty), (W, H) = self._preprocess(img)
+         outputs = self.session.run(None, {self.input_name: x})
+         det = outputs[0]
+         if det.ndim == 3:
+             det = det[0]
+         if det.size == 0:
+             return np.empty((0, 4)), np.empty((0,)), np.empty((0,), dtype=int), W, H
+         det = np.asarray(det, dtype=np.float32)
+         if det.shape[-1] < 6:
+             return np.empty((0, 4)), np.empty((0,)), np.empty((0,), dtype=int), W, H
+         xyxy = det[:, :4].copy()
+         conf = det[:, 4].copy()
+         cls_id = det[:, 5].astype(int)
+         keep = conf >= self.CONF_THRES
+         xyxy, conf, cls_id = xyxy[keep], conf[keep], cls_id[keep]
+         if len(xyxy) == 0:
+             return np.empty((0, 4)), np.empty((0,)), np.empty((0,), dtype=int), W, H
+         xyxy[:, [0, 2]] = (xyxy[:, [0, 2]] - lx) / r
+         xyxy[:, [1, 3]] = (xyxy[:, [1, 3]] - ty) / r
+         xyxy[:, 0::2] = np.clip(xyxy[:, 0::2], 0, W - 1)
+         xyxy[:, 1::2] = np.clip(xyxy[:, 1::2], 0, H - 1)
+         min_side = self.MIN_BOX_FRAC * min(W, H)
+         mask = (
+             (cls_id >= 0) & (cls_id < self.NUM_CLASSES)
+             & ((xyxy[:, 2] - xyxy[:, 0]) >= min_side)
+             & ((xyxy[:, 3] - xyxy[:, 1]) >= min_side)
+         )
+         return xyxy[mask], conf[mask], cls_id[mask], W, H
+
+     @staticmethod
+     def _hard_nms_per_class(xyxy, conf, cls_id, iou_thres=0.5, max_per_class=100):
+         if len(xyxy) == 0:
+             return np.empty((0,), dtype=int)
+         keep = []
+         for c in np.unique(cls_id):
+             idx = np.where(cls_id == c)[0]
+             b = xyxy[idx]
+             s = conf[idx]
+             order = np.argsort(-s)
+             b = b[order]; s = s[order]; idx = idx[order]
+             areas = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
+             suppressed = np.zeros(len(b), dtype=bool)
+             for i in range(len(b)):
+                 if suppressed[i]:
+                     continue
+                 keep.append(idx[i])
+                 if len([k for k in keep if cls_id[k] == c]) >= max_per_class:
+                     break
+                 xx1 = np.maximum(b[i, 0], b[i+1:, 0])
+                 yy1 = np.maximum(b[i, 1], b[i+1:, 1])
+                 xx2 = np.minimum(b[i, 2], b[i+1:, 2])
+                 yy2 = np.minimum(b[i, 3], b[i+1:, 3])
+                 inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
+                 iou = inter / (areas[i] + areas[i+1:] - inter + 1e-9)
+                 suppressed[i+1:][iou > iou_thres] = True
+         return np.array(keep, dtype=int)
+
+     def _predict_single(self, img):
+         xyxy1, conf1, cls1, W, H = self._run_onnx(img)
+         if not self.USE_TTA:
+             xyxy, conf, cls_id = xyxy1, conf1, cls1
+         else:
+             img_f = cv2.flip(img, 1)
+             xyxy2, conf2, cls2, _, _ = self._run_onnx(img_f)
+             if len(xyxy2) > 0:
+                 tmp = xyxy2.copy()
+                 tmp[:, 0] = W - xyxy2[:, 2]
+                 tmp[:, 2] = W - xyxy2[:, 0]
+                 xyxy2 = tmp
+             pieces_xyxy = [a for a in (xyxy1, xyxy2) if len(a) > 0]
+             pieces_conf = [a for a in (conf1, conf2) if len(a) > 0]
+             pieces_cls = [a for a in (cls1, cls2) if len(a) > 0]
+             xyxy = np.vstack(pieces_xyxy) if pieces_xyxy else np.empty((0, 4))
+             conf = np.concatenate(pieces_conf) if pieces_conf else np.empty((0,))
+             cls_id = np.concatenate(pieces_cls) if pieces_cls else np.empty((0,), dtype=int)
+         if len(xyxy) > 0:
+             keep = self._hard_nms_per_class(xyxy, conf, cls_id, iou_thres=self.IOU_THRES)
+             xyxy, conf, cls_id = xyxy[keep], conf[keep], cls_id[keep]
+         boxes = []
+         order = np.argsort(-conf) if len(conf) else np.empty((0,), dtype=int)
+         for i in order[:self.MAX_DETS]:
+             ci = int(cls_id[i])
+             if 0 <= ci < self.NUM_CLASSES and float(conf[i]) < self.CLASS_CONF_THRES[ci]:
+                 continue
+             boxes.append(BoundingBox(
+                 x1=int(round(float(xyxy[i, 0]))),
+                 y1=int(round(float(xyxy[i, 1]))),
+                 x2=int(round(float(xyxy[i, 2]))),
+                 y2=int(round(float(xyxy[i, 3]))),
+                 cls_id=ci,
+                 conf=float(conf[i]),
+             ))
+         return boxes
+
+     def predict_batch(self, batch_images, offset, n_keypoints):
+         results = []
+         for i, img in enumerate(batch_images):
+             try:
+                 boxes = self._predict_single(img)
+             except Exception as e:
+                 print(f'[Miner] predict error on frame {offset + i}: {e}')
+                 boxes = []
+             kps = [(0, 0) for _ in range(n_keypoints)]
+             results.append(TVFrameResult(frame_id=offset + i, boxes=boxes, keypoints=kps))
+         return results
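
The public surface of the file is Miner(path_hf_repo) plus predict_batch(batch_images, offset, n_keypoints), which wraps letterboxed 1280px inference, optional flip TTA, per-class NMS, and the per-class confidence gates. A minimal usage sketch, assuming the repo is snapshotted locally and frames are read with cv2 (the paths are illustrative):

# run_miner.py -- illustrative driver, not part of this commit
from pathlib import Path

import cv2

from miner import Miner

miner = Miner(Path('/app/model'))        # directory containing weights.onnx (assumed)

cap = cv2.VideoCapture('station.mp4')    # illustrative input video
frames = []
while len(frames) < 4:
    ok, frame = cap.read()               # frames stay BGR; _preprocess converts to RGB
    if not ok:
        break
    frames.append(frame)

# n_keypoints=0: this miner only emits boxes, keypoints are zero-filled placeholders.
for res in miner.predict_batch(frames, offset=0, n_keypoints=0):
    for b in res.boxes:
        print(res.frame_id, b.cls_id, f'{b.conf:.2f}', (b.x1, b.y1, b.x2, b.y2))
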
weights.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66c160d43617435be25490d4709d696e05d9d395a7c7271590f381e5568ca42e
+ size 21506995
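
weights.onnx is the 21.5MB static-INT8 QDQ artifact named in the commit message; the export and calibration pipeline is not part of this commit. A hedged sketch of how such a model is typically produced with onnxruntime's static quantizer, assuming an FP32 ONNX export named yolo11m_fp32.onnx, an input tensor called 'images', and a folder of calibration frames (all of these names are assumptions):

# quantize_qdq.py -- hypothetical offline step, not shipped in this repo
import glob

import cv2
import numpy as np
from onnxruntime.quantization import (CalibrationDataReader, QuantFormat,
                                      QuantType, quantize_static)


class FrameReader(CalibrationDataReader):
    """Feeds 1280x1280 RGB frames to the calibrator (the real pipeline would letterbox as in miner.py)."""

    def __init__(self, image_dir: str, input_name: str = 'images'):
        self.paths = iter(glob.glob(f'{image_dir}/*.jpg'))
        self.input_name = input_name

    def get_next(self):
        path = next(self.paths, None)
        if path is None:
            return None
        img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (1280, 1280)).astype(np.float32) / 255.0
        return {self.input_name: img.transpose(2, 0, 1)[None]}


quantize_static(
    'yolo11m_fp32.onnx',          # FP32 export (assumed file name)
    'weights.onnx',               # static-INT8 QDQ output, ~21.5 MB here
    FrameReader('calib_frames'),  # assumed calibration image folder
    quant_format=QuantFormat.QDQ,
    weight_type=QuantType.QInt8,
)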