fitleech committed on
Commit be0e105 · verified · 1 Parent(s): 8277986

Upload folder using huggingface_hub
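This is the default commit message emitted by `huggingface_hub`, so the upload was presumably equivalent to this minimal sketch (the folder path and repo id are placeholders; the actual target repo is not shown on this page):

    from huggingface_hub import HfApi

    # folder_path and repo_id are placeholders for the real local folder and repo
    HfApi().upload_folder(folder_path="<local_folder>", repo_id="<user>/<repo>")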

Files changed (4)
  1. README.md +3 -0
  2. chute_config.yml +25 -0
  3. miner.py +470 -0
  4. weights.onnx +3 -0
README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: mit
+ ---
chute_config.yml ADDED
@@ -0,0 +1,25 @@
+ Image:
+   from_base: parachutes/python:3.12
+   run_command:
+     - pip install --upgrade setuptools wheel
+     - pip install 'numpy>=1.23' 'onnxruntime-gpu[cuda,cudnn]>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9'
+     - pip install torch torchvision
+   set_workdir: /app
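+
+ # NodeSelector: schedule on a single GPU with at least 16 GB VRAM and an
+ # hourly price of at most 1.0, excluding the high-end cards listed below.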
+ NodeSelector:
+   gpu_count: 1
+   min_vram_gb_per_gpu: 16
+   max_hourly_price_per_gpu: 1.0
+   exclude:
+     - "5090"
+     - b200
+     - h200
+     - h100
+     - mi300x
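+
+ # Chute: 900 s request timeout, 4 concurrent requests per instance, up to
+ # 5 instances (presumably scaled up once utilization passes the 0.5
+ # scaling_threshold), and shutdown after 288000 s (80 h).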
+ Chute:
+   timeout_seconds: 900
+   concurrency: 4
+   max_instances: 5
+   scaling_threshold: 0.5
+   shutdown_after_seconds: 288000
miner.py ADDED
@@ -0,0 +1,470 @@
+ from pathlib import Path
+ import math
+
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+ from numpy import ndarray
+ from pydantic import BaseModel
+
+
+ class BoundingBox(BaseModel):
+     x1: int
+     y1: int
+     x2: int
+     y2: int
+     cls_id: int
+     conf: float
+
+
+ class TVFrameResult(BaseModel):
+     frame_id: int
+     boxes: list[BoundingBox]
+     keypoints: list[tuple[int, int]]
+
+
+ class Miner:
+     """
+     ONNX-backed version of the petrol-tracking miner.
+
+     This class is responsible for:
+     - Loading the ONNX model via onnxruntime.
+     - Running predictions on images.
+     - Parsing ONNX outputs into structured results (TVFrameResult).
+
+     It must have the following to be compatible with the chute:
+     - be named `Miner`
+     - have a `predict_batch` function with the inputs and outputs specified
+     - be stored in a file called `miner.py` which lives in the root of the
+       HFHub repo (rename/copy this file to `miner.py` before deploying)
+     """
+
+     def __init__(self, path_hf_repo: Path) -> None:
+         model_path = path_hf_repo / "weights.onnx"
+
+         # Class order as exported from the training .pt: must match model.names
+         self.class_names = ["petrol hose", "petrol pump", "price board", "roof canopy"]
+
+         print("ORT version:", ort.__version__)
+
+         try:
+             ort.preload_dlls()
+             print("✅ onnxruntime.preload_dlls() success")
+         except Exception as e:
+             print(f"⚠️ preload_dlls failed: {e}")
+
+         print("ORT available providers BEFORE session:", ort.get_available_providers())
+
+         sess_options = ort.SessionOptions()
+         sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+         try:
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
+             )
+             print("✅ Created ORT session with preferred CUDA provider list")
+         except Exception as e:
+             print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CPUExecutionProvider"],
+             )
+
+         print("ORT session providers:", self.session.get_providers())
+
+         for inp in self.session.get_inputs():
+             print("INPUT:", inp.name, inp.shape, inp.type)
+
+         for out in self.session.get_outputs():
+             print("OUTPUT:", out.name, out.shape, out.type)
+
+         self.input_name = self.session.get_inputs()[0].name
+         self.output_names = [output.name for output in self.session.get_outputs()]
+         self.input_shape = self.session.get_inputs()[0].shape
+
+         self.input_height = self._safe_dim(self.input_shape[2], default=640)
+         self.input_width = self._safe_dim(self.input_shape[3], default=640)
+
+         # Thresholds
+         self.conf_thres = 0.4
+         self.iou_thres = 0.50
+         self.max_det = 300
+
+         print(f"✅ Petrol ONNX model loaded from: {model_path}")
+         print(f"✅ ONNX providers: {self.session.get_providers()}")
+         print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")
+
+     def __repr__(self) -> str:
+         return (
+             f"Petrol ONNXRuntime(session={type(self.session).__name__}, "
+             f"providers={self.session.get_providers()})"
+         )
+
+     @staticmethod
+     def _safe_dim(value, default: int) -> int:
+         return value if isinstance(value, int) and value > 0 else default
+
+     def _letterbox(
+         self,
+         image: ndarray,
+         new_shape: tuple[int, int],
+         color=(114, 114, 114),
+     ) -> tuple[ndarray, float, tuple[float, float]]:
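+         # Aspect-preserving resize to fit inside new_shape, padding the
+         # remainder with `color` split evenly between the two sides; returns
+         # the padded image, the scale ratio, and the (dw, dh) half-padding.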
+         h, w = image.shape[:2]
+         new_w, new_h = new_shape
+
+         ratio = min(new_w / w, new_h / h)
+         resized_w = int(round(w * ratio))
+         resized_h = int(round(h * ratio))
+
+         if (resized_w, resized_h) != (w, h):
+             interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
+             image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)
+
+         dw = new_w - resized_w
+         dh = new_h - resized_h
+         dw /= 2.0
+         dh /= 2.0
+
+         left = int(round(dw - 0.1))
+         right = int(round(dw + 0.1))
+         top = int(round(dh - 0.1))
+         bottom = int(round(dh + 0.1))
+
+         padded = cv2.copyMakeBorder(
+             image,
+             top,
+             bottom,
+             left,
+             right,
+             borderType=cv2.BORDER_CONSTANT,
+             value=color,
+         )
+         return padded, ratio, (dw, dh)
+
+     def _preprocess(
+         self, image: ndarray
+     ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
+         orig_h, orig_w = image.shape[:2]
+
+         img, ratio, pad = self._letterbox(
+             image, (self.input_width, self.input_height)
+         )
+         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         img = img.astype(np.float32) / 255.0
+         img = np.transpose(img, (2, 0, 1))[None, ...]
+         img = np.ascontiguousarray(img, dtype=np.float32)
+
+         return img, ratio, pad, (orig_w, orig_h)
+
+     @staticmethod
+     def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
+         w, h = image_size
+         boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
+         boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
+         boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
+         boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
+         return boxes
+
+     @staticmethod
+     def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
+         out = np.empty_like(boxes)
+         out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
+         out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
+         out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
+         out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
+         return out
+
+     @staticmethod
+     def _hard_nms(
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         iou_thresh: float,
+     ) -> np.ndarray:
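+         # Greedy NMS: keep the highest-scoring box, drop every remaining box
+         # whose IoU with it exceeds iou_thresh, and repeat on what is left.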
+         if len(boxes) == 0:
+             return np.array([], dtype=np.intp)
+
+         boxes = np.asarray(boxes, dtype=np.float32)
+         scores = np.asarray(scores, dtype=np.float32)
+         order = np.argsort(scores)[::-1]
+         keep = []
+
+         while len(order) > 0:
+             i = order[0]
+             keep.append(i)
+             if len(order) == 1:
+                 break
+
+             rest = order[1:]
+
+             xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
+             yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
+             xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
+             yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
+
+             inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+
+             area_i = max(0.0, (boxes[i, 2] - boxes[i, 0])) * max(0.0, (boxes[i, 3] - boxes[i, 1]))
+             area_r = np.maximum(0.0, boxes[rest, 2] - boxes[rest, 0]) * np.maximum(0.0, boxes[rest, 3] - boxes[rest, 1])
+
+             iou = inter / (area_i + area_r - inter + 1e-7)
+             order = rest[iou <= iou_thresh]
+
+         return np.array(keep, dtype=np.intp)
+
+     @classmethod
+     def _nms_per_class(
+         cls,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         iou_thresh: float,
+         max_det: int,
+     ) -> np.ndarray:
+         if len(boxes) == 0:
+             return np.array([], dtype=np.intp)
+         keep_all: list[int] = []
+         for c in np.unique(cls_ids):
+             idxs = np.nonzero(cls_ids == c)[0]
+             if len(idxs) == 0:
+                 continue
+             local_keep = cls._hard_nms(boxes[idxs], scores[idxs], iou_thresh)
+             keep_all.extend(idxs[local_keep].tolist())
+         keep_all_arr = np.array(keep_all, dtype=np.intp)
+         order = np.argsort(scores[keep_all_arr])[::-1]
+         return keep_all_arr[order[:max_det]]
+
+     def _decode_yolov8(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         """
+         Decode a raw YOLOv8-style ONNX detection output.
+
+         Expected shape: [1, 4 + nc, num_boxes] (no objectness channel).
+         Some exporters emit [1, num_boxes, 4 + nc]; both are handled.
+         """
+         if preds.ndim != 3 or preds.shape[0] != 1:
+             raise ValueError(f"Unexpected ONNX output shape: {preds.shape}")
+
+         preds = preds[0]
+
+         # Normalize to [N, C] where C = 4 + nc
+         nc = len(self.class_names)
+         expected_c = 4 + nc
+         if preds.shape[0] == expected_c:
+             preds = preds.T
+         elif preds.shape[1] != expected_c:
+             # Fall back: treat smaller dim as channels
+             if preds.shape[0] < preds.shape[1]:
+                 preds = preds.T
+
+         if preds.ndim != 2 or preds.shape[1] < 5:
+             raise ValueError(f"Unexpected normalized output shape: {preds.shape}")
+
+         boxes_xywh = preds[:, :4].astype(np.float32)
+         class_probs = preds[:, 4:].astype(np.float32)
+
+         cls_ids = np.argmax(class_probs, axis=1).astype(np.int32)
+         scores = class_probs[np.arange(len(class_probs)), cls_ids]
+
+         keep = scores >= self.conf_thres
+         boxes_xywh = boxes_xywh[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         if len(boxes_xywh) == 0:
+             return []
+
+         boxes = self._xywh_to_xyxy(boxes_xywh)
+
+         # Undo the letterbox: remove padding, rescale to original resolution
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+
+         keep_idx = self._nms_per_class(
+             boxes, scores, cls_ids, self.iou_thres, self.max_det
+         )
+
+         boxes = boxes[keep_idx]
+         scores = scores[keep_idx]
+         cls_ids = cls_ids[keep_idx]
+
+         return [
+             BoundingBox(
+                 x1=int(math.floor(box[0])),
+                 y1=int(math.floor(box[1])),
+                 x2=int(math.ceil(box[2])),
+                 y2=int(math.ceil(box[3])),
+                 cls_id=int(cls_id),
+                 conf=float(conf),
+             )
+             for box, conf, cls_id in zip(boxes, scores, cls_ids)
+             if box[2] > box[0] and box[3] > box[1]
+         ]
+
+     def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
+         if image is None:
+             raise ValueError("Input image is None")
+         if not isinstance(image, np.ndarray):
+             raise TypeError(f"Input is not a numpy array: {type(image)}")
+         if image.ndim != 3:
+             raise ValueError(f"Expected HWC image, got shape={image.shape}")
+         if image.shape[0] <= 0 or image.shape[1] <= 0:
+             raise ValueError(f"Invalid image shape={image.shape}")
+         if image.shape[2] != 3:
+             raise ValueError(f"Expected 3 channels, got shape={image.shape}")
+
+         if image.dtype != np.uint8:
+             image = image.astype(np.uint8)
+
+         input_tensor, ratio, pad, orig_size = self._preprocess(image)
+
+         expected_shape = (1, 3, self.input_height, self.input_width)
+         if input_tensor.shape != expected_shape:
+             raise ValueError(
+                 f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
+             )
+
+         outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
+         det_output = outputs[0]
+         return self._decode_yolov8(det_output, ratio, pad, orig_size)
+
+     def predict_batch(
+         self,
+         batch_images: list[ndarray],
+         offset: int,
+         n_keypoints: int,
+     ) -> list[TVFrameResult]:
+         """
+         Miner prediction for a batch of images using ONNX Runtime.
+
+         The petrol detector is a plain object-detection model (no pose),
+         so keypoints are returned as `n_keypoints` padding entries of (0, 0)
+         to keep the TVFrameResult schema stable across challenge types.
+         """
+         results: list[TVFrameResult] = []
+         n_kp = max(0, int(n_keypoints))
+
+         for frame_number_in_batch, image in enumerate(batch_images):
+             frame_idx = offset + frame_number_in_batch
+             try:
+                 boxes = self._predict_single(image)
+             except Exception as e:
+                 print(f"⚠️ Inference failed for frame {frame_idx}: {e}")
+                 boxes = []
+
+             results.append(
+                 TVFrameResult(
+                     frame_id=frame_idx,
+                     boxes=boxes,
+                     keypoints=[(0, 0) for _ in range(n_kp)],
+                 )
+             )
+
+         print("✅ Petrol ONNX predictions complete")
+         return results
+
+
+ def main() -> None:
+     """
+     Example runner for the ONNX Miner class.
+
+     Loads `weights.onnx` from the directory containing this script and runs
+     `predict_batch` on one or more image files.
+
+     Usage:
+         python miner.py                        # dummy blank image
+         python miner.py image1.jpg             # single image
+         python miner.py image1.jpg image2.jpg  # batch of images
+     """
+     import sys
+
+     import numpy as np
+
+     repo_path = Path(__file__).parent
+     print(f"Loading miner from: {repo_path}")
+     miner = Miner(path_hf_repo=repo_path)
+     print(repr(miner))
+
+     batch_images: list[np.ndarray] = []
+
+     if len(sys.argv) > 1:
+         for image_path in sys.argv[1:]:
+             image = cv2.imread(image_path)
+             if image is None:
+                 raise ValueError(f"Cannot read image: {image_path}")
+             batch_images.append(image)
+         print(f"Loaded {len(batch_images)} image(s)")
+     else:
+         batch_images = [np.zeros((640, 640, 3), dtype=np.uint8)]
+         print("No image provided — running on a single blank dummy frame")
+
+     results = miner.predict_batch(
+         batch_images=batch_images,
+         offset=0,
+         n_keypoints=32,
+     )
+
+     output_dir = repo_path / "predictions"
+     output_dir.mkdir(exist_ok=True)
+
+     class_names = {i: n for i, n in enumerate(miner.class_names)}
+
+     def color_for_class(cls_id: int) -> tuple[int, int, int]:
+         # Deterministic per-class color: step the hue, convert HSV to BGR
+         hue = (cls_id * 47) % 180
+         hsv = np.uint8([[[hue, 220, 255]]])
+         bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)[0, 0]
+         return int(bgr[0]), int(bgr[1]), int(bgr[2])
+
+     for image, r in zip(batch_images, results):
+         print(
+             f"frame={r.frame_id} "
+             f"boxes={len(r.boxes)} "
+             f"keypoints={len(r.keypoints)}"
+         )
+
+         vis = image.copy()
+         for box in r.boxes:
+             name = class_names.get(box.cls_id, str(box.cls_id))
+             color = color_for_class(box.cls_id)
+             print(
+                 f"  box cls={box.cls_id}({name}) conf={box.conf:.2f} "
+                 f"[{box.x1},{box.y1},{box.x2},{box.y2}]"
+             )
+             cv2.rectangle(vis, (box.x1, box.y1), (box.x2, box.y2), color, 2)
+             label = f"{name} {box.conf:.2f}"
+             (tw, th), baseline = cv2.getTextSize(
+                 label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1
+             )
+             top = max(box.y1 - th - baseline, 0)
+             cv2.rectangle(
+                 vis, (box.x1, top), (box.x1 + tw, top + th + baseline), color, -1
+             )
+             cv2.putText(
+                 vis, label, (box.x1, top + th),
+                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA,
+             )
+
+         for x, y in r.keypoints:
+             if x == 0 and y == 0:
+                 continue
+             cv2.circle(vis, (x, y), 3, (0, 0, 255), -1)
+
+         out_path = output_dir / f"frame_{r.frame_id:04d}.jpg"
+         cv2.imwrite(str(out_path), vis)
+         print(f"  saved: {out_path}")
+
+
+ if __name__ == "__main__":
+     main()
weights.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6c8059cd1f69fd0ed8860fec08b24656d6ed177c6adf76dee85c14cd571cf7a
+ size 22664875