MTerryJack committed · Commit 24479fa · verified · 1 Parent(s): e328df0

subnet_bridge: add miner runtime files

Files changed (2)
  1. chute_config.yml +15 -0
  2. miner.py +175 -0
chute_config.yml ADDED
@@ -0,0 +1,15 @@
+Image:
+  from_base: parachutes/python:3.12
+  run_command:
+    - pip install --upgrade setuptools wheel
+    - pip install 'numpy>=1.23' 'onnxruntime>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'pydantic>=2.0'
+
+NodeSelector:
+  gpu_count: 1
+  min_vram_gb_per_gpu: 16
+
+Chute:
+  timeout_seconds: 300
+  concurrency: 4
+  max_instances: 5
+  scaling_threshold: 0.5
miner.py ADDED
@@ -0,0 +1,175 @@
+from pathlib import Path
+import math
+
+import cv2
+import numpy as np
+import onnxruntime as ort
+from numpy import ndarray
+from pydantic import BaseModel
+
+
+class BoundingBox(BaseModel):
+    x1: int
+    y1: int
+    x2: int
+    y2: int
+    cls_id: int
+    conf: float
+
+
+class TVFrameResult(BaseModel):
+    frame_id: int
+    boxes: list[BoundingBox]
+    keypoints: list[tuple[int, int]]
+
+
+class Miner:
+    """
+    Auto-generated by subnet_bridge from a Manako element repo.
+    This miner is intentionally self-contained for chute import restrictions.
+    """
+
+    def __init__(self, path_hf_repo: Path) -> None:
+        self.path_hf_repo = path_hf_repo
+        self.class_names = ['bus', 'car', 'motorcycle', 'truck', 'van']
+        self.session = ort.InferenceSession(
+            str(path_hf_repo / "weights.onnx"),
+            providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
+        )
+        self.input_name = self.session.get_inputs()[0].name
+        input_shape = self.session.get_inputs()[0].shape
+        # expected [N, C, H, W]
+        self.input_h = int(input_shape[2])
+        self.input_w = int(input_shape[3])
+        self.conf_threshold = 0.25
+        self.iou_threshold = 0.45
+
+    def __repr__(self) -> str:
+        return f"ONNX Miner session={type(self.session).__name__} classes={len(self.class_names)}"
+
+    def _preprocess(self, image_bgr: ndarray) -> tuple[np.ndarray, tuple[int, int]]:
+        h, w = image_bgr.shape[:2]
+        rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
+        resized = cv2.resize(rgb, (self.input_w, self.input_h))
+        x = resized.astype(np.float32) / 255.0
+        x = np.transpose(x, (2, 0, 1))[None, ...]
+        return x, (h, w)
+
+    def _normalize_predictions(self, raw: np.ndarray) -> np.ndarray:
+        # Common ultralytics export shapes:
+        # - [1, C, N] where C=4+num_classes
+        # - [1, N, C]
+        pred = raw[0]
+        if pred.ndim != 2:
+            raise ValueError(f"Unexpected prediction shape: {raw.shape}")
+        if pred.shape[0] < pred.shape[1]:
+            pred = pred.transpose(1, 0)
+        return pred
+
+    def _nms(self, dets: list[tuple[float, float, float, float, float, int]]) -> list[tuple[float, float, float, float, float, int]]:
+        if not dets:
+            return []
+
+        boxes = np.array([[d[0], d[1], d[2], d[3]] for d in dets], dtype=np.float32)
+        scores = np.array([d[4] for d in dets], dtype=np.float32)
+        order = scores.argsort()[::-1]
+        keep = []
+
+        while order.size > 0:
+            i = order[0]
+            keep.append(i)
+
+            xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
+            yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
+            xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
+            yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
+
+            w = np.maximum(0.0, xx2 - xx1)
+            h = np.maximum(0.0, yy2 - yy1)
+            inter = w * h
+
+            area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
+            area_rest = (boxes[order[1:], 2] - boxes[order[1:], 0]) * (boxes[order[1:], 3] - boxes[order[1:], 1])
+            union = np.maximum(area_i + area_rest - inter, 1e-6)
+            iou = inter / union
+
+            remaining = np.where(iou <= self.iou_threshold)[0]
+            order = order[remaining + 1]
+
+        return [dets[idx] for idx in keep]
+
+    def _infer_single(self, image_bgr: ndarray) -> list[BoundingBox]:
+        inp, (orig_h, orig_w) = self._preprocess(image_bgr)
+        out = self.session.run(None, {self.input_name: inp})[0]
+        pred = self._normalize_predictions(out)
+
+        if pred.shape[1] < 5:
+            return []
+
+        boxes = pred[:, :4]
+        cls_scores = pred[:, 4:]
+
+        if cls_scores.shape[1] == 0:
+            return []
+
+        cls_ids = np.argmax(cls_scores, axis=1)
+        confs = np.max(cls_scores, axis=1)
+        keep = confs >= self.conf_threshold
+
+        boxes = boxes[keep]
+        confs = confs[keep]
+        cls_ids = cls_ids[keep]
+
+        if boxes.shape[0] == 0:
+            return []
+
+        sx = orig_w / float(self.input_w)
+        sy = orig_h / float(self.input_h)
+
+        dets: list[tuple[float, float, float, float, float, int]] = []
+        for i in range(boxes.shape[0]):
+            cx, cy, bw, bh = boxes[i].tolist()
+            x1 = (cx - bw / 2.0) * sx
+            y1 = (cy - bh / 2.0) * sy
+            x2 = (cx + bw / 2.0) * sx
+            y2 = (cy + bh / 2.0) * sy
+            dets.append((x1, y1, x2, y2, float(confs[i]), int(cls_ids[i])))
+
+        dets = self._nms(dets)
+
+        out_boxes: list[BoundingBox] = []
+        for x1, y1, x2, y2, conf, cls_id in dets:
+            ix1 = max(0, min(orig_w, math.floor(x1)))
+            iy1 = max(0, min(orig_h, math.floor(y1)))
+            ix2 = max(0, min(orig_w, math.ceil(x2)))
+            iy2 = max(0, min(orig_h, math.ceil(y2)))
+            out_boxes.append(
+                BoundingBox(
+                    x1=ix1,
+                    y1=iy1,
+                    x2=ix2,
+                    y2=iy2,
+                    cls_id=cls_id,
+                    conf=max(0.0, min(1.0, conf)),
+                )
+            )
+        return out_boxes
+
+    def predict_batch(
+        self,
+        batch_images: list[ndarray],
+        offset: int,
+        n_keypoints: int,
+    ) -> list[TVFrameResult]:
+        results: list[TVFrameResult] = []
+        for idx, image in enumerate(batch_images):
+            boxes = self._infer_single(image)
+            keypoints = [(0, 0) for _ in range(max(0, int(n_keypoints)))]
+            results.append(
+                TVFrameResult(
+                    frame_id=offset + idx,
+                    boxes=boxes,
+                    keypoints=keypoints,
+                )
+            )
+        return results
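For context, a minimal usage sketch of the miner above when run locally. The repo directory and image path are hypothetical placeholders; Miner itself, its class_names, and predict_batch come straight from miner.py:

# Illustrative local run; paths are hypothetical placeholders.
from pathlib import Path

import cv2

from miner import Miner

miner = Miner(Path("./hf_repo"))     # directory containing weights.onnx
frame = cv2.imread("frame_000.jpg")  # BGR frame, as _preprocess expects
results = miner.predict_batch([frame], offset=0, n_keypoints=0)
for box in results[0].boxes:
    label = miner.class_names[box.cls_id]
    print(label, round(box.conf, 3), (box.x1, box.y1, box.x2, box.y2))

Note that predict_batch emits fixed (0, 0) keypoint placeholders; only the bounding boxes carry real model output.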