meaculpitt committed on
Commit 6bf6441 · verified · 1 Parent(s): cac815a

scorevision: push artifact

Files changed (1)
miner.py  +155  -317
miner.py CHANGED
@@ -1,339 +1,177 @@
  """
- Score Vision SN44 - VehicleDetect miner endpoint.

- Class mapping (output indices):
-     0 = bus        (COCO class 5)
-     1 = car        (COCO class 2)
-     2 = truck      (COCO class 7)
-     3 = motorcycle (COCO class 3)

- Accepts: base64-encoded image or raw image bytes via chutes cord.
- Returns: list of {bbox: [x1,y1,x2,y2], score: float, class_id: int, class_name: str}
-
- CUDA fix: onnxruntime-gpu finds cuDNN via ldconfig (registered during image build),
- with ctypes preload as belt-and-suspenders fallback.
  """

- from __future__ import annotations
-
- import base64
- import io
- import os
- import time
  from pathlib import Path
- from typing import Any

- import ctypes
  import cv2
  import numpy as np
- from PIL import Image
-
- # ── cuDNN preload (belt-and-suspenders fallback) ─────────────────────────────
- # Primary fix is ldconfig at image build time (see Image builder below).
- # This ctypes preload catches any edge cases where ld.so.cache isn't used.
- def _preload_cuda_libs() -> None:
-     _NVIDIA = "/usr/local/lib/python3.12/dist-packages/nvidia"
-     _LIBS = [
-         "/usr/lib/x86_64-linux-gnu/libcuda.so.1",  # driver stub - must be first
-         f"{_NVIDIA}/cublas/lib/libcublasLt.so.12",
-         f"{_NVIDIA}/cublas/lib/libcublas.so.12",
-         f"{_NVIDIA}/cudnn/lib/libcudnn.so.9",
-     ]
-     for path in _LIBS:
-         if os.path.exists(path):
-             try:
-                 ctypes.CDLL(path, mode=ctypes.RTLD_GLOBAL)
-             except OSError:
-                 pass

- _preload_cuda_libs()

- import onnxruntime as ort  # noqa: E402 - must come after preload
-
- # ── Constants ────────────────────────────────────────────────────────────────
- MODEL_DIR = Path(__file__).parent
- WEIGHTS = MODEL_DIR / "weights.onnx"
- IMG_SIZE = 640
- CONF_THRESH = 0.55  # sweep: max composite score (0.60×mAP + 0.40×FP_score) at conf=0.55
- IOU_THRESH = 0.45
-
- # COCO class index → submission class index (official order: 0=bus, 1=car, 2=truck, 3=motorcycle)
- COCO_TO_OUT: dict[int, int] = {5: 0, 2: 1, 7: 2, 3: 3}
- COCO_VEHICLE_IDX = list(COCO_TO_OUT.keys())
  OUT_NAMES = ["bus", "car", "truck", "motorcycle"]

- # ── Model loader (singleton) ─────────────────────────────────────────────────
- _SESSION: ort.InferenceSession | None = None
-
-
- def get_session() -> ort.InferenceSession:
-     global _SESSION
-     if _SESSION is None:
-         opts = ort.SessionOptions()
-         opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
-         opts.enable_mem_pattern = True
-         opts.enable_mem_reuse = True
-         cuda_opts = {
-             "device_id": 0,
-             "arena_extend_strategy": "kNextPowerOfTwo",
-             "gpu_mem_limit": 2 * 1024 ** 3,
-             "cudnn_conv_algo_search": "EXHAUSTIVE",
-             "do_copy_in_default_stream": True,
-         }
-         _SESSION = ort.InferenceSession(
-             str(WEIGHTS),
-             sess_options=opts,
-             providers=[
-                 ("CUDAExecutionProvider", cuda_opts),
-                 "CPUExecutionProvider",
-             ],
-         )
-         provider = _SESSION.get_providers()[0]
-         print(f"[miner] Model loaded. Provider: {provider}", flush=True)
-     return _SESSION
-
-
- # ── Preprocessing ────────────────────────────────────────────────────────────
-
- def letterbox(img: np.ndarray, size: int = IMG_SIZE) -> tuple[np.ndarray, float, int, int]:
-     h, w = img.shape[:2]
-     r = min(size / h, size / w)
-     new_w, new_h = int(round(w * r)), int(round(h * r))
-     img_r = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
-     dw, dh = size - new_w, size - new_h
-     pad_l, pad_t = dw // 2, dh // 2
-     img_p = cv2.copyMakeBorder(
-         img_r, pad_t, dh - pad_t, pad_l, dw - pad_l,
-         cv2.BORDER_CONSTANT, value=(114, 114, 114),
-     )
-     return img_p, r, pad_l, pad_t
-
-
- def preprocess(img_bgr: np.ndarray) -> tuple[np.ndarray, float, int, int]:
-     img_p, ratio, pad_l, pad_t = letterbox(img_bgr)
-     img_rgb = cv2.cvtColor(img_p, cv2.COLOR_BGR2RGB)
-     inp = img_rgb.transpose(2, 0, 1).astype(np.float32) * (1.0 / 255.0)
-     return np.ascontiguousarray(inp[np.newaxis]), ratio, pad_l, pad_t
-
-
- # ── NMS ──────────────────────────────────────────────────────────────────────
-
- def nms(boxes: np.ndarray, scores: np.ndarray, iou_thresh: float = IOU_THRESH) -> list[int]:
-     if not len(boxes):
-         return []
-     x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
-     areas = (x2 - x1) * (y2 - y1)
-     order = scores.argsort()[::-1]
-     keep: list[int] = []
-     while len(order):
-         i = order[0]
-         keep.append(int(i))
-         xx1 = np.maximum(x1[i], x1[order[1:]])
-         yy1 = np.maximum(y1[i], y1[order[1:]])
-         xx2 = np.minimum(x2[i], x2[order[1:]])
-         yy2 = np.minimum(y2[i], y2[order[1:]])
-         inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
-         iou = inter / (areas[i] + areas[order[1:]] - inter + 1e-7)
-         order = order[1:][iou <= iou_thresh]
-     return keep
-
-
- # ── Postprocessing ───────────────────────────────────────────────────────────
-
- def postprocess(
-     raw: np.ndarray,
-     ratio: float,
-     pad_l: int,
-     pad_t: int,
-     orig_w: int,
-     orig_h: int,
- ) -> list[dict[str, Any]]:
-     pred = raw  # [84, 8400]
-     veh_row_idx = np.array([4 + c for c in COCO_VEHICLE_IDX])
-     max_veh_score = pred[veh_row_idx].max(axis=0)
-     mask = max_veh_score > CONF_THRESH
-     if not mask.any():
-         return []
-
-     pred_f = pred[:, mask]
-     cx, cy, bw, bh = pred_f[0], pred_f[1], pred_f[2], pred_f[3]
-
-     x1 = np.clip((cx - bw / 2 - pad_l) / ratio, 0, orig_w)
-     y1 = np.clip((cy - bh / 2 - pad_t) / ratio, 0, orig_h)
-     x2 = np.clip((cx + bw / 2 - pad_l) / ratio, 0, orig_w)
-     y2 = np.clip((cy + bh / 2 - pad_t) / ratio, 0, orig_h)
-     boxes = np.stack([x1, y1, x2, y2], axis=1)

-     results: list[dict[str, Any]] = []
-     for coco_cls in COCO_VEHICLE_IDX:
-         scores = pred_f[4 + coco_cls]
-         cls_mask = scores > CONF_THRESH
-         if not cls_mask.any():
-             continue
-         keep = nms(boxes[cls_mask], scores[cls_mask])
-         out_cls = COCO_TO_OUT[coco_cls]
-         for k in keep:
-             box = boxes[cls_mask][k]
-             results.append({
-                 "bbox": [
-                     float(box[0]), float(box[1]),
-                     float(box[2]), float(box[3]),
-                 ],
-                 "score": float(scores[cls_mask][k]),
-                 "class_id": out_cls,
-                 "class_name": OUT_NAMES[out_cls],
-             })
-     return results

- # ── Image decoding helpers ───────────────────────────────────────────────────

- def decode_image(data: bytes | str) -> np.ndarray:
-     if isinstance(data, str):
-         data = base64.b64decode(data)
-     elif isinstance(data, (bytes, bytearray)):
-         try:
-             data = base64.b64decode(data)
-         except Exception:
-             pass
-     arr = np.frombuffer(data, dtype=np.uint8)
-     img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
-     if img is None:
-         pil = Image.open(io.BytesIO(data)).convert("RGB")
-         img = cv2.cvtColor(np.array(pil), cv2.COLOR_RGB2BGR)
-     return img


- # ── Core predict function ────────────────────────────────────────────────────

- def predict(image_data: bytes | str | np.ndarray) -> dict[str, Any]:
-     sess = get_session()
-
-     if isinstance(image_data, np.ndarray):
-         img_bgr = image_data
-     else:
-         img_bgr = decode_image(image_data)
-
-     orig_h, orig_w = img_bgr.shape[:2]
-     inp, ratio, pad_l, pad_t = preprocess(img_bgr)
-
-     t0 = time.perf_counter()
-     outputs = sess.run(None, {"images": inp})
-     infer_ms = (time.perf_counter() - t0) * 1000.0
-
-     raw = outputs[0][0]  # [84, 8400]
-     detections = postprocess(raw, ratio, pad_l, pad_t, orig_w, orig_h)
-
-     return {
-         "detections": detections,
-         "inference_ms": round(infer_ms, 3),
-         "provider": sess.get_providers()[0],
-     }
-
-
- # ── Chutes cord wrapper ──────────────────────────────────────────────────────
-
- try:
-     from chutes.chute import Chute
-     from chutes.chute.node_selector import NodeSelector
-     from chutes.image import Image as ChuteImage
-
-     chute_image = (
-         ChuteImage(
-             username="lculpitt",
-             name="vehicle-detect-sn44",
-             tag="v4-cuda",
-             readme=(Path(__file__).parent / "README.md").read_text(),
          )
-         .from_base("parachutes/python:3.12")
-         .run_command("pip install --upgrade setuptools wheel")
-         .run_command(
-             "pip install 'numpy>=1.23' 'onnxruntime-gpu>=1.16' "
-             "'opencv-python-headless>=4.7' 'pillow>=9.5' "
-             "'huggingface_hub>=0.19.4' 'pydantic>=2.0' "
-             "'pyyaml>=6.0' 'aiohttp>=3.9'"
          )
-         # Bake cuDNN/cuBLAS paths into the image as Docker ENV so onnxruntime
-         # CUDAExecutionProvider finds libcudnn.so.9 on every node at container start.
-         .with_env(
-             "LD_LIBRARY_PATH",
-             "/usr/local/lib/python3.12/dist-packages/nvidia/cudnn/lib"
-             ":/usr/local/lib/python3.12/dist-packages/nvidia/cublas/lib",
-         )
-     )
-
-     chute = Chute(
-         username="lculpitt",
-         name="vehicle-detect-sn44",
-         tagline="YOLO11n vehicle detector - car, bus, truck, motorcycle",
-         readme=(Path(__file__).parent / "README.md").read_text(),
-         image=chute_image,
-         concurrency=4,
-         max_instances=5,
-         shutdown_after_seconds=300,
-         scaling_threshold=0.5,
-         node_selector=NodeSelector(
-             gpu_count=1,
-             min_vram_gb_per_gpu=16,
-             # All CUDA 12.x, all $0.40–$0.85/hr (within 2.5× spread from cheapest)
-             include=["4090", "a40", "a6000", "l40", "l40s"],
-         ),
-     )
-
-     @chute.cord(path="/predict", method="POST")
-     async def predict_cord(image_b64: str) -> dict:
-         """
-         POST /predict
-         Body: {"image_b64": "<base64-encoded image>"}
-         Returns detection JSON.
-         """
-         return predict(image_b64)
-
- except ImportError:
-     pass
-
-
- # ── Local test ───────────────────────────────────────────────────────────────
-
- if __name__ == "__main__":
-     import sys
-
-     print("=" * 55)
-     print(" miner.py - local smoke test")
-     print("=" * 55)
-
-     dummy_bgr = np.full((720, 1280, 3), 128, dtype=np.uint8)
-     cv2.rectangle(dummy_bgr, (100, 100), (400, 300), (0, 255, 0), 3)
-
-     if len(sys.argv) > 1:
-         loaded = cv2.imread(sys.argv[1])
-         if loaded is not None:
-             dummy_bgr = loaded
-             print(f" Using image: {sys.argv[1]} ({loaded.shape[1]}x{loaded.shape[0]})")
-         else:
-             print(f" Could not load {sys.argv[1]}, using dummy.")
-     else:
-         print(" Using synthetic 1280x720 dummy image.")
-
-     result = predict(dummy_bgr)
-     print(f"\n Provider   : {result['provider']}")
-     print(f" Inference  : {result['inference_ms']:.2f} ms")
-     print(f" Detections : {len(result['detections'])}")
-     for d in result["detections"]:
-         x1, y1, x2, y2 = [round(v, 1) for v in d["bbox"]]
-         print(f"   [{d['class_id']}] {d['class_name']:12s} score={d['score']:.3f} "
-               f"bbox=[{x1},{y1},{x2},{y2}]")
-
-     print("\n Latency benchmark (50 runs)...")
-     times = []
-     for _ in range(50):
-         t0 = time.perf_counter()
-         predict(dummy_bgr)
-         times.append((time.perf_counter() - t0) * 1000)
-     times.sort()
-     p50, p95 = times[25], times[47]
-     fps = 1000.0 / p50
-     print(f" P50={p50:.2f}ms  P95={p95:.2f}ms  FPS={fps:.1f}")
-     print(f" Target >=30 FPS : {'PASS' if fps >= 30 else 'FAIL'}")
-     print(f" Target P95<50ms : {'PASS' if p95 < 50 else 'FAIL'}")
-     print("=" * 55)

  """
+ Score Vision SN44 - VehicleDetect miner.

+ Model: YOLO11n ONNX, 4 classes trained as:
+     0 = car, 1 = bus, 2 = truck, 3 = motorcycle

+ Official submission order (remapped in MODEL_TO_OUT):
+     0 = bus, 1 = car, 2 = truck, 3 = motorcycle
  """

  from pathlib import Path
+ import math

  import cv2
  import numpy as np
+ import onnxruntime as ort
+ from numpy import ndarray
+ from pydantic import BaseModel


+ # ── Model class index → submission class index ───────────────────────────────
+ # Trained order:  car=0, bus=1, truck=2, motorcycle=3
+ # Official order: bus=0, car=1, truck=2, motorcycle=3
+ MODEL_TO_OUT: dict[int, int] = {0: 1, 1: 0, 2: 2, 3: 3}
  OUT_NAMES = ["bus", "car", "truck", "motorcycle"]

+ IMG_SIZE = 640
+ CONF_THRESH = 0.55  # sweep-optimised: max composite (0.60×mAP + 0.40×FP_score)
+ IOU_THRESH = 0.45

+ class BoundingBox(BaseModel):
+     x1: int
+     y1: int
+     x2: int
+     y2: int
+     cls_id: int
+     conf: float


+ class TVFrameResult(BaseModel):
+     frame_id: int
+     boxes: list[BoundingBox]
+     keypoints: list[tuple[int, int]]


+ class Miner:
+     """
+     VehicleDetect miner for SN44. Loaded by turbovision template at startup.
+     """
+
+     def __init__(self, path_hf_repo: Path) -> None:
+         self.path_hf_repo = path_hf_repo
+         self.session = ort.InferenceSession(
+             str(path_hf_repo / "weights.onnx"),
+             providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
          )
+         self.input_name = self.session.get_inputs()[0].name
+         self.conf_threshold = CONF_THRESH
+         self.iou_threshold = IOU_THRESH
+
+     def __repr__(self) -> str:
+         return f"VehicleDetect Miner session={type(self.session).__name__}"
+
+     def _letterbox(self, img: ndarray) -> tuple[np.ndarray, float, int, int]:
+         h, w = img.shape[:2]
+         r = min(IMG_SIZE / h, IMG_SIZE / w)
+         new_w, new_h = int(round(w * r)), int(round(h * r))
+         img_r = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
+         dw, dh = IMG_SIZE - new_w, IMG_SIZE - new_h
+         pad_l, pad_t = dw // 2, dh // 2
+         img_p = cv2.copyMakeBorder(
+             img_r, pad_t, dh - pad_t, pad_l, dw - pad_l,
+             cv2.BORDER_CONSTANT, value=(114, 114, 114),
          )
+         return img_p, r, pad_l, pad_t
+
+     def _preprocess(self, image_bgr: ndarray) -> tuple[np.ndarray, float, int, int]:
+         img_p, ratio, pad_l, pad_t = self._letterbox(image_bgr)
+         img_rgb = cv2.cvtColor(img_p, cv2.COLOR_BGR2RGB)
+         inp = img_rgb.astype(np.float32) / 255.0
+         inp = np.ascontiguousarray(inp.transpose(2, 0, 1)[np.newaxis])
+         return inp, ratio, pad_l, pad_t
+
+     def _nms(self, boxes: np.ndarray, scores: np.ndarray) -> list[int]:
+         if not len(boxes):
+             return []
+         x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
+         areas = (x2 - x1) * (y2 - y1)
+         order = scores.argsort()[::-1]
+         keep: list[int] = []
+         while len(order):
+             i = order[0]
+             keep.append(int(i))
+             xx1 = np.maximum(x1[i], x1[order[1:]])
+             yy1 = np.maximum(y1[i], y1[order[1:]])
+             xx2 = np.minimum(x2[i], x2[order[1:]])
+             yy2 = np.minimum(y2[i], y2[order[1:]])
+             inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
+             iou = inter / (areas[i] + areas[order[1:]] - inter + 1e-7)
+             order = order[1:][iou <= self.iou_threshold]
+         return keep
+
+ def _infer_single(self, image_bgr: ndarray) -> list[BoundingBox]:
105
+ orig_h, orig_w = image_bgr.shape[:2]
106
+ inp, ratio, pad_l, pad_t = self._preprocess(image_bgr)
107
+ raw = self.session.run(None, {self.input_name: inp})[0]
108
+
109
+ # Output: [1, 8, 8400] β†’ pred: [8, 8400] β†’ [8400, 8]
110
+ pred = raw[0]
111
+ if pred.shape[0] < pred.shape[1]:
112
+ pred = pred.T # [8400, 8]
113
+
114
+ bboxes_cx = pred[:, :4] # cx, cy, w, h in letterboxed coords
115
+ cls_scores = pred[:, 4:] # [8400, 4]
116
+
117
+ cls_ids = np.argmax(cls_scores, axis=1)
118
+ confs = np.max(cls_scores, axis=1)
119
+ mask = confs >= self.conf_threshold
120
+
121
+ if not mask.any():
122
+ return []
123
+
124
+ bboxes_cx = bboxes_cx[mask]
125
+ confs = confs[mask]
126
+ cls_ids = cls_ids[mask]
127
+
128
+ # cx,cy,w,h β†’ x1,y1,x2,y2 (in letterboxed image coords)
129
+ cx, cy, bw, bh = bboxes_cx[:, 0], bboxes_cx[:, 1], bboxes_cx[:, 2], bboxes_cx[:, 3]
130
+ lx1 = cx - bw / 2
131
+ ly1 = cy - bh / 2
132
+ lx2 = cx + bw / 2
133
+ ly2 = cy + bh / 2
134
+
135
+ # Unletterbox back to original image coords
136
+ x1 = np.clip((lx1 - pad_l) / ratio, 0, orig_w)
137
+ y1 = np.clip((ly1 - pad_t) / ratio, 0, orig_h)
138
+ x2 = np.clip((lx2 - pad_l) / ratio, 0, orig_w)
139
+ y2 = np.clip((ly2 - pad_t) / ratio, 0, orig_h)
140
+ boxes = np.stack([x1, y1, x2, y2], axis=1)
141
+
142
+ out_boxes: list[BoundingBox] = []
143
+ for model_cls in range(4):
144
+ cls_mask = cls_ids == model_cls
145
+ if not cls_mask.any():
146
+ continue
147
+ keep = self._nms(boxes[cls_mask], confs[cls_mask])
148
+ sub_cls = MODEL_TO_OUT[model_cls]
149
+ for k in keep:
150
+ box = boxes[cls_mask][k]
151
+ conf = float(confs[cls_mask][k])
152
+ out_boxes.append(BoundingBox(
153
+ x1=max(0, min(orig_w, math.floor(box[0]))),
154
+ y1=max(0, min(orig_h, math.floor(box[1]))),
155
+ x2=max(0, min(orig_w, math.ceil(box[2]))),
156
+ y2=max(0, min(orig_h, math.ceil(box[3]))),
157
+ cls_id=sub_cls,
158
+ conf=max(0.0, min(1.0, conf)),
159
+ ))
160
+ return out_boxes
161
+
162
+ def predict_batch(
163
+ self,
164
+ batch_images: list[ndarray],
165
+ offset: int,
166
+ n_keypoints: int,
167
+ ) -> list[TVFrameResult]:
168
+ results: list[TVFrameResult] = []
169
+ for idx, image in enumerate(batch_images):
170
+ boxes = self._infer_single(image)
171
+ keypoints = [(0, 0) for _ in range(max(0, int(n_keypoints)))]
172
+ results.append(TVFrameResult(
173
+ frame_id=offset + idx,
174
+ boxes=boxes,
175
+ keypoints=keypoints,
176
+ ))
177
+ return results
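
For reference, a minimal sketch of how the new Miner class could be exercised locally (not part of this commit; the snapshot path, test frame, and variable names are illustrative placeholders, with predict_batch's signature taken from the code above):

    # Hypothetical smoke test - paths and inputs are placeholders.
    from pathlib import Path

    import numpy as np

    from miner import Miner  # assumes miner.py is importable from the snapshot directory

    # Placeholder: local directory containing weights.onnx
    detector = Miner(path_hf_repo=Path("./snapshot"))

    # Placeholder: synthetic 720p BGR frame instead of a real video frame
    frame = np.full((720, 1280, 3), 128, dtype=np.uint8)

    results = detector.predict_batch([frame], offset=0, n_keypoints=0)
    for fr in results:
        print(fr.frame_id, [(box.cls_id, round(box.conf, 3)) for box in fr.boxes])

Each returned TVFrameResult carries the frame index (offset plus position in the batch), the remapped bounding boxes, and a zero-filled keypoint list of length n_keypoints.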