fitleech commited on
Commit
ab07abd
·
verified ·
1 Parent(s): 6a7cc37

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. chute_config.yml +21 -0
  2. miner.py +433 -0
  3. readme.md +4 -0
  4. weights.onnx +3 -0
chute_config.yml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Image:
2
+ from_base: parachutes/python:3.12
3
+ run_command:
4
+ - pip install --upgrade setuptools wheel
5
+ - pip install 'numpy>=1.23' 'onnxruntime-gpu[cuda,cudnn]>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9'
6
+ - pip install torch torchvision
7
+
8
+ NodeSelector:
9
+ gpu_count: 1
10
+ min_vram_gb_per_gpu: 16
11
+ max_hourly_price_per_gpu: 2
12
+ include:
13
+ - pro_6000
14
+
15
+ Chute:
16
+ timeout_seconds: 900
17
+ concurrency: 4
18
+ max_instances: 5
19
+ scaling_threshold: 0.5
20
+ shutdown_after_seconds: 288000
21
+ tee: true
miner.py ADDED
@@ -0,0 +1,433 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ import math
3
+
4
+ import cv2
5
+ import numpy as np
6
+ import onnxruntime as ort
7
+ from numpy import ndarray
8
+ from pydantic import BaseModel
9
+
10
+
11
class BoundingBox(BaseModel):
    """Axis-aligned detection box in original-image pixel coordinates."""

    x1: int  # left edge (floor-rounded)
    y1: int  # top edge (floor-rounded)
    x2: int  # right edge (ceil-rounded)
    y2: int  # bottom edge (ceil-rounded)
    cls_id: int  # class index; presumably into Miner.class_names — verify
    conf: float  # detection confidence score
18
+
19
+
20
class TVFrameResult(BaseModel):
    """Per-frame detection payload returned by Miner.predict_batch."""

    frame_id: int  # absolute frame index (batch offset + position in batch)
    boxes: list[BoundingBox]  # final merged detections for this frame
    keypoints: list[tuple[int, int]]  # placeholder points; filled with (0, 0)
24
+
25
+
26
class Miner:
    """ONNX Runtime miner. Hard per-class NMS + cross-class dedup + flip TTA."""

    # Class labels; list position doubles as the class id.
    class_names = ["cup", "bottle", "can"]
    # Fallback square network input size when the ONNX graph has dynamic dims.
    input_size = 1280
    # IoU threshold for per-class hard NMS.
    iou_thres = 0.3
    # IoU threshold for cross-class duplicate suppression.
    cross_iou_thresh = 0.6
    # Cap on detections retained per view.
    max_det = 150
    # Per-class confidence gates, indexed by class id (cup, bottle, can).
    _conf_thres_array = np.array([0.45, 0.35, 0.45], dtype=np.float32)
35
+
36
    def __init__(self, path_hf_repo: Path) -> None:
        """Load weights.onnx from `path_hf_repo` and build an ORT session.

        Prefers CUDA and falls back to CPU. The verbose prints are
        deliberate: they surface the remote deployment environment's
        provider/driver state in the service logs.
        """
        model_path = path_hf_repo / "weights.onnx"
        print("ORT version:", ort.__version__)

        # Best-effort: preload CUDA/cuDNN DLLs so provider init doesn't fail
        # on library-resolution order (not available on all ORT versions).
        try:
            ort.preload_dlls()
            print("preload_dlls success")
        except Exception as e:
            print(f"preload_dlls failed: {e}")

        print("ORT available providers BEFORE session:", ort.get_available_providers())

        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        # Try GPU first; on any failure rebuild the session CPU-only.
        try:
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
            )
            print("Created ORT session with preferred CUDA provider list")
        except Exception as e:
            print(f"CUDA session creation failed, falling back to CPU: {e}")
            self.session = ort.InferenceSession(
                str(model_path),
                sess_options=sess_options,
                providers=["CPUExecutionProvider"],
            )

        print("ORT session providers:", self.session.get_providers())

        for inp in self.session.get_inputs():
            print("INPUT:", inp.name, inp.shape, inp.type)
        for out in self.session.get_outputs():
            print("OUTPUT:", out.name, out.shape, out.type)

        self.input_name = self.session.get_inputs()[0].name
        self.output_names = [output.name for output in self.session.get_outputs()]
        self.input_shape = self.session.get_inputs()[0].shape

        # Dims 2/3 are H/W of an NCHW input; dynamic (non-int) dims fall
        # back to the class-level default input_size.
        self.input_height = self._safe_dim(self.input_shape[2], default=self.input_size)
        self.input_width = self._safe_dim(self.input_shape[3], default=self.input_size)

        print(f"ONNX model loaded from: {model_path}")
        print(f"ONNX input: name={self.input_name}, shape={self.input_shape}")
        print("per-class conf: " + ", ".join(
            f"{n}={t:.3f}" for n, t in zip(self.class_names,
                                          self._conf_thres_array.tolist())))
85
+
86
+ def __repr__(self) -> str:
87
+ return (
88
+ f"ONNXRuntime(session={type(self.session).__name__}, "
89
+ f"providers={self.session.get_providers()})"
90
+ )
91
+
92
+ @staticmethod
93
+ def _safe_dim(value, default: int) -> int:
94
+ return value if isinstance(value, int) and value > 0 else default
95
+
96
+ def _letterbox(self, image: ndarray, new_shape: tuple[int, int],
97
+ color=(114, 114, 114)
98
+ ) -> tuple[ndarray, float, tuple[float, float]]:
99
+ h, w = image.shape[:2]
100
+ new_w, new_h = new_shape
101
+ ratio = min(new_w / w, new_h / h)
102
+ resized_w = int(round(w * ratio))
103
+ resized_h = int(round(h * ratio))
104
+ if (resized_w, resized_h) != (w, h):
105
+ interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
106
+ image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)
107
+ dw = (new_w - resized_w) / 2.0
108
+ dh = (new_h - resized_h) / 2.0
109
+ left = int(round(dw - 0.1))
110
+ right = int(round(dw + 0.1))
111
+ top = int(round(dh - 0.1))
112
+ bottom = int(round(dh + 0.1))
113
+ padded = cv2.copyMakeBorder(image, top, bottom, left, right,
114
+ borderType=cv2.BORDER_CONSTANT, value=color)
115
+ return padded, ratio, (dw, dh)
116
+
117
+ def _preprocess(self, image: ndarray
118
+ ) -> tuple[np.ndarray, float, tuple[float, float],
119
+ tuple[int, int]]:
120
+ orig_h, orig_w = image.shape[:2]
121
+ img, ratio, pad = self._letterbox(image, (self.input_width, self.input_height))
122
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
123
+ img = img.astype(np.float32) / 255.0
124
+ img = np.transpose(img, (2, 0, 1))[None, ...]
125
+ img = np.ascontiguousarray(img, dtype=np.float32)
126
+ return img, ratio, pad, (orig_w, orig_h)
127
+
128
+ @staticmethod
129
+ def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
130
+ w, h = image_size
131
+ boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
132
+ boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
133
+ boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
134
+ boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
135
+ return boxes
136
+
137
+ @staticmethod
138
+ def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
139
+ out = np.empty_like(boxes)
140
+ out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
141
+ out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
142
+ out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
143
+ out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
144
+ return out
145
+
146
+ @staticmethod
147
+ def _hard_nms(boxes: np.ndarray, scores: np.ndarray,
148
+ iou_thresh: float) -> np.ndarray:
149
+ n = len(boxes)
150
+ if n == 0:
151
+ return np.array([], dtype=np.intp)
152
+ order = np.argsort(-scores)
153
+ keep: list[int] = []
154
+ while len(order) > 0:
155
+ i = int(order[0])
156
+ keep.append(i)
157
+ if len(order) == 1:
158
+ break
159
+ rest = order[1:]
160
+ xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
161
+ yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
162
+ xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
163
+ yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
164
+ inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
165
+ a_i = (max(0.0, boxes[i, 2] - boxes[i, 0]) *
166
+ max(0.0, boxes[i, 3] - boxes[i, 1]))
167
+ a_r = (np.maximum(0.0, boxes[rest, 2] - boxes[rest, 0]) *
168
+ np.maximum(0.0, boxes[rest, 3] - boxes[rest, 1]))
169
+ iou = inter / (a_i + a_r - inter + 1e-7)
170
+ order = rest[iou <= iou_thresh]
171
+ return np.array(keep, dtype=np.intp)
172
+
173
+ def _per_class_hard_nms(self, boxes: np.ndarray, scores: np.ndarray,
174
+ cls_ids: np.ndarray, iou_thresh: float
175
+ ) -> np.ndarray:
176
+ if len(boxes) == 0:
177
+ return np.array([], dtype=np.intp)
178
+ all_keep: list[int] = []
179
+ for c in np.unique(cls_ids):
180
+ mask = cls_ids == c
181
+ indices = np.where(mask)[0]
182
+ keep = self._hard_nms(boxes[mask], scores[mask], iou_thresh)
183
+ all_keep.extend(indices[keep].tolist())
184
+ all_keep.sort()
185
+ return np.array(all_keep, dtype=np.intp)
186
+
187
    @staticmethod
    def _cross_class_dedup_op(boxes: np.ndarray, scores: np.ndarray,
                              cls_ids: np.ndarray, iou_thresh: float
                              ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Suppress near-duplicate boxes regardless of class.

        Boxes are visited largest-area first (score breaks ties); each kept
        box suppresses every other box whose IoU with it exceeds
        `iou_thresh`, even across classes. Returns filtered
        (boxes, scores, cls_ids) as float32/float32/int32 arrays.
        """
        n = len(boxes)
        if n <= 1:
            return boxes, scores, cls_ids
        boxes = np.asarray(boxes, dtype=np.float32)
        scores = np.asarray(scores, dtype=np.float32)
        cls_ids = np.asarray(cls_ids, dtype=np.int32)
        # Clamped areas guard against inverted (x2 < x1) coordinates.
        areas = (np.maximum(0.0, boxes[:, 2] - boxes[:, 0]) *
                 np.maximum(0.0, boxes[:, 3] - boxes[:, 1]))
        # lexsort treats the LAST key as primary: bigger area wins, then score.
        order = np.lexsort((-scores, -areas))
        suppressed = np.zeros(n, dtype=bool)
        keep: list[int] = []
        for i in order:
            if suppressed[i]:
                continue
            keep.append(int(i))
            bi = boxes[i]
            # Vectorized IoU of box i against all boxes (itself included).
            xx1 = np.maximum(bi[0], boxes[:, 0])
            yy1 = np.maximum(bi[1], boxes[:, 1])
            xx2 = np.minimum(bi[2], boxes[:, 2])
            yy2 = np.minimum(bi[3], boxes[:, 3])
            inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
            a_i = max(1e-7, float((bi[2] - bi[0]) * (bi[3] - bi[1])))
            iou = inter / (a_i + areas - inter + 1e-7)
            dup = iou > iou_thresh
            dup[i] = False  # never suppress the anchor box itself
            suppressed |= dup
        keep_idx = np.array(keep, dtype=np.intp)
        return boxes[keep_idx], scores[keep_idx], cls_ids[keep_idx]
219
+
220
+ @staticmethod
221
+ def _max_score_per_cluster(post_boxes: np.ndarray,
222
+ full_boxes: np.ndarray,
223
+ full_scores: np.ndarray,
224
+ iou_thresh: float) -> np.ndarray:
225
+ n = len(post_boxes)
226
+ if n == 0:
227
+ return np.empty(0, dtype=np.float32)
228
+ full_areas = (np.maximum(0.0, full_boxes[:, 2] - full_boxes[:, 0]) *
229
+ np.maximum(0.0, full_boxes[:, 3] - full_boxes[:, 1]))
230
+ out = np.empty(n, dtype=np.float32)
231
+ for i in range(n):
232
+ bi = post_boxes[i]
233
+ xx1 = np.maximum(bi[0], full_boxes[:, 0])
234
+ yy1 = np.maximum(bi[1], full_boxes[:, 1])
235
+ xx2 = np.minimum(bi[2], full_boxes[:, 2])
236
+ yy2 = np.minimum(bi[3], full_boxes[:, 3])
237
+ inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
238
+ a_i = max(0.0, float((bi[2] - bi[0]) * (bi[3] - bi[1])))
239
+ iou = inter / (a_i + full_areas - inter + 1e-7)
240
+ cluster = iou >= iou_thresh
241
+ out[i] = float(np.max(full_scores[cluster])) if np.any(cluster) else 0.0
242
+ return out
243
+
244
+ def _per_view_pipeline(self, boxes: np.ndarray, scores: np.ndarray,
245
+ cls_ids: np.ndarray
246
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
247
+ if len(boxes) > 1:
248
+ keep = self._per_class_hard_nms(boxes, scores, cls_ids, self.iou_thres)
249
+ boxes, scores, cls_ids = boxes[keep], scores[keep], cls_ids[keep]
250
+ if len(scores) > self.max_det:
251
+ top = np.argsort(-scores)[: self.max_det]
252
+ boxes, scores, cls_ids = boxes[top], scores[top], cls_ids[top]
253
+ if len(boxes) > 1:
254
+ boxes, scores, cls_ids = self._cross_class_dedup_op(
255
+ boxes, scores, cls_ids, self.cross_iou_thresh
256
+ )
257
+ return boxes, scores, cls_ids
258
+
259
    def _decode_final_dets(self, preds: np.ndarray, ratio: float,
                           pad: tuple[float, float],
                           orig_size: tuple[int, int]) -> list[BoundingBox]:
        """Decode a model head that already emits final detections.

        Expects rows of [x1, y1, x2, y2, conf, cls, ...] in letterboxed
        coordinates; maps boxes back to the original image, then applies
        the per-view NMS pipeline.

        Raises:
            ValueError: if `preds` does not have the expected layout.
        """
        # Drop a leading batch dimension of size 1 if present.
        if preds.ndim == 3 and preds.shape[0] == 1:
            preds = preds[0]
        if preds.ndim != 2 or preds.shape[1] < 6:
            raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")

        boxes = preds[:, :4].astype(np.float32)
        scores = preds[:, 4].astype(np.float32)
        cls_ids = preds[:, 5].astype(np.int32)

        # Per-class confidence gate, indexed by class id.
        # NOTE(review): assumes every cls_id is < len(_conf_thres_array);
        # an out-of-range id would raise IndexError — confirm the exported
        # model's class count.
        keep = scores >= self._conf_thres_array[cls_ids]
        boxes = boxes[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]
        if len(boxes) == 0:
            return []

        # Undo letterboxing: subtract padding, then rescale to original pixels.
        pad_w, pad_h = pad
        boxes[:, [0, 2]] -= pad_w
        boxes[:, [1, 3]] -= pad_h
        boxes /= ratio
        boxes = self._clip_boxes(boxes, orig_size)

        boxes, scores, cls_ids = self._per_view_pipeline(boxes, scores, cls_ids)
        return self._build_results(boxes, scores, cls_ids)
286
+
287
    def _decode_raw_yolo(self, preds: np.ndarray, ratio: float,
                         pad: tuple[float, float],
                         orig_size: tuple[int, int]) -> list[BoundingBox]:
        """Decode a raw YOLO-style head: batch of 1, (C, N) or (N, C) layout.

        Rows are [cx, cy, w, h, class scores...] with no separate objectness
        column (the conversion below treats columns 4+ as class scores).

        Raises:
            ValueError: if `preds` does not match either expected layout.
        """
        if preds.ndim != 3 or preds.shape[0] != 1:
            raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")
        preds = preds[0]
        # Heuristic: a channels-first (C, N) tensor with few channels gets
        # transposed to (N, C). Assumes <= 16 output channels — TODO confirm.
        if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
            preds = preds.T
        if preds.ndim != 2 or preds.shape[1] < 5:
            raise ValueError(f"Unexpected raw output shape: {preds.shape}")

        boxes_xywh = preds[:, :4].astype(np.float32)
        cls_part = preds[:, 4:].astype(np.float32)
        if cls_part.shape[1] == 1:
            # Single-class model: lone column is the confidence.
            scores = cls_part[:, 0]
            cls_ids = np.zeros(len(scores), dtype=np.int32)
        else:
            # Multi-class: take the best-scoring class per row.
            cls_ids = np.argmax(cls_part, axis=1).astype(np.int32)
            scores = cls_part[np.arange(len(cls_part)), cls_ids]

        # Per-class confidence gate, indexed by class id.
        keep = scores >= self._conf_thres_array[cls_ids]
        boxes_xywh = boxes_xywh[keep]
        scores = scores[keep]
        cls_ids = cls_ids[keep]
        if len(boxes_xywh) == 0:
            return []
        boxes = self._xywh_to_xyxy(boxes_xywh)

        # Undo letterboxing back to original-image coordinates.
        pad_w, pad_h = pad
        boxes[:, [0, 2]] -= pad_w
        boxes[:, [1, 3]] -= pad_h
        boxes /= ratio
        boxes = self._clip_boxes(boxes, orig_size)

        boxes, scores, cls_ids = self._per_view_pipeline(boxes, scores, cls_ids)
        return self._build_results(boxes, scores, cls_ids)
323
+
324
+ @staticmethod
325
+ def _build_results(boxes: np.ndarray, scores: np.ndarray,
326
+ cls_ids: np.ndarray) -> list[BoundingBox]:
327
+ results: list[BoundingBox] = []
328
+ for box, conf, cls_id in zip(boxes, scores, cls_ids):
329
+ x1, y1, x2, y2 = box.tolist()
330
+ if x2 <= x1 or y2 <= y1:
331
+ continue
332
+ results.append(
333
+ BoundingBox(
334
+ x1=int(math.floor(x1)),
335
+ y1=int(math.floor(y1)),
336
+ x2=int(math.ceil(x2)),
337
+ y2=int(math.ceil(y2)),
338
+ cls_id=int(cls_id),
339
+ conf=float(conf),
340
+ )
341
+ )
342
+ return results
343
+
344
+ def _postprocess(self, output: np.ndarray, ratio: float,
345
+ pad: tuple[float, float],
346
+ orig_size: tuple[int, int]) -> list[BoundingBox]:
347
+ if output.ndim == 2 and output.shape[1] >= 6:
348
+ return self._decode_final_dets(output, ratio, pad, orig_size)
349
+ if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] == 6:
350
+ return self._decode_final_dets(output, ratio, pad, orig_size)
351
+ return self._decode_raw_yolo(output, ratio, pad, orig_size)
352
+
353
    def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
        """Run one forward pass on an HWC 3-channel image and decode boxes.

        The image is assumed BGR (it is converted with COLOR_BGR2RGB during
        preprocessing); non-uint8 dtypes are coerced to uint8.

        Raises:
            ValueError: on None input, wrong rank/channels, or a malformed
                preprocessed tensor.
            TypeError: if `image` is not a numpy array.
        """
        if image is None:
            raise ValueError("Input image is None")
        if not isinstance(image, np.ndarray):
            raise TypeError(f"Input is not numpy array: {type(image)}")
        if image.ndim != 3:
            raise ValueError(f"Expected HWC image, got shape={image.shape}")
        if image.shape[2] != 3:
            raise ValueError(f"Expected 3 channels, got shape={image.shape}")
        if image.dtype != np.uint8:
            image = image.astype(np.uint8)

        input_tensor, ratio, pad, orig_size = self._preprocess(image)
        # Sanity-check the tensor before handing it to ONNX Runtime.
        expected = (1, 3, self.input_height, self.input_width)
        if input_tensor.shape != expected:
            raise ValueError(
                f"Bad input tensor shape={input_tensor.shape}, expected={expected}"
            )

        outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
        # Only the first output tensor is decoded.
        return self._postprocess(outputs[0], ratio, pad, orig_size)
374
+
375
    def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
        """Horizontal-flip test-time augmentation over a single frame.

        Runs inference on the image and its mirror, maps mirrored boxes back
        into the original frame, merges both sets with per-class NMS, and
        boosts each surviving box's confidence to the maximum score found in
        its IoU cluster.
        """
        boxes_orig = self._predict_single(image)
        flipped = cv2.flip(image, 1)  # mirror around the vertical axis
        boxes_flip = self._predict_single(flipped)
        w = image.shape[1]
        # Mirror the flipped-view boxes back to original coordinates.
        # NOTE(review): uses w - x rather than w - 1 - x; off by one pixel for
        # inclusive pixel indices — confirm the convention expected downstream.
        boxes_flip = [
            BoundingBox(
                x1=w - b.x2, y1=b.y1, x2=w - b.x1, y2=b.y2,
                cls_id=b.cls_id, conf=b.conf,
            )
            for b in boxes_flip
        ]
        all_boxes = boxes_orig + boxes_flip
        if not all_boxes:
            return []

        coords = np.array(
            [[b.x1, b.y1, b.x2, b.y2] for b in all_boxes], dtype=np.float32
        )
        scores = np.array([b.conf for b in all_boxes], dtype=np.float32)
        cls_ids = np.array([b.cls_id for b in all_boxes], dtype=np.int32)

        # Merge the two views, then cap the detection count.
        hard_keep = self._per_class_hard_nms(coords, scores, cls_ids, self.iou_thres)
        if len(hard_keep) == 0:
            return []
        hard_keep = hard_keep[: self.max_det]
        # Each kept box inherits the best score within its overlap cluster.
        boosted = self._max_score_per_cluster(
            coords[hard_keep], coords, scores, self.iou_thres
        )

        return [
            BoundingBox(
                x1=all_boxes[i].x1,
                y1=all_boxes[i].y1,
                x2=all_boxes[i].x2,
                y2=all_boxes[i].y2,
                cls_id=all_boxes[i].cls_id,
                conf=float(boosted[j]),
            )
            for j, i in enumerate(hard_keep)
        ]
416
+
417
+ def predict_batch(self, batch_images: list[ndarray], offset: int,
418
+ n_keypoints: int) -> list[TVFrameResult]:
419
+ results: list[TVFrameResult] = []
420
+ for frame_number_in_batch, image in enumerate(batch_images):
421
+ try:
422
+ boxes = self._predict_tta(image)
423
+ except Exception as e:
424
+ print(f"Inference failed for frame {offset + frame_number_in_batch}: {e}")
425
+ boxes = []
426
+ results.append(
427
+ TVFrameResult(
428
+ frame_id=offset + frame_number_in_batch,
429
+ boxes=boxes,
430
+ keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
431
+ )
432
+ )
433
+ return results
readme.md ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # beverage-detect
2
+
3
+ YOLO26 ONNX detector for the Detect-beverage-detect element.
4
+ Classes: cup, bottle, can.
weights.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d25fd87ec8e9696f5e4acae3a9ab6c81e4e407ecb0e0207adaf4a6a0c64d5308
3
+ size 19407441