scorevision: push artifact

- README.md +20 -25
- class_names.txt +0 -79
- miner.py +107 -137
- model_type.json +1 -1
- weights.onnx +2 -2
README.md
CHANGED
@@ -1,13 +1,14 @@
 ---
 tags:
 - element_type:detect
-- model:yolov11-
-- object:
+- model:yolov11-nano
+- object:person
 manako:
   description: >
+    YOLOv11-nano fine-tuned for ground-level CCTV person detection on SN44.
+    Trained on CrowdHuman (15k, dense crowds) + BDD100K street pedestrians.
+    Conf threshold raised to 0.35 to minimise false positives.
+  source: meaculpitt/Detect-Person
   prompt_hints: null
   input_payload:
   - name: frame
@@ -16,32 +17,26 @@ manako:
   output_payload:
   - name: detections
     type: detections
-    description: Bounding boxes for detected
+    description: Bounding boxes for detected persons
-  evaluation_score: 0.
+  evaluation_score: 0.5563
   last_benchmark:
-    type:
-    ran_at: 2026-03-
+    type: coco_val2017
+    ran_at: '2026-03-25T02:58:57+00:00'
     result_path: null
 ---
 
-# Detect-
+# Detect-Person — SN44
 
+YOLOv11-nano fine-tuned for ground-level CCTV person detection.
 
 | Metric | Value |
 |--------|-------|
-| mAP@50 |
+| mAP@50 (COCO val2017) | 55.63% |
+| Precision (conf=0.35) | 56.86% |
+| Recall | 50.67% |
+| Baseline to beat | 37.55% |
+| Model size | 5.6 MB |
+| Input size | 1280×1280 |
 
-| Output ID | Class |
-|-----------|-------|
-| 0 | car |
-| 1 | bus |
-| 2 | truck |
-| 3 | motorcycle |
+**Training data**: CrowdHuman (15k) + BDD100K (3.2k pedestrians)
+**Validation**: COCO val2017 persons (2,693 images)
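The `manako` block in the updated card is machine-readable. Below is a minimal sketch of pulling the tags and score back out of the frontmatter; it assumes PyYAML, a local copy of this README.md, and that `evaluation_score` and `last_benchmark` nest under `manako` as the hunk header suggests.

```python
# Hypothetical reader for the card metadata above; PyYAML assumed installed.
import yaml

text = open("README.md", encoding="utf-8").read()
frontmatter = text.split("---")[1]  # between the first two '---' fences
meta = yaml.safe_load(frontmatter)

print(meta["tags"])  # ['element_type:detect', 'model:yolov11-nano', 'object:person']
# Assumes evaluation_score / last_benchmark nest under manako:
print(meta["manako"]["evaluation_score"])        # 0.5563
print(meta["manako"]["last_benchmark"]["type"])  # coco_val2017
```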
class_names.txt
CHANGED
@@ -1,80 +1 @@
 person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-couch
-potted plant
-bed
-dining table
-toilet
-tv
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-toothbrush
miner.py
CHANGED
@@ -1,13 +1,7 @@
 """
-Score Vision SN44 —
-TTA (2-pass: original + hflip) + inline WBF.
-
-
-Model: YOLO11s ONNX, 4 classes trained as:
-    0 = car, 1 = bus, 2 = truck, 3 = motorcycle
-
-Official submission order (remapped in MODEL_TO_OUT):
-    0 = bus, 1 = car, 2 = truck, 3 = motorcycle
+Score Vision SN44 — DetectPerson miner v6 (2026-03-27).
+TTA (2-pass: original + hflip) + inline WBF. Stretch resize preprocessing.
+Single class: person (cls_id=0).
 """
 
 from pathlib import Path
@@ -19,93 +13,75 @@ import onnxruntime as ort
 from numpy import ndarray
 from pydantic import BaseModel
 
-
-OUT_NAMES = ["bus", "car", "truck", "motorcycle"]
-NUM_CLASSES = 4
-
-IMG_SIZE = 1280
-# Per-class confidence thresholds (output class IDs):
-# 0=bus, 1=car, 2=truck, 3=motorcycle
-CONF_PER_CLASS = {0: 0.33, 1: 0.50, 2: 0.40, 3: 0.36}
-CONF_THRESH_DEFAULT = 0.35  # fallback
+CONF_THRESH = 0.35
 TTA_CONF_THRESH = 0.25
 IOU_THRESH = 0.45
-WBF_IOU_THR = 0.
+WBF_IOU_THR = 0.45
 WBF_SKIP_THR = 0.0001
 
 
 def _wbf(boxes_list: list[np.ndarray], scores_list: list[np.ndarray],
-    """Weighted Boxes Fusion
+         iou_thr: float = 0.45, skip_box_thr: float = 0.0001
+         ) -> tuple[np.ndarray, np.ndarray]:
+    """Weighted Boxes Fusion for single-class detection. Boxes in [0,1] normalized coords."""
     if not boxes_list:
-        return np.empty((0, 4)), np.empty(0)
+        return np.empty((0, 4)), np.empty(0)
 
-    all_boxes, all_scores
-    for
+    all_boxes, all_scores = [], []
+    for bx, sc in zip(boxes_list, scores_list):
         for i in range(len(bx)):
             if sc[i] < skip_box_thr:
                 continue
             all_boxes.append(bx[i])
             all_scores.append(sc[i])
-            all_labels.append(int(lb[i]))
 
     if not all_boxes:
-        return np.empty((0, 4)), np.empty(0)
+        return np.empty((0, 4)), np.empty(0)
 
     all_boxes = np.array(all_boxes)
     all_scores = np.array(all_scores)
-            clusters.append([i])
-            cluster_boxes.append(cls_boxes[i].copy())
-
-        for c_idx, idxs in enumerate(clusters):
-            weights = cls_scores[idxs]
-            score = weights.mean()
-            fused_boxes.append(cluster_boxes[c_idx])
-            fused_scores.append(score)
-            fused_labels.append(cls)
+
+    order = all_scores.argsort()[::-1]
+    all_boxes = all_boxes[order]
+    all_scores = all_scores[order]
+
+    clusters: list[list[int]] = []
+    cluster_boxes: list[np.ndarray] = []
+
+    for i in range(len(all_boxes)):
+        matched = -1
+        best_iou = iou_thr
+        for c_idx, c_box in enumerate(cluster_boxes):
+            xx1 = max(all_boxes[i, 0], c_box[0])
+            yy1 = max(all_boxes[i, 1], c_box[1])
+            xx2 = min(all_boxes[i, 2], c_box[2])
+            yy2 = min(all_boxes[i, 3], c_box[3])
+            inter = max(0, xx2 - xx1) * max(0, yy2 - yy1)
+            a1 = (all_boxes[i, 2] - all_boxes[i, 0]) * (all_boxes[i, 3] - all_boxes[i, 1])
+            a2 = (c_box[2] - c_box[0]) * (c_box[3] - c_box[1])
+            iou = inter / (a1 + a2 - inter + 1e-9)
+            if iou > best_iou:
+                best_iou = iou
+                matched = c_idx
+        if matched >= 0:
+            clusters[matched].append(i)
+            idxs = clusters[matched]
+            weights = all_scores[idxs]
+            w_sum = weights.sum()
+            cluster_boxes[matched] = (all_boxes[idxs] * weights[:, None]).sum(0) / w_sum
+        else:
+            clusters.append([i])
+            cluster_boxes.append(all_boxes[i].copy())
+
+    fused_boxes, fused_scores = [], []
+    for c_idx, idxs in enumerate(clusters):
+        weights = all_scores[idxs]
+        fused_boxes.append(cluster_boxes[c_idx])
+        fused_scores.append(weights.mean())
 
     if not fused_boxes:
-        return np.empty((0, 4)), np.empty(0)
-    return np.array(fused_boxes), np.array(fused_scores)
+        return np.empty((0, 4)), np.empty(0)
+    return np.array(fused_boxes), np.array(fused_scores)
 
 
 class BoundingBox(BaseModel):
@@ -126,100 +102,100 @@ class TVFrameResult(BaseModel):
 class Miner:
     def __init__(self, path_hf_repo: Path) -> None:
         self.path_hf_repo = path_hf_repo
+        self.class_names = ['person']
         self.session = ort.InferenceSession(
             str(path_hf_repo / "weights.onnx"),
             providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
         )
         self.input_name = self.session.get_inputs()[0].name
+        input_shape = self.session.get_inputs()[0].shape
+        self.input_h = int(input_shape[2])
+        self.input_w = int(input_shape[3])
+        self.conf_threshold = CONF_THRESH
         self.tta_conf_threshold = TTA_CONF_THRESH
         self.iou_threshold = IOU_THRESH
 
     def __repr__(self) -> str:
-        return f"
-
-    def
-        h, w =
-        )
-        return img_p, r, pad_l, pad_t
-
-    def _preprocess(self, image_bgr: ndarray) -> tuple[np.ndarray, float, int, int]:
-        img_p, ratio, pad_l, pad_t = self._letterbox(image_bgr)
-        img_rgb = cv2.cvtColor(img_p, cv2.COLOR_BGR2RGB)
-        inp = img_rgb.astype(np.float32) / 255.0
-        inp = np.ascontiguousarray(inp.transpose(2, 0, 1)[np.newaxis])
-        return inp, ratio, pad_l, pad_t
-
-    def _decode_raw(self, raw: np.ndarray, ratio: float, pad_l: int, pad_t: int,
-                    orig_w: int, orig_h: int, conf_thresh: float | None = None
-                    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+        return f"DetectPerson Miner v6 2-pass TTA + WBF iou={WBF_IOU_THR}"
+
+    def _preprocess(self, image_bgr: ndarray) -> tuple[np.ndarray, tuple[int, int]]:
+        h, w = image_bgr.shape[:2]
+        rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
+        resized = cv2.resize(rgb, (self.input_w, self.input_h))
+        x = resized.astype(np.float32) / 255.0
+        x = np.transpose(x, (2, 0, 1))[None, ...]
+        return x, (h, w)
+
+    def _decode_raw(self, raw: np.ndarray, orig_h: int, orig_w: int,
+                    conf_thresh: float | None = None) -> tuple[np.ndarray, np.ndarray]:
         pred = raw[0]
+        if pred.ndim != 2:
+            return np.empty((0, 4)), np.empty(0)
         if pred.shape[0] < pred.shape[1]:
-            pred = pred.
+            pred = pred.transpose(1, 0)
+        if pred.shape[1] < 5:
+            return np.empty((0, 4)), np.empty(0)
+
+        boxes = pred[:, :4]
         cls_scores = pred[:, 4:]
+        if cls_scores.shape[1] == 0:
+            return np.empty((0, 4)), np.empty(0)
+
        confs = np.max(cls_scores, axis=1)
-        thresh = conf_thresh if conf_thresh is not None else
+        thresh = conf_thresh if conf_thresh is not None else self.conf_threshold
+        keep = confs >= thresh
+        boxes, confs = boxes[keep], confs[keep]
+        if boxes.shape[0] == 0:
+            return np.empty((0, 4)), np.empty(0)
+
+        sx = orig_w / float(self.input_w)
+        sy = orig_h / float(self.input_h)
+        cx, cy, bw, bh = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
+        x1 = np.clip((cx - bw / 2) * sx, 0, orig_w)
+        y1 = np.clip((cy - bh / 2) * sy, 0, orig_h)
+        x2 = np.clip((cx + bw / 2) * sx, 0, orig_w)
+        y2 = np.clip((cy + bh / 2) * sy, 0, orig_h)
+        return np.stack([x1, y1, x2, y2], axis=1), confs
 
     def _run_single_pass(self, image_bgr: ndarray, conf_thresh: float | None = None
-                         ) -> tuple[np.ndarray, np.ndarray
+                         ) -> tuple[np.ndarray, np.ndarray]:
         orig_h, orig_w = image_bgr.shape[:2]
-        inp,
+        inp, _ = self._preprocess(image_bgr)
         raw = self.session.run(None, {self.input_name: inp})[0]
-        return self._decode_raw(raw,
+        return self._decode_raw(raw, orig_h, orig_w, conf_thresh)
 
     def _infer_single(self, image_bgr: ndarray) -> list[BoundingBox]:
         orig_h, orig_w = image_bgr.shape[:2]
 
-        all_boxes, all_scores
+        all_boxes, all_scores = [], []
 
-        def _collect(boxes, confs
+        def _collect(boxes, confs):
             if len(boxes) == 0:
                 return
-            out_cls = np.array([MODEL_TO_OUT[int(c)] for c in cls_ids])
             norm = boxes.copy()
             norm[:, [0, 2]] /= orig_w
             norm[:, [1, 3]] /= orig_h
             norm = np.clip(norm, 0, 1)
             all_boxes.append(norm)
             all_scores.append(confs)
-            all_labels.append(out_cls)
 
         # Pass 1: original (low threshold for TTA)
         _collect(*self._run_single_pass(image_bgr, self.tta_conf_threshold))
 
         # Pass 2: horizontal flip
         flipped = cv2.flip(image_bgr, 1)
-        boxes_f, confs_f
+        boxes_f, confs_f = self._run_single_pass(flipped, self.tta_conf_threshold)
         if len(boxes_f):
             boxes_f[:, 0], boxes_f[:, 2] = orig_w - boxes_f[:, 2], orig_w - boxes_f[:, 0]
-        _collect(boxes_f, confs_f
+        _collect(boxes_f, confs_f)
 
         # (1.2x crop pass REMOVED — adds more FPs than TPs)
 
         if not all_boxes:
             return []
 
-        fused_boxes, fused_scores
-            all_boxes, all_scores,
+        fused_boxes, fused_scores = _wbf(
+            all_boxes, all_scores,
             iou_thr=WBF_IOU_THR, skip_box_thr=WBF_SKIP_THR,
         )
         if len(fused_boxes) == 0:
@@ -229,16 +205,10 @@ class Miner:
         fused_boxes[:, [0, 2]] *= orig_w
         fused_boxes[:, [1, 3]] *= orig_h
 
-        # Apply
-        ])
-        if not keep_mask.any():
-            return []
-        fused_boxes = fused_boxes[keep_mask]
-        fused_scores = fused_scores[keep_mask]
-        fused_labels = fused_labels[keep_mask]
+        # Apply final conf threshold after WBF
+        keep = fused_scores >= self.conf_threshold
+        fused_boxes = fused_boxes[keep]
+        fused_scores = fused_scores[keep]
 
         out: list[BoundingBox] = []
         for i in range(len(fused_boxes)):
@@ -248,7 +218,7 @@
             y1=max(0, min(orig_h, math.floor(b[1]))),
             x2=max(0, min(orig_w, math.ceil(b[2]))),
             y2=max(0, min(orig_h, math.ceil(b[3]))),
-            cls_id=
+            cls_id=0,
             conf=max(0.0, min(1.0, float(fused_scores[i]))),
         ))
         return out
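Taken together, the new pipeline is: stretch-resize to the ONNX input size, one forward pass each for the original and hflipped frame at the looser TTA threshold, WBF fusion, then the final 0.35 cut. A minimal sketch of driving it end to end, assuming the repo above has been downloaded to a local folder and `miner.py` is importable; the folder and frame paths are hypothetical:

```python
# Hypothetical driver for the Miner shown in this diff.
from pathlib import Path

import cv2

from miner import Miner

miner = Miner(Path("./Detect-Person"))  # folder holding weights.onnx etc.
frame = cv2.imread("cctv_frame.jpg")    # any BGR frame readable by OpenCV
for det in miner._infer_single(frame):
    # cls_id is always 0 ("person") in this single-class build
    print(det.x1, det.y1, det.x2, det.y2, round(det.conf, 3))
```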
model_type.json
CHANGED
@@ -1 +1 @@
-{"task_type": "object-detection", "model_type": "yolov11-
+{"task_type": "object-detection", "model_type": "yolov11-nano", "deploy": "2026-03-26T07:46Z"}
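A quick sanity check of the deploy metadata, assuming a local checkout with the file name used in this commit:

```python
import json

with open("model_type.json") as f:
    meta = json.load(f)

assert meta["task_type"] == "object-detection"
print(meta["model_type"], meta["deploy"])  # yolov11-nano 2026-03-26T07:46Z
```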
weights.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f32ed65b9024a69693f675d494c7fc813a964766c54b241464a463377342da60
+size 5607862
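The weights file is stored as a Git LFS pointer, so only the oid and size live in the repo. A sketch of verifying a fetched copy against the pointer above, assuming `git lfs pull` has already materialized weights.onnx locally:

```python
# Check the materialized ONNX binary against the LFS pointer's oid/size.
import hashlib

with open("weights.onnx", "rb") as f:
    data = f.read()

assert len(data) == 5607862, "size differs from LFS pointer"
assert hashlib.sha256(data).hexdigest() == (
    "f32ed65b9024a69693f675d494c7fc813a964766c54b241464a463377342da60"
), "sha256 differs from LFS pointer"
print("weights.onnx matches the LFS pointer")
```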