Add files using upload-large-folder tool
- config.yml +21 -0
- keypointdetect.pt +3 -0
- miner.py +414 -0
- objdetect.pt +3 -0
config.yml
ADDED
@@ -0,0 +1,21 @@
+Image:
+  from_base: parachutes/python:3.12
+  run_command:
+    - pip install --upgrade setuptools wheel
+    - pip install ultralytics==8.3.222 torch==2.9.0 opencv-python-headless numpy pydantic
+  set_workdir: /app
+
+NodeSelector:
+  gpu_count: 1
+  min_vram_gb_per_gpu: 18  # Slightly higher for aggressive multi-scale
+  exclude:
+    - "5090"
+    - b200
+    - h200
+    - mi300x
+
+Chute:
+  timeout_seconds: 350  # Slightly higher timeout for aggressive processing
+  concurrency: 3  # Lower concurrency to handle more intensive processing
+  max_instances: 4
+  scaling_threshold: 0.6
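
The config above appears to target a Chutes-style deployment: the Image block pins the Python environment, NodeSelector constrains which GPUs may host the service, and Chute sets runtime limits and autoscaling. As a quick sanity check before deploying, the file can be parsed and the fields this commit relies on asserted; a minimal sketch, assuming PyYAML is installed and config.yml sits in the working directory:

import yaml

# Parse the deployment config and verify the constraints the miner depends on.
with open("config.yml") as f:
    cfg = yaml.safe_load(f)

assert cfg["Image"]["from_base"] == "parachutes/python:3.12"
assert cfg["NodeSelector"]["min_vram_gb_per_gpu"] >= 18  # multi-scale needs VRAM headroom
assert cfg["Chute"]["timeout_seconds"] == 350
assert "mi300x" in cfg["NodeSelector"]["exclude"]
print("config.yml looks consistent")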
keypointdetect.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dd10dba85895c92760cdb5a99c5cfca899c68f361a66c5448f38a187280ee1f
+size 6849672
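
Both model checkpoints in this commit are Git LFS pointer files, so a plain clone yields only the three pointer lines above; the actual weights must be materialized (for example with git lfs pull) before YOLO() in miner.py can load them. A sketch using huggingface_hub, which resolves LFS pointers transparently; the repo id below is a placeholder, not taken from this commit:

from pathlib import Path

from huggingface_hub import hf_hub_download

# Placeholder repo id for illustration; substitute the real Hub repo.
REPO_ID = "your-user/your-miner-repo"

weights_path = Path(hf_hub_download(repo_id=REPO_ID, filename="keypointdetect.pt"))
# The pointer records the blob's size, so a quick integrity check is possible.
assert weights_path.stat().st_size == 6849672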
miner.py
ADDED
@@ -0,0 +1,414 @@
+from pathlib import Path
+from typing import List, Tuple, Dict, Optional
+
+from ultralytics import YOLO
+from numpy import ndarray
+from pydantic import BaseModel
+import numpy as np
+import cv2
+
+
+class BoundingBox(BaseModel):
+    x1: int
+    y1: int
+    x2: int
+    y2: int
+    cls_id: int
+    conf: float
+
+
+class TVFrameResult(BaseModel):
+    frame_id: int
+    boxes: List[BoundingBox]
+    keypoints: List[Tuple[int, int]]
+
+
+class Miner:
+    # Optimized for enumeration and placement - more aggressive detection
+    QUASI_TOTAL_IOA: float = 0.88  # Slightly lower to keep more detections
+    SMALL_CONTAINED_IOA: float = 0.82  # More lenient for small objects
+    SMALL_RATIO_MAX: float = 0.55  # Allow slightly larger size differences
+    SINGLE_PLAYER_HUE_PIVOT: float = 90.0
+    CORNER_INDICES = {0, 5, 24, 29}
+
+    # Enumeration-specific constants
+    AGGRESSIVE_SCALES = [1.0, 1.3, 0.7, 1.1, 0.9]  # More scales for better coverage
+    ENUMERATION_NMS_THRESHOLD = 0.4  # Lower NMS for better enumeration
+    SMALL_OBJECT_CONF_BOOST = 1.15  # Boost confidence for small objects
+
+    def __init__(self, path_hf_repo: Path) -> None:
+        self.bbox_model = YOLO(path_hf_repo / "objdetect.pt")
+        print("BBox Model (objdetect.pt) Loaded")
+        self.keypoints_model = YOLO(path_hf_repo / "keypointdetect.pt")
+        print("Keypoints Model (keypointdetect.pt) Loaded")
+
+    def __repr__(self) -> str:
+        return (
+            f"BBox Model: {type(self.bbox_model).__name__}\n"
+            f"Keypoints Model: {type(self.keypoints_model).__name__}"
+        )
+
+    @staticmethod
+    def _clip_box_to_image(x1: int, y1: int, x2: int, y2: int, w: int, h: int) -> Tuple[int, int, int, int]:
+        x1 = max(0, min(int(x1), w - 1))
+        y1 = max(0, min(int(y1), h - 1))
+        x2 = max(0, min(int(x2), w - 1))
+        y2 = max(0, min(int(y2), h - 1))
+        if x2 <= x1:
+            x2 = min(w - 1, x1 + 1)
+        if y2 <= y1:
+            y2 = min(h - 1, y1 + 1)
+        return x1, y1, x2, y2
+
+    @staticmethod
+    def _area(bb: BoundingBox) -> int:
+        return max(0, bb.x2 - bb.x1) * max(0, bb.y2 - bb.y1)
+
+    @staticmethod
+    def _intersect_area(a: BoundingBox, b: BoundingBox) -> int:
+        ix1 = max(a.x1, b.x1)
+        iy1 = max(a.y1, b.y1)
+        ix2 = min(a.x2, b.x2)
+        iy2 = min(a.y2, b.y2)
+        if ix2 <= ix1 or iy2 <= iy1:
+            return 0
+        return (ix2 - ix1) * (iy2 - iy1)
+
+    @staticmethod
+    def _center(bb: BoundingBox) -> Tuple[float, float]:
+        return (0.5 * (bb.x1 + bb.x2), 0.5 * (bb.y1 + bb.y2))
+
+    @staticmethod
+    def _mean_hs(img_bgr: np.ndarray) -> Tuple[float, float]:
+        hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
+        return float(np.mean(hsv[:, :, 0])), float(np.mean(hsv[:, :, 1]))
+
+    def _hs_feature_from_roi(self, img_bgr: np.ndarray, box: BoundingBox) -> np.ndarray:
+        H, W = img_bgr.shape[:2]
+        x1, y1, x2, y2 = self._clip_box_to_image(box.x1, box.y1, box.x2, box.y2, W, H)
+        roi = img_bgr[y1:y2, x1:x2]
+        if roi.size == 0:
+            return np.array([0.0, 0.0], dtype=np.float32)
+        hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
+        lower_green = np.array([35, 60, 60], dtype=np.uint8)
+        upper_green = np.array([85, 255, 255], dtype=np.uint8)
+        green_mask = cv2.inRange(hsv, lower_green, upper_green)
+        non_green_mask = cv2.bitwise_not(green_mask)
+        num_non_green = int(np.count_nonzero(non_green_mask))
+        total = hsv.shape[0] * hsv.shape[1]
+        if num_non_green > max(50, total // 20):
+            h_vals = hsv[:, :, 0][non_green_mask > 0]
+            s_vals = hsv[:, :, 1][non_green_mask > 0]
+            h_mean = float(np.mean(h_vals)) if h_vals.size else 0.0
+            s_mean = float(np.mean(s_vals)) if s_vals.size else 0.0
+        else:
+            h_mean, s_mean = self._mean_hs(roi)
+        return np.array([h_mean, s_mean], dtype=np.float32)
+
+    def _ioa(self, a: BoundingBox, b: BoundingBox) -> float:
+        inter = self._intersect_area(a, b)
+        aa = self._area(a)
+        if aa <= 0:
+            return 0.0
+        return inter / aa
+
+    def suppress_quasi_total_containment(self, boxes: List[BoundingBox]) -> List[BoundingBox]:
+        if len(boxes) <= 1:
+            return boxes
+        keep = [True] * len(boxes)
+        for i in range(len(boxes)):
+            if not keep[i]:
+                continue
+            for j in range(len(boxes)):
+                if i == j or not keep[j]:
+                    continue
+                ioa_i_in_j = self._ioa(boxes[i], boxes[j])
+                if ioa_i_in_j >= self.QUASI_TOTAL_IOA:
+                    keep[i] = False
+                    break
+        return [bb for bb, k in zip(boxes, keep) if k]
+
+    def suppress_small_contained(self, boxes: List[BoundingBox]) -> List[BoundingBox]:
+        if len(boxes) <= 1:
+            return boxes
+        keep = [True] * len(boxes)
+        areas = [self._area(bb) for bb in boxes]
+        for i in range(len(boxes)):
+            if not keep[i]:
+                continue
+            for j in range(len(boxes)):
+                if i == j or not keep[j]:
+                    continue
+                ai, aj = areas[i], areas[j]
+                if ai == 0 or aj == 0:
+                    continue
+                if ai <= aj:
+                    ratio = ai / aj
+                    if ratio <= self.SMALL_RATIO_MAX:
+                        ioa_i_in_j = self._ioa(boxes[i], boxes[j])
+                        if ioa_i_in_j >= self.SMALL_CONTAINED_IOA:
+                            keep[i] = False
+                            break
+                else:
+                    ratio = aj / ai
+                    if ratio <= self.SMALL_RATIO_MAX:
+                        ioa_j_in_i = self._ioa(boxes[j], boxes[i])
+                        if ioa_j_in_i >= self.SMALL_CONTAINED_IOA:
+                            keep[j] = False
+        return [bb for bb, k in zip(boxes, keep) if k]
+
+    def _assign_players_two_clusters(self, features: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
+        _, labels, centers = cv2.kmeans(
+            np.float32(features),
+            K=2,
+            bestLabels=None,
+            criteria=criteria,
+            attempts=5,
+            flags=cv2.KMEANS_PP_CENTERS,
+        )
+        return labels.reshape(-1), centers
+
+    def _reclass_extra_goalkeepers(
+        self,
+        img_bgr: np.ndarray,
+        boxes: List[BoundingBox],
+        cluster_centers: Optional[np.ndarray],
+    ) -> None:
+        gk_idxs = [i for i, bb in enumerate(boxes) if int(bb.cls_id) == 1]
+        if len(gk_idxs) <= 1:
+            return
+        gk_idxs_sorted = sorted(gk_idxs, key=lambda i: boxes[i].conf, reverse=True)
+        keep_gk_idx = gk_idxs_sorted[0]
+        to_reclass = gk_idxs_sorted[1:]
+        for gki in to_reclass:
+            hs_gk = self._hs_feature_from_roi(img_bgr, boxes[gki])
+            if cluster_centers is not None:
+                d0 = float(np.linalg.norm(hs_gk - cluster_centers[0]))
+                d1 = float(np.linalg.norm(hs_gk - cluster_centers[1]))
+                assign_cls = 6 if d0 <= d1 else 7
+            else:
+                assign_cls = 6 if float(hs_gk[0]) < self.SINGLE_PLAYER_HUE_PIVOT else 7
+            boxes[gki].cls_id = int(assign_cls)
+
+    def _aggressive_multi_scale_detection(self, img_bgr: np.ndarray) -> List[BoundingBox]:
+        """
+        Aggressive Multi-Scale Object Detection optimized for enumeration and placement.
+        Uses 5 scales with confidence boosting for small objects.
+        """
+        H, W = img_bgr.shape[:2]
+        all_detections = []
+
+        for scale in self.AGGRESSIVE_SCALES:
+            if scale != 1.0:
+                new_h, new_w = int(H * scale), int(W * scale)
+                # More lenient dimension constraints for aggressive detection
+                if new_h > 2560 or new_w > 2560 or new_h < 256 or new_w < 256:
+                    continue
+                scaled_img = cv2.resize(img_bgr, (new_w, new_h))
+            else:
+                scaled_img = img_bgr
+                new_h, new_w = H, W
+
+            # Run detection on scaled image
+            results = self.bbox_model.predict([scaled_img], verbose=False)
+
+            if results and hasattr(results[0], "boxes") and results[0].boxes is not None:
+                for box in results[0].boxes.data:
+                    x1, y1, x2, y2, conf, cls_id = box.tolist()
+
+                    # Scale coordinates back to original image size
+                    if scale != 1.0:
+                        x1 = x1 / scale
+                        y1 = y1 / scale
+                        x2 = x2 / scale
+                        y2 = y2 / scale
+
+                    # Clip to original image bounds
+                    x1, y1, x2, y2 = self._clip_box_to_image(x1, y1, x2, y2, W, H)
+
+                    # Calculate box area for confidence boosting
+                    box_area = (x2 - x1) * (y2 - y1)
+
+                    # Aggressive confidence boosting based on scale and size
+                    if scale == 1.3 and box_area < 1500:  # Very small objects at high scale
+                        conf *= self.SMALL_OBJECT_CONF_BOOST
+                    elif scale == 1.1 and box_area < 3000:  # Small objects at medium scale
+                        conf *= 1.10
+                    elif scale == 0.7 and box_area > 15000:  # Large objects at small scale
+                        conf *= 1.08
+                    elif scale == 0.9 and box_area > 8000:  # Medium-large objects
+                        conf *= 1.05
+
+                    # Extra boost for small objects regardless of scale
+                    if box_area < 1000:
+                        conf *= 1.12
+
+                    all_detections.append(BoundingBox(
+                        x1=int(x1), y1=int(y1), x2=int(x2), y2=int(y2),
+                        cls_id=int(cls_id), conf=float(conf)
+                    ))
+
+        # Apply enumeration-optimized NMS
+        return self._enumeration_optimized_nms(all_detections)
+
+    def _enumeration_optimized_nms(self, boxes: List[BoundingBox]) -> List[BoundingBox]:
+        """
+        Enumeration-optimized NMS with lower threshold to preserve more detections.
+        """
+        if not boxes:
+            return []
+
+        # Group by class for class-specific NMS
+        boxes_by_class = {}
+        for box in boxes:
+            if box.cls_id not in boxes_by_class:
+                boxes_by_class[box.cls_id] = []
+            boxes_by_class[box.cls_id].append(box)
+
+        final_boxes = []
+
+        for cls_id, class_boxes in boxes_by_class.items():
+            # Sort by confidence
+            class_boxes_sorted = sorted(class_boxes, key=lambda x: x.conf, reverse=True)
+            keep = []
+
+            while class_boxes_sorted:
+                # Take the highest confidence box
+                current = class_boxes_sorted.pop(0)
+                keep.append(current)
+
+                # Remove boxes with high IoU (lower threshold for enumeration)
+                remaining = []
+                for box in class_boxes_sorted:
+                    iou = self._calculate_iou(current, box)
+                    if iou < self.ENUMERATION_NMS_THRESHOLD:
+                        remaining.append(box)
+                    elif box.conf > current.conf * 0.95:  # Keep very close confidence boxes
+                        remaining.append(box)
+
+                class_boxes_sorted = remaining
+
+            final_boxes.extend(keep)
+
+        return final_boxes
+
+    def _calculate_iou(self, box1: BoundingBox, box2: BoundingBox) -> float:
+        """Calculate Intersection over Union (IoU) between two bounding boxes."""
+        # Calculate intersection
+        x1 = max(box1.x1, box2.x1)
+        y1 = max(box1.y1, box2.y1)
+        x2 = min(box1.x2, box2.x2)
+        y2 = min(box1.y2, box2.y2)
+
+        if x2 <= x1 or y2 <= y1:
+            return 0.0
+
+        intersection = (x2 - x1) * (y2 - y1)
+
+        # Calculate union
+        area1 = (box1.x2 - box1.x1) * (box1.y2 - box1.y1)
+        area2 = (box2.x2 - box2.x1) * (box2.y2 - box2.y1)
+        union = area1 + area2 - intersection
+
+        return intersection / union if union > 0 else 0.0
+
+    def predict_batch(
+        self,
+        batch_images: List[ndarray],
+        offset: int,
+        n_keypoints: int,
+        task_type: Optional[str] = None,
+    ) -> List[TVFrameResult]:
+        process_objects = task_type is None or task_type == "object"
+        process_keypoints = task_type is None or task_type == "keypoint"
+        bboxes: Dict[int, List[BoundingBox]] = {}
+        if process_objects:
+            # Use aggressive multi-scale detection for optimal enumeration and placement
+            for frame_idx_in_batch, img_bgr in enumerate(batch_images):
+                boxes = self._aggressive_multi_scale_detection(img_bgr)
+
+                # Handle multiple football detections (keep best one)
+                footballs = [bb for bb in boxes if int(bb.cls_id) == 0]
+                if len(footballs) > 1:
+                    best_ball = max(footballs, key=lambda b: b.conf)
+                    boxes = [bb for bb in boxes if int(bb.cls_id) != 0]
+                    boxes.append(best_ball)
+
+                # Apply more lenient suppression for better enumeration
+                boxes = self.suppress_quasi_total_containment(boxes)
+                boxes = self.suppress_small_contained(boxes)
+
+                # Team classification for players
+                player_indices: List[int] = []
+                player_feats: List[np.ndarray] = []
+                for i, bb in enumerate(boxes):
+                    if int(bb.cls_id) == 2:
+                        hs = self._hs_feature_from_roi(img_bgr, bb)
+                        player_indices.append(i)
+                        player_feats.append(hs)
+
+                cluster_centers: Optional[np.ndarray] = None
+                n_players = len(player_feats)
+                if n_players >= 2:
+                    feats = np.vstack(player_feats)
+                    labels, centers = self._assign_players_two_clusters(feats)
+                    order = np.argsort(centers[:, 0])
+                    centers = centers[order]
+                    remap = {old_idx: new_idx for new_idx, old_idx in enumerate(order)}
+                    labels = np.vectorize(remap.get)(labels)
+                    cluster_centers = centers
+                    for idx_in_list, lbl in zip(player_indices, labels):
+                        boxes[idx_in_list].cls_id = 6 if int(lbl) == 0 else 7
+                elif n_players == 1:
+                    hue, _ = player_feats[0]
+                    boxes[player_indices[0]].cls_id = 6 if float(hue) < self.SINGLE_PLAYER_HUE_PIVOT else 7
+
+                self._reclass_extra_goalkeepers(img_bgr, boxes, cluster_centers)
+                bboxes[offset + frame_idx_in_batch] = boxes
+        keypoints: Dict[int, List[Tuple[int, int]]] = {}
+        if process_keypoints:
+            keypoints_model_results = self.keypoints_model.predict(batch_images)
+        else:
+            keypoints_model_results = None
+        if keypoints_model_results is not None:
+            for frame_idx_in_batch, detection in enumerate(keypoints_model_results):
+                if not hasattr(detection, "keypoints") or detection.keypoints is None:
+                    continue
+                frame_keypoints_with_conf: List[Tuple[int, int, float]] = []
+                for i, part_points in enumerate(detection.keypoints.data):
+                    for k_id, (x, y, _) in enumerate(part_points):
+                        confidence = float(detection.keypoints.conf[i][k_id])
+                        frame_keypoints_with_conf.append((int(x), int(y), confidence))
+                if len(frame_keypoints_with_conf) < n_keypoints:
+                    frame_keypoints_with_conf.extend(
+                        [(0, 0, 0.0)] * (n_keypoints - len(frame_keypoints_with_conf))
+                    )
+                else:
+                    frame_keypoints_with_conf = frame_keypoints_with_conf[:n_keypoints]
+                filtered_keypoints: List[Tuple[int, int]] = []
+                for idx, (x, y, confidence) in enumerate(frame_keypoints_with_conf):
+                    if idx in self.CORNER_INDICES:
+                        if confidence < 0.3:
+                            filtered_keypoints.append((0, 0))
+                        else:
+                            filtered_keypoints.append((int(x), int(y)))
+                    else:
+                        if confidence < 0.5:
+                            filtered_keypoints.append((0, 0))
+                        else:
+                            filtered_keypoints.append((int(x), int(y)))
+                keypoints[offset + frame_idx_in_batch] = filtered_keypoints
+        results: List[TVFrameResult] = []
+        for frame_number in range(offset, offset + len(batch_images)):
+            results.append(
+                TVFrameResult(
+                    frame_id=frame_number,
+                    boxes=bboxes.get(frame_number, []),
+                    keypoints=keypoints.get(
+                        frame_number,
+                        [(0, 0) for _ in range(n_keypoints)],
+                    ),
+                )
+            )
+        return results
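
To make the intended call pattern concrete, here is a minimal usage sketch (not part of the commit). It assumes the two .pt weights sit alongside miner.py in the current directory, that frames are decoded to BGR with OpenCV, and that the keypoint model emits 32 pitch keypoints; the code itself only requires indices up to 29 (CORNER_INDICES), so the exact count is an assumption:

import cv2
from pathlib import Path

from miner import Miner

miner = Miner(Path("."))  # directory containing objdetect.pt and keypointdetect.pt

frame = cv2.imread("frame_000.jpg")  # hypothetical frame; BGR, as predict_batch expects
results = miner.predict_batch([frame], offset=0, n_keypoints=32)

for res in results:
    visible = sum(1 for kp in res.keypoints if kp != (0, 0))
    print(f"frame {res.frame_id}: {len(res.boxes)} boxes, {visible} confident keypoints")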
objdetect.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bbacfcb38e38b1b8816788e9e6e845160533719a0b87b693d58b932380d0d28
+size 152961687
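
One behavioral subtlety of _enumeration_optimized_nms in miner.py is that it is deliberately not a strict NMS: a same-class box that overlaps the current winner above the 0.4 IoU threshold still survives if its confidence is within 5% of the winner's, leaving the later containment suppressors to arbitrate. A toy demonstration (bypassing __init__ so no model weights are needed):

from miner import BoundingBox, Miner

nms = Miner.__new__(Miner)  # skip __init__; this demo needs no weights

a = BoundingBox(x1=0, y1=0, x2=100, y2=100, cls_id=2, conf=0.90)
b = BoundingBox(x1=10, y1=0, x2=110, y2=100, cls_id=2, conf=0.88)  # IoU ~0.82, conf within 5% of a
c = BoundingBox(x1=5, y1=0, x2=105, y2=100, cls_id=2, conf=0.50)   # IoU ~0.90, low conf

kept = nms._enumeration_optimized_nms([a, b, c])
print([bb.conf for bb in kept])  # [0.9, 0.88]: b survives the 0.95 rule, c is suppressed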