scorevision: push artifact
Browse files
miner.py
CHANGED
|
@@ -11,6 +11,7 @@ from typing import Any
|
|
| 11 |
|
| 12 |
import cv2
|
| 13 |
import numpy as np
|
|
|
|
| 14 |
from numpy import ndarray
|
| 15 |
from pydantic import BaseModel
|
| 16 |
|
|
@@ -30,6 +31,207 @@ class TVFrameResult(BaseModel):
|
|
| 30 |
keypoints: list[tuple[int, int]]
|
| 31 |
|
| 32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
class Miner:
|
| 34 |
"""
|
| 35 |
Your miner engine.
|
|
@@ -71,7 +273,7 @@ class Miner:
|
|
| 71 |
"Add it to chute_config.yml (pip install segment-anything)."
|
| 72 |
) from e
|
| 73 |
|
| 74 |
-
device = "cuda" if
|
| 75 |
self.sam = sam_model_registry[model_type](checkpoint=str(ckpt_path))
|
| 76 |
self.sam.to(device=device)
|
| 77 |
|
|
@@ -85,10 +287,13 @@ class Miner:
|
|
| 85 |
)
|
| 86 |
|
| 87 |
# ---------------- Keypoints ----------------
|
| 88 |
-
#
|
| 89 |
-
|
| 90 |
-
self.
|
| 91 |
-
|
|
|
|
|
|
|
|
|
|
| 92 |
|
| 93 |
def __repr__(self) -> str:
|
| 94 |
return (
|
|
@@ -102,32 +307,43 @@ class Miner:
|
|
| 102 |
offset: int,
|
| 103 |
n_keypoints: int,
|
| 104 |
) -> list[TVFrameResult]:
|
| 105 |
-
# ------------------ Boxes (SAM masks -> boxes) ------------------
|
| 106 |
-
# SAM returns masks for "things" but does not label them (player/ref/ball).
|
| 107 |
-
# For a first working miner, we mark everything as "player" (cls_id=2).
|
| 108 |
-
# To score well, you will later need classification (ball/ref/goalkeeper/team).
|
| 109 |
bboxes: dict[int, list[BoundingBox]] = {}
|
|
|
|
| 110 |
|
|
|
|
| 111 |
for i, img in enumerate(batch_images):
|
| 112 |
frame_id = offset + i
|
| 113 |
|
| 114 |
-
# Convert BGR(OpenCV) -> RGB(SAM)
|
| 115 |
if img is None:
|
| 116 |
bboxes[frame_id] = []
|
|
|
|
| 117 |
continue
|
| 118 |
-
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
| 119 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 120 |
masks = self.mask_generator.generate(rgb) # list[dict]
|
| 121 |
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
out_boxes: list[BoundingBox] = []
|
| 126 |
for m in masks:
|
| 127 |
-
|
| 128 |
-
x, y, w, h = m.get("bbox") or (0, 0, 0, 0)
|
| 129 |
x1, y1 = int(x), int(y)
|
| 130 |
-
x2, y2 = int(x +
|
| 131 |
if x2 <= x1 or y2 <= y1:
|
| 132 |
continue
|
| 133 |
|
|
@@ -137,26 +353,94 @@ class Miner:
|
|
| 137 |
if box_area / area_frame > float(os.getenv("MAX_BOX_AREA_FRAC", "0.25")):
|
| 138 |
continue
|
| 139 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
conf = float(m.get("predicted_iou") or 0.5)
|
| 141 |
-
|
| 142 |
-
BoundingBox(
|
| 143 |
-
x1=x1,
|
| 144 |
-
y1=y1,
|
| 145 |
-
x2=x2,
|
| 146 |
-
y2=y2,
|
| 147 |
-
cls_id=2, # default: player
|
| 148 |
-
conf=conf,
|
| 149 |
-
)
|
| 150 |
-
)
|
| 151 |
|
| 152 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 160 |
|
| 161 |
# ---------------- Combine ------------------
|
| 162 |
results: list[TVFrameResult] = []
|
|
|
|
| 11 |
|
| 12 |
import cv2
|
| 13 |
import numpy as np
|
| 14 |
+
import torch
|
| 15 |
from numpy import ndarray
|
| 16 |
from pydantic import BaseModel
|
| 17 |
|
|
|
|
| 31 |
keypoints: list[tuple[int, int]]
|
| 32 |
|
| 33 |
|
| 34 |
+
# ==========================
# Football template keypoints (order matters!)
# Copied from: scorevision/vlm_pipeline/domain_specific_schemas/football.py
# Each entry is an (x, y) pixel coordinate in the pitch template image;
# the numbered comments give the 1-based keypoint index expected by scoring.
# NOTE(review): max coords (1045, 675) with a 5px margin suggest a
# ~1050x680 template — confirm against football_pitch_template.png.
# ==========================
FOOTBALL_KEYPOINTS: list[tuple[int, int]] = [
    (5, 5),  # 1
    (5, 140),  # 2
    (5, 250),  # 3
    (5, 430),  # 4
    (5, 540),  # 5
    (5, 675),  # 6
    (55, 250),  # 7
    (55, 430),  # 8
    (110, 340),  # 9
    (165, 140),  # 10
    (165, 270),  # 11
    (165, 410),  # 12
    (165, 540),  # 13
    (527, 5),  # 14
    (527, 253),  # 15
    (527, 433),  # 16
    (527, 675),  # 17
    (888, 140),  # 18
    (888, 270),  # 19
    (888, 410),  # 20
    (888, 540),  # 21
    (940, 340),  # 22
    (998, 250),  # 23
    (998, 430),  # 24
    (1045, 5),  # 25
    (1045, 140),  # 26
    (1045, 250),  # 27
    (1045, 430),  # 28
    (1045, 540),  # 29
    (1045, 675),  # 30
    (435, 340),  # 31
    (615, 340),  # 32
]
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _clamp_box(x1: int, y1: int, x2: int, y2: int, w: int, h: int) -> tuple[int, int, int, int]:
|
| 75 |
+
x1 = max(0, min(w - 1, x1))
|
| 76 |
+
y1 = max(0, min(h - 1, y1))
|
| 77 |
+
x2 = max(0, min(w - 1, x2))
|
| 78 |
+
y2 = max(0, min(h - 1, y2))
|
| 79 |
+
if x2 <= x1:
|
| 80 |
+
x2 = min(w - 1, x1 + 1)
|
| 81 |
+
if y2 <= y1:
|
| 82 |
+
y2 = min(h - 1, y1 + 1)
|
| 83 |
+
return x1, y1, x2, y2
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _center_crop(box: tuple[int, int, int, int], frac: float = 0.55) -> tuple[int, int, int, int]:
|
| 87 |
+
"""Take a smaller crop (helps focus on jersey color vs grass)."""
|
| 88 |
+
x1, y1, x2, y2 = box
|
| 89 |
+
cx = (x1 + x2) / 2
|
| 90 |
+
cy = (y1 + y2) / 2
|
| 91 |
+
w = (x2 - x1) * frac
|
| 92 |
+
h = (y2 - y1) * frac
|
| 93 |
+
nx1 = int(cx - w / 2)
|
| 94 |
+
nx2 = int(cx + w / 2)
|
| 95 |
+
ny1 = int(cy - h / 2)
|
| 96 |
+
ny2 = int(cy + h / 2)
|
| 97 |
+
return nx1, ny1, nx2, ny2
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _mean_hsv(bgr: np.ndarray, box: tuple[int, int, int, int]) -> tuple[float, float, float]:
    """Mean HSV color inside *box* of a BGR image.

    The box is clamped to the image first; an empty crop yields (0, 0, 0).
    """
    height, width = bgr.shape[:2]
    x1, y1, x2, y2 = _clamp_box(*box, width, height)
    patch = bgr[y1:y2, x1:x2]
    if patch.size == 0:
        return (0.0, 0.0, 0.0)
    hsv = cv2.cvtColor(patch, cv2.COLOR_BGR2HSV)
    h_mean, s_mean, v_mean = hsv.reshape(-1, 3).mean(axis=0)
    return float(h_mean), float(s_mean), float(v_mean)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def _hsv_dist(a: tuple[float, float, float], b: tuple[float, float, float]) -> float:
|
| 112 |
+
# hue wrap-around: treat hue as circular
|
| 113 |
+
dh = abs(a[0] - b[0])
|
| 114 |
+
dh = min(dh, 180 - dh)
|
| 115 |
+
ds = abs(a[1] - b[1])
|
| 116 |
+
dv = abs(a[2] - b[2])
|
| 117 |
+
return float(dh * 1.0 + ds * 0.25 + dv * 0.25)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def _two_centroids(colors: list[tuple[float, float, float]]) -> tuple[tuple[float, float, float], tuple[float, float, float]] | None:
|
| 121 |
+
"""Tiny k-means(k=2) for team jersey colors."""
|
| 122 |
+
if len(colors) < 2:
|
| 123 |
+
return None
|
| 124 |
+
pts = np.array(colors, dtype=np.float32)
|
| 125 |
+
# init: farthest-in-hue from first point
|
| 126 |
+
idx1 = 0
|
| 127 |
+
idx2 = int(np.argmax(np.abs(pts[:, 0] - pts[0, 0])))
|
| 128 |
+
c1 = pts[idx1].copy()
|
| 129 |
+
c2 = pts[idx2].copy()
|
| 130 |
+
for _ in range(8):
|
| 131 |
+
d1 = np.linalg.norm(pts - c1, axis=1)
|
| 132 |
+
d2 = np.linalg.norm(pts - c2, axis=1)
|
| 133 |
+
a1 = pts[d1 <= d2]
|
| 134 |
+
a2 = pts[d1 > d2]
|
| 135 |
+
if len(a1) > 0:
|
| 136 |
+
c1 = a1.mean(axis=0)
|
| 137 |
+
if len(a2) > 0:
|
| 138 |
+
c2 = a2.mean(axis=0)
|
| 139 |
+
return (float(c1[0]), float(c1[1]), float(c1[2])), (float(c2[0]), float(c2[1]), float(c2[2]))
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def _detect_pitch_lines_mask(bgr: np.ndarray) -> np.ndarray:
    """Binary mask (0/255) for likely pitch lines.

    Strategy: keep bright, low-saturation ("white") pixels that lie on the
    green playing surface, then clean the mask morphologically.
    """
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    # Grass region: broad green hue band; median blur removes speckle from
    # players, texture, and compression noise.
    green = cv2.inRange(hsv, (25, 30, 30), (95, 255, 255))
    green = cv2.medianBlur(green, 7)
    # Line candidates: bright (V>=170), low-saturation (S<=60) pixels,
    # restricted to the grass mask so stands/signage are excluded.
    white = cv2.inRange(hsv, (0, 0, 170), (180, 60, 255))
    white = cv2.bitwise_and(white, white, mask=green)
    # Opening drops isolated pixels; dilation thickens thin line fragments.
    k = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    white = cv2.morphologyEx(white, cv2.MORPH_OPEN, k, iterations=1)
    white = cv2.dilate(white, k, iterations=1)
    return white
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _load_template_line_mask(path_hf_repo: Path) -> tuple[np.ndarray, tuple[int, int]]:
    """
    Load football pitch template line mask from your miner repo folder.

    Returns:
        (binary line mask, (width, height)) of the template image; the mask
        keeps only near-white pixels (gray value > 200), i.e. the drawn lines.

    Raises:
        FileNotFoundError: the template PNG is missing from the repo folder.
        RuntimeError: the file exists but OpenCV could not decode it.

    You MUST copy `football_pitch_template.png` into `my_miner_repo/` before pushing.
    """
    # Template filename is overridable via env for experimentation.
    tpl_name = os.getenv("PITCH_TEMPLATE_PNG", "football_pitch_template.png")
    tpl_path = (path_hf_repo / tpl_name).resolve()
    if not tpl_path.is_file():
        raise FileNotFoundError(
            f"Missing {tpl_name} in miner repo. Copy it from turbovision: "
            f"scorevision/vlm_pipeline/domain_specific_schemas/football_pitch_template.png"
        )
    tpl = cv2.imread(str(tpl_path))
    # cv2.imread returns None (no exception) on unreadable/corrupt files.
    if tpl is None:
        raise RuntimeError(f"Failed to read template image: {tpl_path}")
    gray = cv2.cvtColor(tpl, cv2.COLOR_BGR2GRAY)
    _, lines = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
    return lines, (tpl.shape[1], tpl.shape[0])  # (W,H)
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _estimate_homography_ecc(template_lines: np.ndarray, frame_lines: np.ndarray) -> np.ndarray | None:
    """Estimate the template->frame homography via ECC alignment of line masks.

    Both binary masks are downscaled to at most 960px wide to keep ECC cheap,
    aligned with cv2.findTransformECC (MOTION_HOMOGRAPHY), and the resulting
    warp is rescaled back to full resolution.

    Args:
        template_lines: binary (0/255) pitch-template line mask.
        frame_lines: binary (0/255) line mask detected in the video frame.

    Returns:
        3x3 float32 homography mapping full-resolution template coordinates
        to full-resolution frame coordinates, or None if ECC fails to converge.
    """
    max_w = 960
    fh, fw = frame_lines.shape[:2]
    scale = min(1.0, max_w / float(fw)) if fw > 0 else 1.0

    def _resize(img: np.ndarray) -> np.ndarray:
        # Downscale both masks by the same factor (no-op when scale ~ 1).
        if scale >= 0.999:
            return img
        return cv2.resize(img, (int(img.shape[1] * scale), int(img.shape[0] * scale)), interpolation=cv2.INTER_AREA)

    tpl_f = _resize(template_lines).astype(np.float32) / 255.0
    frm_f = _resize(frame_lines).astype(np.float32) / 255.0

    warp = np.eye(3, dtype=np.float32)
    criteria = (
        cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
        int(os.getenv("ECC_ITERS", "80")),
        float(os.getenv("ECC_EPS", "1e-5")),
    )
    try:
        # Bug fix (direction): findTransformECC's warp maps *templateImage*
        # coordinates to *inputImage* coordinates (warping inputImage by the
        # warp with WARP_INVERSE_MAP reproduces templateImage). The previous
        # argument order (frame first) produced a frame->template warp, the
        # inverse of what perspectiveTransform on template keypoints needs.
        # Passing the pitch template as templateImage yields template->frame.
        _, warp = cv2.findTransformECC(
            tpl_f,
            frm_f,
            warp,
            cv2.MOTION_HOMOGRAPHY,
            criteria,
            inputMask=None,
            gaussFiltSize=3,
        )
        if scale < 0.999:
            # Bug fix (scaling): the warp was estimated in downscaled coords.
            # Full-res template points must be scaled down, warped, then the
            # result scaled back up: H_full = S_up @ H_small @ S_down.
            # The previous code only left-multiplied by S_up.
            up = np.diag([1.0 / scale, 1.0 / scale, 1.0]).astype(np.float32)
            down = np.diag([scale, scale, 1.0]).astype(np.float32)
            warp = up @ warp @ down
        return warp
    except Exception:
        # cv2.error is raised when ECC fails to converge; treat as "no warp".
        return None
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def _project_keypoints(warp_tpl_to_frame: np.ndarray | None, frame_h: int, frame_w: int, n_keypoints: int) -> list[tuple[int, int]]:
|
| 218 |
+
if warp_tpl_to_frame is None:
|
| 219 |
+
return [(0, 0) for _ in range(n_keypoints)]
|
| 220 |
+
pts = np.array(FOOTBALL_KEYPOINTS[:n_keypoints], dtype=np.float32).reshape(1, -1, 2)
|
| 221 |
+
try:
|
| 222 |
+
out = cv2.perspectiveTransform(pts, warp_tpl_to_frame)[0]
|
| 223 |
+
except Exception:
|
| 224 |
+
return [(0, 0) for _ in range(n_keypoints)]
|
| 225 |
+
res: list[tuple[int, int]] = []
|
| 226 |
+
for x, y in out:
|
| 227 |
+
xi, yi = int(round(float(x))), int(round(float(y)))
|
| 228 |
+
if xi < 0 or yi < 0 or xi >= frame_w or yi >= frame_h:
|
| 229 |
+
res.append((0, 0))
|
| 230 |
+
else:
|
| 231 |
+
res.append((xi, yi))
|
| 232 |
+
return res
|
| 233 |
+
|
| 234 |
+
|
| 235 |
class Miner:
|
| 236 |
"""
|
| 237 |
Your miner engine.
|
|
|
|
| 273 |
"Add it to chute_config.yml (pip install segment-anything)."
|
| 274 |
) from e
|
| 275 |
|
| 276 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 277 |
self.sam = sam_model_registry[model_type](checkpoint=str(ckpt_path))
|
| 278 |
self.sam.to(device=device)
|
| 279 |
|
|
|
|
| 287 |
)
|
| 288 |
|
| 289 |
# ---------------- Keypoints ----------------
|
| 290 |
+
# We'll estimate pitch keypoints via template line alignment (ECC).
|
| 291 |
+
# This is OpenCV-only (no extra model), but not perfect.
|
| 292 |
+
self.enable_keypoints = os.getenv("ENABLE_KEYPOINTS", "1").lower() in ("1", "true", "yes")
|
| 293 |
+
self._template_lines: np.ndarray | None = None
|
| 294 |
+
self._template_wh: tuple[int, int] | None = None
|
| 295 |
+
if self.enable_keypoints:
|
| 296 |
+
self._template_lines, self._template_wh = _load_template_line_mask(path_hf_repo)
|
| 297 |
|
| 298 |
def __repr__(self) -> str:
|
| 299 |
return (
|
|
|
|
| 307 |
offset: int,
|
| 308 |
n_keypoints: int,
|
| 309 |
) -> list[TVFrameResult]:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 310 |
bboxes: dict[int, list[BoundingBox]] = {}
|
| 311 |
+
keypoints: dict[int, list[tuple[int, int]]] = {}
|
| 312 |
|
| 313 |
+
# Per-frame processing (keeps logic simple; you can optimize later)
|
| 314 |
for i, img in enumerate(batch_images):
|
| 315 |
frame_id = offset + i
|
| 316 |
|
|
|
|
| 317 |
if img is None:
|
| 318 |
bboxes[frame_id] = []
|
| 319 |
+
keypoints[frame_id] = [(0, 0) for _ in range(n_keypoints)]
|
| 320 |
continue
|
|
|
|
| 321 |
|
| 322 |
+
frame_h, frame_w = img.shape[:2]
|
| 323 |
+
|
| 324 |
+
# ---------------- Keypoints (ECC template alignment) ----------------
|
| 325 |
+
if self.enable_keypoints and self._template_lines is not None:
|
| 326 |
+
frame_lines = _detect_pitch_lines_mask(img)
|
| 327 |
+
warp = _estimate_homography_ecc(self._template_lines, frame_lines)
|
| 328 |
+
keypoints[frame_id] = _project_keypoints(warp, frame_h, frame_w, n_keypoints)
|
| 329 |
+
else:
|
| 330 |
+
keypoints[frame_id] = [(0, 0) for _ in range(n_keypoints)]
|
| 331 |
+
|
| 332 |
+
# ---------------- Boxes (SAM masks -> boxes) ----------------
|
| 333 |
+
# SAM returns masks without semantic classes. We'll add simple heuristics:
|
| 334 |
+
# - ball: very small near-square bbox (cls_id=0)
|
| 335 |
+
# - teams: cluster jersey colors into two groups -> cls_id=6 or 7
|
| 336 |
+
# - non-team dark/odd colors -> referee (cls_id=3) and maybe one goalkeeper (cls_id=1)
|
| 337 |
+
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
| 338 |
masks = self.mask_generator.generate(rgb) # list[dict]
|
| 339 |
|
| 340 |
+
area_frame = float(frame_h * frame_w)
|
| 341 |
+
cand: list[tuple[int, int, int, int, float]] = []
|
| 342 |
+
|
|
|
|
| 343 |
for m in masks:
|
| 344 |
+
x, y, w_, h_ = m.get("bbox") or (0, 0, 0, 0)
|
|
|
|
| 345 |
x1, y1 = int(x), int(y)
|
| 346 |
+
x2, y2 = int(x + w_), int(y + h_)
|
| 347 |
if x2 <= x1 or y2 <= y1:
|
| 348 |
continue
|
| 349 |
|
|
|
|
| 353 |
if box_area / area_frame > float(os.getenv("MAX_BOX_AREA_FRAC", "0.25")):
|
| 354 |
continue
|
| 355 |
|
| 356 |
+
# human-ish aspect ratio filter (helps remove lines/signage)
|
| 357 |
+
ar = float((y2 - y1) / max(1.0, (x2 - x1)))
|
| 358 |
+
if ar < float(os.getenv("MIN_ASPECT_RATIO", "1.0")):
|
| 359 |
+
continue
|
| 360 |
+
if ar > float(os.getenv("MAX_ASPECT_RATIO", "6.0")):
|
| 361 |
+
continue
|
| 362 |
+
|
| 363 |
conf = float(m.get("predicted_iou") or 0.5)
|
| 364 |
+
cand.append((x1, y1, x2, y2, conf))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 365 |
|
| 366 |
+
# ball candidates (tiny)
|
| 367 |
+
ball_max_area = int(os.getenv("BALL_MAX_AREA", "900"))
|
| 368 |
+
ball: list[BoundingBox] = []
|
| 369 |
+
people: list[tuple[int, int, int, int, float]] = []
|
| 370 |
+
for x1, y1, x2, y2, conf in cand:
|
| 371 |
+
bw = x2 - x1
|
| 372 |
+
bh = y2 - y1
|
| 373 |
+
a = bw * bh
|
| 374 |
+
if a <= ball_max_area and 0.6 <= (bw / max(1.0, bh)) <= 1.6:
|
| 375 |
+
ball.append(BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2, cls_id=0, conf=float(conf)))
|
| 376 |
+
else:
|
| 377 |
+
people.append((x1, y1, x2, y2, conf))
|
| 378 |
|
| 379 |
+
# keep 1 ball max
|
| 380 |
+
ball.sort(key=lambda b: b.conf, reverse=True)
|
| 381 |
+
if len(ball) > 1:
|
| 382 |
+
ball = ball[:1]
|
| 383 |
+
|
| 384 |
+
# jersey colors (center crop per person box)
|
| 385 |
+
colors: list[tuple[float, float, float]] = []
|
| 386 |
+
for x1, y1, x2, y2, _conf in people:
|
| 387 |
+
cx1, cy1, cx2, cy2 = _center_crop((x1, y1, x2, y2))
|
| 388 |
+
cx1, cy1, cx2, cy2 = _clamp_box(cx1, cy1, cx2, cy2, frame_w, frame_h)
|
| 389 |
+
colors.append(_mean_hsv(img, (cx1, cy1, cx2, cy2)))
|
| 390 |
+
|
| 391 |
+
cents = _two_centroids(colors)
|
| 392 |
+
team1_c, team2_c = cents if cents is not None else ((0.0, 0.0, 0.0), (90.0, 0.0, 0.0))
|
| 393 |
+
|
| 394 |
+
dark_v_thresh = float(os.getenv("REF_DARK_V", "70"))
|
| 395 |
+
nonteam_dist = float(os.getenv("NONTEAM_DIST", "45"))
|
| 396 |
+
|
| 397 |
+
team_boxes: list[BoundingBox] = []
|
| 398 |
+
nonteam_boxes: list[tuple[int, int, int, int, float, tuple[float, float, float]]] = []
|
| 399 |
+
|
| 400 |
+
for (x1, y1, x2, y2, conf), c in zip(people, colors, strict=False):
|
| 401 |
+
d1 = _hsv_dist(c, team1_c)
|
| 402 |
+
d2 = _hsv_dist(c, team2_c)
|
| 403 |
+
if c[2] < dark_v_thresh or (d1 > nonteam_dist and d2 > nonteam_dist):
|
| 404 |
+
nonteam_boxes.append((x1, y1, x2, y2, conf, c))
|
| 405 |
+
else:
|
| 406 |
+
cls = 6 if d1 <= d2 else 7
|
| 407 |
+
team_boxes.append(BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2, cls_id=int(cls), conf=float(conf)))
|
| 408 |
+
|
| 409 |
+
# goalkeeper vs referee split among nonteam:
|
| 410 |
+
# choose at most 1 goalkeeper near left/right third; rest referees.
|
| 411 |
+
gk_box: BoundingBox | None = None
|
| 412 |
+
refs: list[BoundingBox] = []
|
| 413 |
+
if nonteam_boxes:
|
| 414 |
+
edge_candidates = []
|
| 415 |
+
mid_candidates = []
|
| 416 |
+
for x1, y1, x2, y2, conf, _c in nonteam_boxes:
|
| 417 |
+
cx = (x1 + x2) / 2.0
|
| 418 |
+
if cx < frame_w * 0.33 or cx > frame_w * 0.66:
|
| 419 |
+
edge_candidates.append((x1, y1, x2, y2, conf))
|
| 420 |
+
else:
|
| 421 |
+
mid_candidates.append((x1, y1, x2, y2, conf))
|
| 422 |
+
edge_candidates.sort(key=lambda t: t[4], reverse=True)
|
| 423 |
+
if edge_candidates:
|
| 424 |
+
x1, y1, x2, y2, conf = edge_candidates[0]
|
| 425 |
+
gk_box = BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2, cls_id=1, conf=float(conf))
|
| 426 |
+
for x1, y1, x2, y2, conf in edge_candidates[1:]:
|
| 427 |
+
refs.append(BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2, cls_id=3, conf=float(conf)))
|
| 428 |
+
for x1, y1, x2, y2, conf in mid_candidates:
|
| 429 |
+
refs.append(BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2, cls_id=3, conf=float(conf)))
|
| 430 |
+
|
| 431 |
+
out: list[BoundingBox] = []
|
| 432 |
+
out.extend(ball)
|
| 433 |
+
if gk_box is not None:
|
| 434 |
+
out.append(gk_box)
|
| 435 |
+
out.extend(team_boxes)
|
| 436 |
+
out.extend(refs)
|
| 437 |
+
|
| 438 |
+
max_boxes = int(os.getenv("MAX_BOXES_PER_FRAME", "40"))
|
| 439 |
+
if len(out) > max_boxes:
|
| 440 |
+
out.sort(key=lambda b: b.conf, reverse=True)
|
| 441 |
+
out = out[:max_boxes]
|
| 442 |
+
|
| 443 |
+
bboxes[frame_id] = out
|
| 444 |
|
| 445 |
# ---------------- Combine ------------------
|
| 446 |
results: list[TVFrameResult] = []
|