Spaces:
Sleeping
Sleeping
Commit ·
ecbaf10
1
Parent(s): 251faa9
cleaning up pylint
Browse files- src/detection/scorebug.py +5 -0
- src/pipeline/models.py +17 -0
- src/pipeline/orchestrator.py +1 -1
- src/pipeline/parallel.py +103 -101
- src/pipeline/play_detector.py +23 -26
- src/pipeline/template_builder_pass.py +274 -223
- src/readers/__init__.py +2 -4
- src/readers/playclock.py +119 -205
- src/setup/coverage.py +84 -0
- src/setup/playclock_region.py +2 -39
- src/setup/template_builder.py +20 -173
- src/setup/template_library.py +15 -45
- src/tracking/play_merger.py +4 -1
- src/tracking/play_state.py +143 -123
- src/ui/sessions.py +10 -2
- src/utils/__init__.py +27 -0
- src/utils/color.py +75 -0
- src/utils/frame_result.py +50 -0
- src/utils/regions.py +116 -0
src/detection/scorebug.py
CHANGED
|
@@ -78,6 +78,11 @@ class DetectScoreBug:
|
|
| 78 |
if self._use_fixed_region:
|
| 79 |
logger.info(" Fixed region: %s", self.fixed_region)
|
| 80 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
def _load_fixed_region_config(self, config_path: str) -> None:
|
| 82 |
"""Load fixed region from a JSON config file."""
|
| 83 |
path = Path(config_path)
|
|
|
|
| 78 |
if self._use_fixed_region:
|
| 79 |
logger.info(" Fixed region: %s", self.fixed_region)
|
| 80 |
|
| 81 |
+
@property
|
| 82 |
+
def is_fixed_region_mode(self) -> bool:
|
| 83 |
+
"""Check if detector is using fixed region mode for faster detection."""
|
| 84 |
+
return self._use_fixed_region
|
| 85 |
+
|
| 86 |
def _load_fixed_region_config(self, config_path: str) -> None:
|
| 87 |
"""Load fixed region from a JSON config file."""
|
| 88 |
path = Path(config_path)
|
src/pipeline/models.py
CHANGED
|
@@ -88,6 +88,23 @@ class DetectionResult(BaseModel):
|
|
| 88 |
# =============================================================================
|
| 89 |
|
| 90 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
class ChunkResult(BaseModel):
|
| 92 |
"""Result from processing a single video chunk in parallel processing.
|
| 93 |
|
|
|
|
| 88 |
# =============================================================================
|
| 89 |
|
| 90 |
|
| 91 |
+
class ParallelProcessingConfig(BaseModel):
|
| 92 |
+
"""Configuration for parallel video chunk processing.
|
| 93 |
+
|
| 94 |
+
This model groups the configuration parameters needed by parallel processing
|
| 95 |
+
functions, reducing the number of individual arguments.
|
| 96 |
+
"""
|
| 97 |
+
|
| 98 |
+
video_path: str = Field(..., description="Path to video file")
|
| 99 |
+
start_time: float = Field(..., description="Start time in seconds")
|
| 100 |
+
end_time: float = Field(..., description="End time in seconds")
|
| 101 |
+
frame_interval: float = Field(..., description="Time interval between frames")
|
| 102 |
+
fixed_playclock_coords: Tuple[int, int, int, int] = Field(..., description="(x, y, w, h) for play clock region")
|
| 103 |
+
fixed_scorebug_coords: Tuple[int, int, int, int] = Field(..., description="(x, y, w, h) for scorebug region")
|
| 104 |
+
template_library_path: Optional[str] = Field(None, description="Path to template library directory")
|
| 105 |
+
timeout_config_path: Optional[str] = Field(None, description="Path to timeout tracker config")
|
| 106 |
+
|
| 107 |
+
|
| 108 |
class ChunkResult(BaseModel):
|
| 109 |
"""Result from processing a single video chunk in parallel processing.
|
| 110 |
|
src/pipeline/orchestrator.py
CHANGED
|
@@ -11,7 +11,7 @@ Supports both sequential and parallel processing modes:
|
|
| 11 |
|
| 12 |
import logging
|
| 13 |
from pathlib import Path
|
| 14 |
-
from typing import Any, Dict
|
| 15 |
|
| 16 |
from config.session import SessionConfig, MIN_PLAY_DURATION
|
| 17 |
from detection import DetectTimeouts
|
|
|
|
| 11 |
|
| 12 |
import logging
|
| 13 |
from pathlib import Path
|
| 14 |
+
from typing import Any, Dict
|
| 15 |
|
| 16 |
from config.session import SessionConfig, MIN_PLAY_DURATION
|
| 17 |
from detection import DetectTimeouts
|
src/pipeline/parallel.py
CHANGED
|
@@ -15,7 +15,8 @@ from multiprocessing import Manager
|
|
| 15 |
from pathlib import Path
|
| 16 |
from typing import Any, Dict, List, Optional, Tuple
|
| 17 |
|
| 18 |
-
from
|
|
|
|
| 19 |
|
| 20 |
logger = logging.getLogger(__name__)
|
| 21 |
|
|
@@ -25,53 +26,58 @@ logger = logging.getLogger(__name__)
|
|
| 25 |
# =============================================================================
|
| 26 |
|
| 27 |
|
| 28 |
-
def _init_chunk_detectors(
|
| 29 |
-
fixed_playclock_coords: Tuple[int, int, int, int],
|
| 30 |
-
fixed_scorebug_coords: Tuple[int, int, int, int],
|
| 31 |
-
template_library_path: Optional[str],
|
| 32 |
-
timeout_config_path: Optional[str],
|
| 33 |
-
) -> Tuple[Any, Any, Any, Any]:
|
| 34 |
"""
|
| 35 |
Initialize all detection components for a chunk worker.
|
| 36 |
|
| 37 |
Must be called within the subprocess since these objects can't be pickled.
|
| 38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
Args:
|
| 40 |
-
|
| 41 |
-
fixed_scorebug_coords: (x, y, w, h) for scorebug region.
|
| 42 |
-
template_library_path: Path to template library directory.
|
| 43 |
-
timeout_config_path: Path to timeout tracker config.
|
| 44 |
|
| 45 |
Returns:
|
| 46 |
Tuple of (scorebug_detector, clock_reader, template_reader, timeout_tracker).
|
| 47 |
"""
|
|
|
|
|
|
|
|
|
|
| 48 |
from detection import DetectScoreBug, DetectTimeouts
|
| 49 |
from readers import ReadPlayClock
|
| 50 |
from setup import DigitTemplateLibrary, PlayClockRegionConfig, PlayClockRegionExtractor
|
| 51 |
|
| 52 |
# Create scorebug detector with fixed region
|
| 53 |
scorebug_detector = DetectScoreBug(template_path=None, use_split_detection=True)
|
| 54 |
-
scorebug_detector.set_fixed_region(fixed_scorebug_coords)
|
| 55 |
|
| 56 |
# Create play clock region extractor
|
| 57 |
-
pc_x, pc_y, pc_w, pc_h = fixed_playclock_coords
|
| 58 |
-
sb_x, sb_y, _, _ = fixed_scorebug_coords
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
clock_reader = PlayClockRegionExtractor(region_config=playclock_config)
|
| 63 |
|
| 64 |
# Create template reader if template path provided
|
| 65 |
template_reader = None
|
| 66 |
-
if template_library_path and Path(template_library_path).exists():
|
| 67 |
template_library = DigitTemplateLibrary()
|
| 68 |
-
if template_library.load(template_library_path):
|
| 69 |
template_reader = ReadPlayClock(template_library, pc_w, pc_h)
|
| 70 |
|
| 71 |
# Initialize timeout tracker if config provided
|
| 72 |
timeout_tracker = None
|
| 73 |
-
if timeout_config_path and Path(timeout_config_path).exists():
|
| 74 |
-
timeout_tracker = DetectTimeouts(config_path=timeout_config_path)
|
| 75 |
|
| 76 |
return scorebug_detector, clock_reader, template_reader, timeout_tracker
|
| 77 |
|
|
@@ -103,15 +109,12 @@ def _process_frame(
|
|
| 103 |
# Detect scorebug (fast path with fixed region)
|
| 104 |
scorebug = scorebug_detector.detect(img)
|
| 105 |
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
"clock_value": None,
|
| 113 |
-
"clock_detected": False,
|
| 114 |
-
}
|
| 115 |
|
| 116 |
if scorebug.detected:
|
| 117 |
stats["frames_with_scorebug"] += 1
|
|
@@ -123,7 +126,7 @@ def _process_frame(
|
|
| 123 |
frame_result["away_timeouts"] = timeout_reading.away_timeouts
|
| 124 |
|
| 125 |
# Extract play clock region and template match
|
| 126 |
-
play_clock_region = clock_reader.
|
| 127 |
if play_clock_region is not None and template_reader:
|
| 128 |
clock_result = template_reader.read(play_clock_region)
|
| 129 |
frame_result["clock_detected"] = clock_result.detected
|
|
@@ -134,16 +137,50 @@ def _process_frame(
|
|
| 134 |
return frame_result
|
| 135 |
|
| 136 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 137 |
def _process_chunk(
|
| 138 |
chunk_id: int,
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
frame_interval: float,
|
| 143 |
-
fixed_playclock_coords: Tuple[int, int, int, int],
|
| 144 |
-
fixed_scorebug_coords: Tuple[int, int, int, int],
|
| 145 |
-
template_library_path: Optional[str],
|
| 146 |
-
timeout_config_path: Optional[str],
|
| 147 |
progress_dict: Optional[Dict] = None,
|
| 148 |
) -> ChunkResult:
|
| 149 |
"""
|
|
@@ -154,47 +191,41 @@ def _process_chunk(
|
|
| 154 |
|
| 155 |
Args:
|
| 156 |
chunk_id: Identifier for this chunk (for logging).
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
frame_interval: Time interval between frames.
|
| 161 |
-
fixed_playclock_coords: (x, y, w, h) for play clock region.
|
| 162 |
-
fixed_scorebug_coords: (x, y, w, h) for scorebug region.
|
| 163 |
-
template_library_path: Path to template library directory (or None).
|
| 164 |
-
timeout_config_path: Path to timeout tracker config (or None).
|
| 165 |
progress_dict: Shared dictionary for progress updates.
|
| 166 |
|
| 167 |
Returns:
|
| 168 |
ChunkResult with processing results.
|
| 169 |
"""
|
| 170 |
-
#
|
|
|
|
|
|
|
| 171 |
import cv2
|
| 172 |
|
| 173 |
t_start = time.perf_counter()
|
| 174 |
-
io_time = 0.0
|
| 175 |
|
| 176 |
# Initialize all detection components
|
| 177 |
-
scorebug_detector, clock_reader, template_reader, timeout_tracker = _init_chunk_detectors(
|
| 178 |
-
fixed_playclock_coords, fixed_scorebug_coords, template_library_path, timeout_config_path
|
| 179 |
-
)
|
| 180 |
|
| 181 |
# Open video and seek to start
|
| 182 |
t_io_start = time.perf_counter()
|
| 183 |
-
cap = cv2.VideoCapture(video_path)
|
| 184 |
if not cap.isOpened():
|
| 185 |
-
raise RuntimeError(f"Could not open video: {video_path}")
|
| 186 |
|
| 187 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 188 |
-
frame_skip = int(frame_interval * fps)
|
| 189 |
-
start_frame = int(
|
| 190 |
-
end_frame = int(
|
| 191 |
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
|
| 192 |
-
io_time
|
| 193 |
|
| 194 |
# Initialize processing state
|
| 195 |
frame_data: List[Dict[str, Any]] = []
|
| 196 |
stats = {"total_frames": 0, "frames_with_scorebug": 0, "frames_with_clock": 0}
|
| 197 |
-
total_expected_frames = max(1, int((
|
| 198 |
|
| 199 |
# Initialize progress
|
| 200 |
if progress_dict is not None:
|
|
@@ -236,8 +267,8 @@ def _process_chunk(
|
|
| 236 |
|
| 237 |
return ChunkResult(
|
| 238 |
chunk_id=chunk_id,
|
| 239 |
-
start_time=
|
| 240 |
-
end_time=
|
| 241 |
frames_processed=stats["total_frames"],
|
| 242 |
frames_with_scorebug=stats["frames_with_scorebug"],
|
| 243 |
frames_with_clock=stats["frames_with_clock"],
|
|
@@ -337,7 +368,8 @@ def _display_progress(progress_dict: Dict, num_workers: int) -> None:
|
|
| 337 |
overall_pct = min(100, int(100 * total_frames_done / total_frames_expected)) if total_frames_expected > 0 else 0
|
| 338 |
progress_str = " | ".join(parts)
|
| 339 |
|
| 340 |
-
|
|
|
|
| 341 |
remaining = num_workers - completed_workers
|
| 342 |
status_msg = f" [{overall_pct:3d}%] {progress_str} — waiting for {remaining} worker{'s' if remaining > 1 else ''}..."
|
| 343 |
elif completed_workers == num_workers:
|
|
@@ -352,12 +384,7 @@ def _display_progress(progress_dict: Dict, num_workers: int) -> None:
|
|
| 352 |
def _submit_chunk_jobs(
|
| 353 |
executor: ProcessPoolExecutor,
|
| 354 |
chunks: List[Tuple[int, float, float]],
|
| 355 |
-
|
| 356 |
-
frame_interval: float,
|
| 357 |
-
fixed_playclock_coords: Tuple[int, int, int, int],
|
| 358 |
-
fixed_scorebug_coords: Tuple[int, int, int, int],
|
| 359 |
-
template_library_path: Optional[str],
|
| 360 |
-
timeout_config_path: Optional[str],
|
| 361 |
progress_dict: Dict,
|
| 362 |
) -> Dict[Future, int]:
|
| 363 |
"""
|
|
@@ -366,12 +393,7 @@ def _submit_chunk_jobs(
|
|
| 366 |
Args:
|
| 367 |
executor: ProcessPoolExecutor instance.
|
| 368 |
chunks: List of (chunk_id, start_time, end_time) tuples.
|
| 369 |
-
|
| 370 |
-
frame_interval: Time between frame samples.
|
| 371 |
-
fixed_playclock_coords: Play clock region coordinates.
|
| 372 |
-
fixed_scorebug_coords: Scorebug region coordinates.
|
| 373 |
-
template_library_path: Path to template library.
|
| 374 |
-
timeout_config_path: Path to timeout config.
|
| 375 |
progress_dict: Shared progress dictionary.
|
| 376 |
|
| 377 |
Returns:
|
|
@@ -382,14 +404,9 @@ def _submit_chunk_jobs(
|
|
| 382 |
future = executor.submit(
|
| 383 |
_process_chunk,
|
| 384 |
chunk_id,
|
| 385 |
-
|
| 386 |
chunk_start,
|
| 387 |
chunk_end,
|
| 388 |
-
frame_interval,
|
| 389 |
-
fixed_playclock_coords,
|
| 390 |
-
fixed_scorebug_coords,
|
| 391 |
-
template_library_path,
|
| 392 |
-
timeout_config_path,
|
| 393 |
progress_dict,
|
| 394 |
)
|
| 395 |
futures[future] = chunk_id
|
|
@@ -458,14 +475,7 @@ def _merge_chunk_results(results: Dict[int, Optional[ChunkResult]], num_workers:
|
|
| 458 |
|
| 459 |
|
| 460 |
def process_video_parallel(
|
| 461 |
-
|
| 462 |
-
start_time: float,
|
| 463 |
-
end_time: float,
|
| 464 |
-
frame_interval: float,
|
| 465 |
-
fixed_playclock_coords: Tuple[int, int, int, int],
|
| 466 |
-
fixed_scorebug_coords: Tuple[int, int, int, int],
|
| 467 |
-
template_library_path: Optional[str],
|
| 468 |
-
timeout_config_path: Optional[str],
|
| 469 |
num_workers: int = 2,
|
| 470 |
) -> Tuple[List[Dict[str, Any]], Dict[str, int], float]:
|
| 471 |
"""
|
|
@@ -475,22 +485,16 @@ def process_video_parallel(
|
|
| 475 |
Results are merged in chronological order.
|
| 476 |
|
| 477 |
Args:
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
end_time: End time in seconds.
|
| 481 |
-
frame_interval: Time interval between frames.
|
| 482 |
-
fixed_playclock_coords: (x, y, w, h) for play clock region.
|
| 483 |
-
fixed_scorebug_coords: (x, y, w, h) for scorebug region.
|
| 484 |
-
template_library_path: Path to template library directory.
|
| 485 |
-
timeout_config_path: Path to timeout tracker config.
|
| 486 |
num_workers: Number of parallel workers (default 2).
|
| 487 |
|
| 488 |
Returns:
|
| 489 |
Tuple of (frame_data_list, stats_dict, total_io_time).
|
| 490 |
"""
|
| 491 |
# Calculate chunk boundaries
|
| 492 |
-
chunks = _calculate_chunk_boundaries(start_time, end_time, num_workers)
|
| 493 |
-
chunk_duration = (end_time - start_time) / num_workers
|
| 494 |
|
| 495 |
logger.info("Parallel processing: %d workers, %.1fs per chunk", num_workers, chunk_duration)
|
| 496 |
for chunk_id, chunk_start, chunk_end in chunks:
|
|
@@ -508,9 +512,7 @@ def process_video_parallel(
|
|
| 508 |
|
| 509 |
# Execute chunks in parallel
|
| 510 |
with ProcessPoolExecutor(max_workers=num_workers) as executor:
|
| 511 |
-
futures = _submit_chunk_jobs(
|
| 512 |
-
executor, chunks, video_path, frame_interval, fixed_playclock_coords, fixed_scorebug_coords, template_library_path, timeout_config_path, progress_dict
|
| 513 |
-
)
|
| 514 |
results = _collect_chunk_results(futures)
|
| 515 |
|
| 516 |
# Stop progress monitor and show completion
|
|
|
|
| 15 |
from pathlib import Path
|
| 16 |
from typing import Any, Dict, List, Optional, Tuple
|
| 17 |
|
| 18 |
+
from utils import create_frame_result
|
| 19 |
+
from .models import ChunkResult, ParallelProcessingConfig
|
| 20 |
|
| 21 |
logger = logging.getLogger(__name__)
|
| 22 |
|
|
|
|
| 26 |
# =============================================================================
|
| 27 |
|
| 28 |
|
| 29 |
+
def _init_chunk_detectors(config: ParallelProcessingConfig) -> Tuple[Any, Any, Any, Any]:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
"""
|
| 31 |
Initialize all detection components for a chunk worker.
|
| 32 |
|
| 33 |
Must be called within the subprocess since these objects can't be pickled.
|
| 34 |
|
| 35 |
+
Note: Imports are inside this function because multiprocessing requires
|
| 36 |
+
fresh imports in each worker process. Moving these to module level would
|
| 37 |
+
cause pickling errors when spawning workers.
|
| 38 |
+
|
| 39 |
Args:
|
| 40 |
+
config: Parallel processing configuration.
|
|
|
|
|
|
|
|
|
|
| 41 |
|
| 42 |
Returns:
|
| 43 |
Tuple of (scorebug_detector, clock_reader, template_reader, timeout_tracker).
|
| 44 |
"""
|
| 45 |
+
# pylint: disable=import-outside-toplevel
|
| 46 |
+
# Imports must be inside function for multiprocessing - each subprocess
|
| 47 |
+
# needs its own fresh imports of these modules to avoid pickling errors
|
| 48 |
from detection import DetectScoreBug, DetectTimeouts
|
| 49 |
from readers import ReadPlayClock
|
| 50 |
from setup import DigitTemplateLibrary, PlayClockRegionConfig, PlayClockRegionExtractor
|
| 51 |
|
| 52 |
# Create scorebug detector with fixed region
|
| 53 |
scorebug_detector = DetectScoreBug(template_path=None, use_split_detection=True)
|
| 54 |
+
scorebug_detector.set_fixed_region(config.fixed_scorebug_coords)
|
| 55 |
|
| 56 |
# Create play clock region extractor
|
| 57 |
+
pc_x, pc_y, pc_w, pc_h = config.fixed_playclock_coords
|
| 58 |
+
sb_x, sb_y, _, _ = config.fixed_scorebug_coords
|
| 59 |
+
playclock_config = PlayClockRegionConfig(
|
| 60 |
+
x_offset=pc_x - sb_x,
|
| 61 |
+
y_offset=pc_y - sb_y,
|
| 62 |
+
width=pc_w,
|
| 63 |
+
height=pc_h,
|
| 64 |
+
source_video="",
|
| 65 |
+
scorebug_template="",
|
| 66 |
+
samples_used=0,
|
| 67 |
+
)
|
| 68 |
clock_reader = PlayClockRegionExtractor(region_config=playclock_config)
|
| 69 |
|
| 70 |
# Create template reader if template path provided
|
| 71 |
template_reader = None
|
| 72 |
+
if config.template_library_path and Path(config.template_library_path).exists():
|
| 73 |
template_library = DigitTemplateLibrary()
|
| 74 |
+
if template_library.load(config.template_library_path):
|
| 75 |
template_reader = ReadPlayClock(template_library, pc_w, pc_h)
|
| 76 |
|
| 77 |
# Initialize timeout tracker if config provided
|
| 78 |
timeout_tracker = None
|
| 79 |
+
if config.timeout_config_path and Path(config.timeout_config_path).exists():
|
| 80 |
+
timeout_tracker = DetectTimeouts(config_path=config.timeout_config_path)
|
| 81 |
|
| 82 |
return scorebug_detector, clock_reader, template_reader, timeout_tracker
|
| 83 |
|
|
|
|
| 109 |
# Detect scorebug (fast path with fixed region)
|
| 110 |
scorebug = scorebug_detector.detect(img)
|
| 111 |
|
| 112 |
+
# Initialize frame result using shared factory
|
| 113 |
+
frame_result = create_frame_result(
|
| 114 |
+
timestamp=timestamp,
|
| 115 |
+
scorebug_detected=scorebug.detected,
|
| 116 |
+
scorebug_bbox=scorebug.bbox if scorebug.detected else None,
|
| 117 |
+
)
|
|
|
|
|
|
|
|
|
|
| 118 |
|
| 119 |
if scorebug.detected:
|
| 120 |
stats["frames_with_scorebug"] += 1
|
|
|
|
| 126 |
frame_result["away_timeouts"] = timeout_reading.away_timeouts
|
| 127 |
|
| 128 |
# Extract play clock region and template match
|
| 129 |
+
play_clock_region = clock_reader.extract_region(img, scorebug.bbox)
|
| 130 |
if play_clock_region is not None and template_reader:
|
| 131 |
clock_result = template_reader.read(play_clock_region)
|
| 132 |
frame_result["clock_detected"] = clock_result.detected
|
|
|
|
| 137 |
return frame_result
|
| 138 |
|
| 139 |
|
| 140 |
+
def _read_video_frames(cap: Any, start_frame: int, end_frame: int, frame_skip: int, fps: float) -> Tuple[List[Tuple[float, Any]], float]:
|
| 141 |
+
"""
|
| 142 |
+
Read frames from video capture within the given range.
|
| 143 |
+
|
| 144 |
+
Args:
|
| 145 |
+
cap: OpenCV VideoCapture object.
|
| 146 |
+
start_frame: First frame to read.
|
| 147 |
+
end_frame: Last frame to read.
|
| 148 |
+
frame_skip: Number of frames to skip between samples.
|
| 149 |
+
fps: Video frames per second.
|
| 150 |
+
|
| 151 |
+
Returns:
|
| 152 |
+
Tuple of (list of (timestamp, frame) tuples, total_io_time).
|
| 153 |
+
"""
|
| 154 |
+
frames = []
|
| 155 |
+
io_time = 0.0
|
| 156 |
+
current_frame = start_frame
|
| 157 |
+
|
| 158 |
+
while current_frame < end_frame:
|
| 159 |
+
t_io_start = time.perf_counter()
|
| 160 |
+
ret, img = cap.read()
|
| 161 |
+
io_time += time.perf_counter() - t_io_start
|
| 162 |
+
|
| 163 |
+
if not ret:
|
| 164 |
+
break
|
| 165 |
+
|
| 166 |
+
timestamp = current_frame / fps
|
| 167 |
+
frames.append((timestamp, img))
|
| 168 |
+
|
| 169 |
+
# Skip to next sample frame
|
| 170 |
+
t_io_start = time.perf_counter()
|
| 171 |
+
for _ in range(frame_skip - 1):
|
| 172 |
+
cap.grab()
|
| 173 |
+
io_time += time.perf_counter() - t_io_start
|
| 174 |
+
current_frame += frame_skip
|
| 175 |
+
|
| 176 |
+
return frames, io_time
|
| 177 |
+
|
| 178 |
+
# pylint: disable=too-many-locals
|
| 179 |
def _process_chunk(
|
| 180 |
chunk_id: int,
|
| 181 |
+
config: ParallelProcessingConfig,
|
| 182 |
+
chunk_start: float,
|
| 183 |
+
chunk_end: float,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 184 |
progress_dict: Optional[Dict] = None,
|
| 185 |
) -> ChunkResult:
|
| 186 |
"""
|
|
|
|
| 191 |
|
| 192 |
Args:
|
| 193 |
chunk_id: Identifier for this chunk (for logging).
|
| 194 |
+
config: Parallel processing configuration.
|
| 195 |
+
chunk_start: Chunk start time in seconds.
|
| 196 |
+
chunk_end: Chunk end time in seconds.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 197 |
progress_dict: Shared dictionary for progress updates.
|
| 198 |
|
| 199 |
Returns:
|
| 200 |
ChunkResult with processing results.
|
| 201 |
"""
|
| 202 |
+
# pylint: disable=import-outside-toplevel
|
| 203 |
+
# cv2 import must be inside function for multiprocessing - each subprocess
|
| 204 |
+
# needs its own fresh import to avoid issues with OpenCV's internal state
|
| 205 |
import cv2
|
| 206 |
|
| 207 |
t_start = time.perf_counter()
|
|
|
|
| 208 |
|
| 209 |
# Initialize all detection components
|
| 210 |
+
scorebug_detector, clock_reader, template_reader, timeout_tracker = _init_chunk_detectors(config)
|
|
|
|
|
|
|
| 211 |
|
| 212 |
# Open video and seek to start
|
| 213 |
t_io_start = time.perf_counter()
|
| 214 |
+
cap = cv2.VideoCapture(config.video_path)
|
| 215 |
if not cap.isOpened():
|
| 216 |
+
raise RuntimeError(f"Could not open video: {config.video_path}")
|
| 217 |
|
| 218 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 219 |
+
frame_skip = int(config.frame_interval * fps)
|
| 220 |
+
start_frame = int(chunk_start * fps)
|
| 221 |
+
end_frame = int(chunk_end * fps)
|
| 222 |
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
|
| 223 |
+
io_time = time.perf_counter() - t_io_start
|
| 224 |
|
| 225 |
# Initialize processing state
|
| 226 |
frame_data: List[Dict[str, Any]] = []
|
| 227 |
stats = {"total_frames": 0, "frames_with_scorebug": 0, "frames_with_clock": 0}
|
| 228 |
+
total_expected_frames = max(1, int((chunk_end - chunk_start) / config.frame_interval))
|
| 229 |
|
| 230 |
# Initialize progress
|
| 231 |
if progress_dict is not None:
|
|
|
|
| 267 |
|
| 268 |
return ChunkResult(
|
| 269 |
chunk_id=chunk_id,
|
| 270 |
+
start_time=chunk_start,
|
| 271 |
+
end_time=chunk_end,
|
| 272 |
frames_processed=stats["total_frames"],
|
| 273 |
frames_with_scorebug=stats["frames_with_scorebug"],
|
| 274 |
frames_with_clock=stats["frames_with_clock"],
|
|
|
|
| 368 |
overall_pct = min(100, int(100 * total_frames_done / total_frames_expected)) if total_frames_expected > 0 else 0
|
| 369 |
progress_str = " | ".join(parts)
|
| 370 |
|
| 371 |
+
# Use chained comparison for cleaner code
|
| 372 |
+
if 0 < completed_workers < num_workers:
|
| 373 |
remaining = num_workers - completed_workers
|
| 374 |
status_msg = f" [{overall_pct:3d}%] {progress_str} — waiting for {remaining} worker{'s' if remaining > 1 else ''}..."
|
| 375 |
elif completed_workers == num_workers:
|
|
|
|
| 384 |
def _submit_chunk_jobs(
|
| 385 |
executor: ProcessPoolExecutor,
|
| 386 |
chunks: List[Tuple[int, float, float]],
|
| 387 |
+
config: ParallelProcessingConfig,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 388 |
progress_dict: Dict,
|
| 389 |
) -> Dict[Future, int]:
|
| 390 |
"""
|
|
|
|
| 393 |
Args:
|
| 394 |
executor: ProcessPoolExecutor instance.
|
| 395 |
chunks: List of (chunk_id, start_time, end_time) tuples.
|
| 396 |
+
config: Parallel processing configuration.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 397 |
progress_dict: Shared progress dictionary.
|
| 398 |
|
| 399 |
Returns:
|
|
|
|
| 404 |
future = executor.submit(
|
| 405 |
_process_chunk,
|
| 406 |
chunk_id,
|
| 407 |
+
config,
|
| 408 |
chunk_start,
|
| 409 |
chunk_end,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 410 |
progress_dict,
|
| 411 |
)
|
| 412 |
futures[future] = chunk_id
|
|
|
|
| 475 |
|
| 476 |
|
| 477 |
def process_video_parallel(
|
| 478 |
+
config: ParallelProcessingConfig,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 479 |
num_workers: int = 2,
|
| 480 |
) -> Tuple[List[Dict[str, Any]], Dict[str, int], float]:
|
| 481 |
"""
|
|
|
|
| 485 |
Results are merged in chronological order.
|
| 486 |
|
| 487 |
Args:
|
| 488 |
+
config: Parallel processing configuration containing video path,
|
| 489 |
+
time bounds, frame interval, and region coordinates.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 490 |
num_workers: Number of parallel workers (default 2).
|
| 491 |
|
| 492 |
Returns:
|
| 493 |
Tuple of (frame_data_list, stats_dict, total_io_time).
|
| 494 |
"""
|
| 495 |
# Calculate chunk boundaries
|
| 496 |
+
chunks = _calculate_chunk_boundaries(config.start_time, config.end_time, num_workers)
|
| 497 |
+
chunk_duration = (config.end_time - config.start_time) / num_workers
|
| 498 |
|
| 499 |
logger.info("Parallel processing: %d workers, %.1fs per chunk", num_workers, chunk_duration)
|
| 500 |
for chunk_id, chunk_start, chunk_end in chunks:
|
|
|
|
| 512 |
|
| 513 |
# Execute chunks in parallel
|
| 514 |
with ProcessPoolExecutor(max_workers=num_workers) as executor:
|
| 515 |
+
futures = _submit_chunk_jobs(executor, chunks, config, progress_dict)
|
|
|
|
|
|
|
| 516 |
results = _collect_chunk_results(futures)
|
| 517 |
|
| 518 |
# Stop progress monitor and show completion
|
src/pipeline/play_detector.py
CHANGED
|
@@ -29,8 +29,9 @@ from detection import DetectScoreBug, ScorebugDetection, DetectTimeouts
|
|
| 29 |
from readers import ReadPlayClock, PlayClockReading
|
| 30 |
from setup import DigitTemplateBuilder, DigitTemplateLibrary, PlayClockRegionConfig, PlayClockRegionExtractor
|
| 31 |
from tracking import TrackPlayState, PlayEvent, PlayMerger, TimeoutInfo
|
|
|
|
| 32 |
from video import ThreadedFrameReader
|
| 33 |
-
from .models import DetectionConfig, DetectionResult, VideoContext
|
| 34 |
from .parallel import process_video_parallel
|
| 35 |
from .template_builder_pass import TemplateBuildingPass
|
| 36 |
|
|
@@ -300,7 +301,7 @@ class PlayDetector:
|
|
| 300 |
frame_data: List[Dict[str, Any]] = []
|
| 301 |
|
| 302 |
# Flag to track if we've locked the scorebug region
|
| 303 |
-
scorebug_region_locked = self.scorebug_detector.
|
| 304 |
|
| 305 |
# Progress tracking
|
| 306 |
progress_interval = int(30 / self.config.frame_interval) # Log every 30 seconds of video
|
|
@@ -383,16 +384,12 @@ class PlayDetector:
|
|
| 383 |
scorebug = self.scorebug_detector.detect(frame)
|
| 384 |
timing["scorebug_detection"] += time.perf_counter() - t_start
|
| 385 |
|
| 386 |
-
# Initialize frame result
|
| 387 |
-
frame_result =
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
"away_timeouts": None,
|
| 393 |
-
"clock_value": None,
|
| 394 |
-
"clock_detected": False,
|
| 395 |
-
}
|
| 396 |
|
| 397 |
# Initialize timeout_info for state machine
|
| 398 |
timeout_info = None
|
|
@@ -449,7 +446,6 @@ class PlayDetector:
|
|
| 449 |
|
| 450 |
def _finalize_detection(
|
| 451 |
self,
|
| 452 |
-
frame_data: List[Dict[str, Any]], # pylint: disable=unused-argument
|
| 453 |
context: VideoContext,
|
| 454 |
stats: Dict[str, Any],
|
| 455 |
timing: Dict[str, float],
|
|
@@ -461,7 +457,6 @@ class PlayDetector:
|
|
| 461 |
the streaming pass, so we just need to merge/filter and build the result.
|
| 462 |
|
| 463 |
Args:
|
| 464 |
-
frame_data: Complete frame data from streaming detection pass (unused, kept for API compatibility)
|
| 465 |
context: Video context
|
| 466 |
stats: Processing stats
|
| 467 |
timing: Timing breakdown
|
|
@@ -570,11 +565,13 @@ class PlayDetector:
|
|
| 570 |
|
| 571 |
# Streaming detection pass: read frames + template match + state machine (all in one)
|
| 572 |
# Uses threaded video I/O to overlap reading with processing
|
| 573 |
-
|
|
|
|
| 574 |
|
| 575 |
# Finalize: Clock reset classification and result building
|
| 576 |
-
return self._finalize_detection(
|
| 577 |
|
|
|
|
| 578 |
def detect_parallel(self, num_workers: int = 2, output_dir: Optional[Path] = None) -> DetectionResult:
|
| 579 |
"""
|
| 580 |
Run play detection using parallel chunk processing.
|
|
@@ -596,8 +593,6 @@ class PlayDetector:
|
|
| 596 |
Returns:
|
| 597 |
DetectionResult with all detected plays
|
| 598 |
"""
|
| 599 |
-
import time as time_module
|
| 600 |
-
|
| 601 |
logger.info("Starting parallel play detection (%d workers)...", num_workers)
|
| 602 |
logger.info("Video: %s", self.config.video_path)
|
| 603 |
logger.info("Segment: %.1fs to %s", self.config.start_time, self.config.end_time or "end")
|
|
@@ -643,9 +638,10 @@ class PlayDetector:
|
|
| 643 |
|
| 644 |
# Run parallel processing
|
| 645 |
logger.info("Starting parallel frame extraction...")
|
| 646 |
-
t_parallel_start =
|
| 647 |
|
| 648 |
-
|
|
|
|
| 649 |
video_path=self.config.video_path,
|
| 650 |
start_time=self.config.start_time,
|
| 651 |
end_time=end_time,
|
|
@@ -654,12 +650,13 @@ class PlayDetector:
|
|
| 654 |
fixed_scorebug_coords=self.config.fixed_scorebug_coords,
|
| 655 |
template_library_path=str(template_path) if template_path else None,
|
| 656 |
timeout_config_path=timeout_config_path,
|
| 657 |
-
num_workers=num_workers,
|
| 658 |
)
|
| 659 |
|
|
|
|
|
|
|
| 660 |
timing["video_io"] = io_time
|
| 661 |
# Estimate template matching time from parallel processing
|
| 662 |
-
parallel_time =
|
| 663 |
timing["template_matching"] = max(0, parallel_time - io_time - timing["template_building"])
|
| 664 |
|
| 665 |
logger.info("Parallel processing complete: %d frames", stats["total_frames"])
|
|
@@ -678,7 +675,7 @@ class PlayDetector:
|
|
| 678 |
)
|
| 679 |
|
| 680 |
# Run state machine on merged frame data
|
| 681 |
-
t_sm_start =
|
| 682 |
for frame in frame_data:
|
| 683 |
# Create proper objects for state machine
|
| 684 |
scorebug = ScorebugDetection(
|
|
@@ -700,7 +697,7 @@ class PlayDetector:
|
|
| 700 |
away_timeouts=frame.get("away_timeouts"),
|
| 701 |
)
|
| 702 |
self.state_machine.update(frame["timestamp"], scorebug, clock_reading, timeout_info)
|
| 703 |
-
timing["state_machine"] =
|
| 704 |
|
| 705 |
# Update stats dict
|
| 706 |
stats_dict = {
|
|
@@ -710,11 +707,11 @@ class PlayDetector:
|
|
| 710 |
}
|
| 711 |
|
| 712 |
# Finalize: Clock reset classification and result building
|
| 713 |
-
return self._finalize_detection(
|
| 714 |
|
| 715 |
def _log_detection_mode(self) -> None:
|
| 716 |
"""Log the detection mode being used."""
|
| 717 |
-
use_fixed_region = self.scorebug_detector and self.scorebug_detector.
|
| 718 |
|
| 719 |
if use_fixed_region:
|
| 720 |
logger.info("Mode: Fixed region (scorebug location pre-configured)")
|
|
|
|
| 29 |
from readers import ReadPlayClock, PlayClockReading
|
| 30 |
from setup import DigitTemplateBuilder, DigitTemplateLibrary, PlayClockRegionConfig, PlayClockRegionExtractor
|
| 31 |
from tracking import TrackPlayState, PlayEvent, PlayMerger, TimeoutInfo
|
| 32 |
+
from utils import create_frame_result
|
| 33 |
from video import ThreadedFrameReader
|
| 34 |
+
from .models import DetectionConfig, DetectionResult, ParallelProcessingConfig, VideoContext
|
| 35 |
from .parallel import process_video_parallel
|
| 36 |
from .template_builder_pass import TemplateBuildingPass
|
| 37 |
|
|
|
|
| 301 |
frame_data: List[Dict[str, Any]] = []
|
| 302 |
|
| 303 |
# Flag to track if we've locked the scorebug region
|
| 304 |
+
scorebug_region_locked = self.scorebug_detector.is_fixed_region_mode if self.scorebug_detector else False
|
| 305 |
|
| 306 |
# Progress tracking
|
| 307 |
progress_interval = int(30 / self.config.frame_interval) # Log every 30 seconds of video
|
|
|
|
| 384 |
scorebug = self.scorebug_detector.detect(frame)
|
| 385 |
timing["scorebug_detection"] += time.perf_counter() - t_start
|
| 386 |
|
| 387 |
+
# Initialize frame result using shared factory
|
| 388 |
+
frame_result = create_frame_result(
|
| 389 |
+
timestamp=current_time,
|
| 390 |
+
scorebug_detected=scorebug.detected,
|
| 391 |
+
scorebug_bbox=scorebug.bbox if scorebug.detected else None,
|
| 392 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 393 |
|
| 394 |
# Initialize timeout_info for state machine
|
| 395 |
timeout_info = None
|
|
|
|
| 446 |
|
| 447 |
def _finalize_detection(
|
| 448 |
self,
|
|
|
|
| 449 |
context: VideoContext,
|
| 450 |
stats: Dict[str, Any],
|
| 451 |
timing: Dict[str, float],
|
|
|
|
| 457 |
the streaming pass, so we just need to merge/filter and build the result.
|
| 458 |
|
| 459 |
Args:
|
|
|
|
| 460 |
context: Video context
|
| 461 |
stats: Processing stats
|
| 462 |
timing: Timing breakdown
|
|
|
|
| 565 |
|
| 566 |
# Streaming detection pass: read frames + template match + state machine (all in one)
|
| 567 |
# Uses threaded video I/O to overlap reading with processing
|
| 568 |
+
# Note: Return value unused - state machine is updated inline during the pass
|
| 569 |
+
_ = self._streaming_detection_pass(context, stats, timing)
|
| 570 |
|
| 571 |
# Finalize: Clock reset classification and result building
|
| 572 |
+
return self._finalize_detection(context, stats, timing)
|
| 573 |
|
| 574 |
+
# pylint: disable=too-many-locals
|
| 575 |
def detect_parallel(self, num_workers: int = 2, output_dir: Optional[Path] = None) -> DetectionResult:
|
| 576 |
"""
|
| 577 |
Run play detection using parallel chunk processing.
|
|
|
|
| 593 |
Returns:
|
| 594 |
DetectionResult with all detected plays
|
| 595 |
"""
|
|
|
|
|
|
|
| 596 |
logger.info("Starting parallel play detection (%d workers)...", num_workers)
|
| 597 |
logger.info("Video: %s", self.config.video_path)
|
| 598 |
logger.info("Segment: %.1fs to %s", self.config.start_time, self.config.end_time or "end")
|
|
|
|
| 638 |
|
| 639 |
# Run parallel processing
|
| 640 |
logger.info("Starting parallel frame extraction...")
|
| 641 |
+
t_parallel_start = time.perf_counter()
|
| 642 |
|
| 643 |
+
# Create parallel processing config
|
| 644 |
+
parallel_config = ParallelProcessingConfig(
|
| 645 |
video_path=self.config.video_path,
|
| 646 |
start_time=self.config.start_time,
|
| 647 |
end_time=end_time,
|
|
|
|
| 650 |
fixed_scorebug_coords=self.config.fixed_scorebug_coords,
|
| 651 |
template_library_path=str(template_path) if template_path else None,
|
| 652 |
timeout_config_path=timeout_config_path,
|
|
|
|
| 653 |
)
|
| 654 |
|
| 655 |
+
frame_data, stats, io_time = process_video_parallel(parallel_config, num_workers=num_workers)
|
| 656 |
+
|
| 657 |
timing["video_io"] = io_time
|
| 658 |
# Estimate template matching time from parallel processing
|
| 659 |
+
parallel_time = time.perf_counter() - t_parallel_start
|
| 660 |
timing["template_matching"] = max(0, parallel_time - io_time - timing["template_building"])
|
| 661 |
|
| 662 |
logger.info("Parallel processing complete: %d frames", stats["total_frames"])
|
|
|
|
| 675 |
)
|
| 676 |
|
| 677 |
# Run state machine on merged frame data
|
| 678 |
+
t_sm_start = time.perf_counter()
|
| 679 |
for frame in frame_data:
|
| 680 |
# Create proper objects for state machine
|
| 681 |
scorebug = ScorebugDetection(
|
|
|
|
| 697 |
away_timeouts=frame.get("away_timeouts"),
|
| 698 |
)
|
| 699 |
self.state_machine.update(frame["timestamp"], scorebug, clock_reading, timeout_info)
|
| 700 |
+
timing["state_machine"] = time.perf_counter() - t_sm_start
|
| 701 |
|
| 702 |
# Update stats dict
|
| 703 |
stats_dict = {
|
|
|
|
| 707 |
}
|
| 708 |
|
| 709 |
# Finalize: Clock reset classification and result building
|
| 710 |
+
return self._finalize_detection(context, stats_dict, timing)
|
| 711 |
|
| 712 |
def _log_detection_mode(self) -> None:
|
| 713 |
"""Log the detection mode being used."""
|
| 714 |
+
use_fixed_region = self.scorebug_detector and self.scorebug_detector.is_fixed_region_mode
|
| 715 |
|
| 716 |
if use_fixed_region:
|
| 717 |
logger.info("Mode: Fixed region (scorebug location pre-configured)")
|
src/pipeline/template_builder_pass.py
CHANGED
|
@@ -4,15 +4,21 @@ Template building pass (Pass 0) for the play detection pipeline.
|
|
| 4 |
This module handles the initial phase of building digit templates by scanning
|
| 5 |
the video for frames with scorebugs and running OCR on the play clock region.
|
| 6 |
These templates are then used for fast template matching in subsequent passes.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
"""
|
| 8 |
|
| 9 |
import logging
|
| 10 |
import time
|
|
|
|
| 11 |
from pathlib import Path
|
| 12 |
-
from typing import
|
| 13 |
|
| 14 |
import cv2
|
| 15 |
import easyocr
|
|
|
|
| 16 |
|
| 17 |
from detection import DetectScoreBug
|
| 18 |
from readers import ReadPlayClock
|
|
@@ -21,23 +27,186 @@ from .models import DetectionConfig
|
|
| 21 |
|
| 22 |
logger = logging.getLogger(__name__)
|
| 23 |
|
| 24 |
-
# Global EasyOCR reader instance for template building (lazy-loaded)
|
| 25 |
-
_easyocr_reader = None # pylint: disable=invalid-name
|
| 26 |
-
|
| 27 |
|
|
|
|
| 28 |
def get_easyocr_reader() -> easyocr.Reader:
|
| 29 |
-
"""Get or create the
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
"""
|
| 40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
|
| 42 |
This pass runs BEFORE the main detection loop. It:
|
| 43 |
1. Uses the scorebug template (created during user setup) for detection
|
|
@@ -47,6 +216,89 @@ class TemplateBuildingPass:
|
|
| 47 |
|
| 48 |
This solves the problem of building templates from pre-game content that
|
| 49 |
has no scorebug, which causes all subsequent template matching to fail.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
"""
|
| 51 |
|
| 52 |
def __init__(
|
|
@@ -58,17 +310,7 @@ class TemplateBuildingPass:
|
|
| 58 |
max_scan_frames: int = 2000,
|
| 59 |
target_coverage: float = 0.70,
|
| 60 |
):
|
| 61 |
-
"""
|
| 62 |
-
Initialize the template building pass.
|
| 63 |
-
|
| 64 |
-
Args:
|
| 65 |
-
config: Detection configuration
|
| 66 |
-
clock_reader: Play clock region extractor
|
| 67 |
-
template_builder: Digit template builder
|
| 68 |
-
min_samples: Minimum valid OCR samples to collect
|
| 69 |
-
max_scan_frames: Maximum frames to scan
|
| 70 |
-
target_coverage: Target template coverage (0.0-1.0)
|
| 71 |
-
"""
|
| 72 |
self.config = config
|
| 73 |
self.clock_reader = clock_reader
|
| 74 |
self.template_builder = template_builder
|
|
@@ -77,203 +319,12 @@ class TemplateBuildingPass:
|
|
| 77 |
self.target_coverage = target_coverage
|
| 78 |
|
| 79 |
def run(self) -> Tuple[Optional[DigitTemplateLibrary], Optional[ReadPlayClock], float]:
|
| 80 |
-
"""
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
logger.info(" Target: %d samples OR %.0f%% template coverage", self.min_samples, self.target_coverage * 100)
|
| 89 |
-
|
| 90 |
-
t_build_start = time.perf_counter()
|
| 91 |
-
|
| 92 |
-
# Validate configuration
|
| 93 |
-
if not self._validate_config():
|
| 94 |
-
return None, None, time.perf_counter() - t_build_start
|
| 95 |
-
|
| 96 |
-
# Create temporary scorebug detector for Pass 0
|
| 97 |
-
sb_x, sb_y, sb_w, sb_h = self.config.fixed_scorebug_coords
|
| 98 |
-
logger.info(" Scorebug region: (%d, %d, %d, %d)", sb_x, sb_y, sb_w, sb_h)
|
| 99 |
-
logger.info(" Scorebug template: %s", self.config.template_path)
|
| 100 |
-
|
| 101 |
-
temp_detector = DetectScoreBug(
|
| 102 |
-
template_path=str(self.config.template_path),
|
| 103 |
-
fixed_region=(sb_x, sb_y, sb_w, sb_h),
|
| 104 |
-
use_split_detection=self.config.use_split_detection,
|
| 105 |
-
)
|
| 106 |
-
|
| 107 |
-
# Get EasyOCR reader for labeling
|
| 108 |
-
reader = get_easyocr_reader()
|
| 109 |
-
|
| 110 |
-
# Scan video and collect samples
|
| 111 |
-
valid_samples, frames_scanned, frames_with_scorebug = self._scan_video(temp_detector, reader)
|
| 112 |
-
|
| 113 |
-
# Log scan results
|
| 114 |
-
logger.info(
|
| 115 |
-
"Pass 0 scan complete: %d frames, %d with scorebug (%.1f%%), %d valid samples",
|
| 116 |
-
frames_scanned,
|
| 117 |
-
frames_with_scorebug,
|
| 118 |
-
100 * frames_with_scorebug / max(1, frames_scanned),
|
| 119 |
-
valid_samples,
|
| 120 |
-
)
|
| 121 |
-
|
| 122 |
-
# Check if we have enough samples
|
| 123 |
-
if valid_samples < 50:
|
| 124 |
-
logger.warning("Pass 0: Insufficient samples (%d < 50), template building may fail", valid_samples)
|
| 125 |
-
if frames_with_scorebug == 0:
|
| 126 |
-
logger.error("Pass 0: No scorebugs detected! Check that the scorebug template matches the video.")
|
| 127 |
-
return None, None, time.perf_counter() - t_build_start
|
| 128 |
-
|
| 129 |
-
# Build templates
|
| 130 |
-
template_library = self.template_builder.build_templates(min_samples=2)
|
| 131 |
-
coverage = template_library.get_coverage_status()
|
| 132 |
-
logger.info(
|
| 133 |
-
"Pass 0 templates built: %d/%d (%.1f%%) coverage",
|
| 134 |
-
coverage["total_have"],
|
| 135 |
-
coverage["total_needed"],
|
| 136 |
-
100 * coverage["total_have"] / coverage["total_needed"],
|
| 137 |
)
|
| 138 |
-
|
| 139 |
-
# Create template reader
|
| 140 |
-
region_w = self.clock_reader.config.width if self.clock_reader.config else 50
|
| 141 |
-
region_h = self.clock_reader.config.height if self.clock_reader.config else 28
|
| 142 |
-
template_reader = ReadPlayClock(template_library, region_w, region_h)
|
| 143 |
-
|
| 144 |
-
build_time = time.perf_counter() - t_build_start
|
| 145 |
-
logger.info("Pass 0 complete: Template building took %.2fs", build_time)
|
| 146 |
-
|
| 147 |
-
return template_library, template_reader, build_time
|
| 148 |
-
|
| 149 |
-
def _validate_config(self) -> bool:
|
| 150 |
-
"""Validate that required configuration is present."""
|
| 151 |
-
if not self.config.template_path:
|
| 152 |
-
logger.warning("Pass 0: No scorebug template path provided, cannot detect scorebugs")
|
| 153 |
-
return False
|
| 154 |
-
|
| 155 |
-
template_path = Path(self.config.template_path)
|
| 156 |
-
if not template_path.exists():
|
| 157 |
-
logger.warning("Pass 0: Scorebug template not found at %s", template_path)
|
| 158 |
-
return False
|
| 159 |
-
|
| 160 |
-
if not self.config.fixed_scorebug_coords:
|
| 161 |
-
logger.warning("Pass 0: No fixed scorebug coordinates provided")
|
| 162 |
-
return False
|
| 163 |
-
|
| 164 |
-
return True
|
| 165 |
-
|
| 166 |
-
def _scan_video(self, temp_detector: DetectScoreBug, reader: easyocr.Reader) -> Tuple[int, int, int]:
|
| 167 |
-
"""
|
| 168 |
-
Scan video frames and collect OCR samples for template building.
|
| 169 |
-
|
| 170 |
-
Args:
|
| 171 |
-
temp_detector: Scorebug detector to use
|
| 172 |
-
reader: EasyOCR reader instance
|
| 173 |
-
|
| 174 |
-
Returns:
|
| 175 |
-
Tuple of (valid_samples, frames_scanned, frames_with_scorebug)
|
| 176 |
-
"""
|
| 177 |
-
# Open video
|
| 178 |
-
cap = cv2.VideoCapture(self.config.video_path)
|
| 179 |
-
if not cap.isOpened():
|
| 180 |
-
logger.error("Pass 0: Could not open video")
|
| 181 |
-
return 0, 0, 0
|
| 182 |
-
|
| 183 |
-
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 184 |
-
frame_skip = int(self.config.frame_interval * fps)
|
| 185 |
-
|
| 186 |
-
# Initialize counters
|
| 187 |
-
valid_samples = 0
|
| 188 |
-
frames_scanned = 0
|
| 189 |
-
frames_with_scorebug = 0
|
| 190 |
-
current_frame = 0
|
| 191 |
-
|
| 192 |
-
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
|
| 193 |
-
logger.info(" Scanning from frame 0 (0.0s)...")
|
| 194 |
-
|
| 195 |
-
try:
|
| 196 |
-
while frames_scanned < self.max_scan_frames:
|
| 197 |
-
ret, frame = cap.read()
|
| 198 |
-
if not ret:
|
| 199 |
-
break
|
| 200 |
-
|
| 201 |
-
current_time = current_frame / fps
|
| 202 |
-
frames_scanned += 1
|
| 203 |
-
|
| 204 |
-
# Detect scorebug using template matching
|
| 205 |
-
scorebug_result = temp_detector.detect(frame)
|
| 206 |
-
|
| 207 |
-
if scorebug_result.detected:
|
| 208 |
-
frames_with_scorebug += 1
|
| 209 |
-
valid_samples += self._process_scorebug_frame(frame, scorebug_result.bbox, current_time, reader)
|
| 210 |
-
|
| 211 |
-
# Progress logging every 200 frames
|
| 212 |
-
if frames_scanned % 200 == 0:
|
| 213 |
-
coverage = self.template_builder.get_coverage_estimate()
|
| 214 |
-
logger.info(
|
| 215 |
-
" Pass 0 progress: %d frames scanned, %d with scorebug, %d valid samples, ~%.0f%% coverage",
|
| 216 |
-
frames_scanned,
|
| 217 |
-
frames_with_scorebug,
|
| 218 |
-
valid_samples,
|
| 219 |
-
coverage * 100,
|
| 220 |
-
)
|
| 221 |
-
|
| 222 |
-
# Check completion criteria
|
| 223 |
-
if valid_samples >= self.min_samples or coverage >= self.target_coverage:
|
| 224 |
-
logger.info(" Completion criteria met!")
|
| 225 |
-
break
|
| 226 |
-
|
| 227 |
-
# Skip frames
|
| 228 |
-
for _ in range(frame_skip - 1):
|
| 229 |
-
cap.grab()
|
| 230 |
-
current_frame += frame_skip
|
| 231 |
-
|
| 232 |
-
finally:
|
| 233 |
-
cap.release()
|
| 234 |
-
|
| 235 |
-
return valid_samples, frames_scanned, frames_with_scorebug
|
| 236 |
-
|
| 237 |
-
def _process_scorebug_frame(self, frame, scorebug_bbox: Tuple[int, int, int, int], current_time: float, reader: easyocr.Reader) -> int:
|
| 238 |
-
"""
|
| 239 |
-
Process a frame with detected scorebug and extract play clock via OCR.
|
| 240 |
-
|
| 241 |
-
Args:
|
| 242 |
-
frame: Video frame
|
| 243 |
-
scorebug_bbox: Scorebug bounding box (x, y, w, h)
|
| 244 |
-
current_time: Current timestamp
|
| 245 |
-
reader: EasyOCR reader
|
| 246 |
-
|
| 247 |
-
Returns:
|
| 248 |
-
1 if a valid sample was collected, 0 otherwise
|
| 249 |
-
"""
|
| 250 |
-
# Extract play clock region
|
| 251 |
-
play_clock_region = self.clock_reader.extract_region(frame, scorebug_bbox)
|
| 252 |
-
if play_clock_region is None:
|
| 253 |
-
return 0
|
| 254 |
-
|
| 255 |
-
# Preprocess and run OCR
|
| 256 |
-
preprocessed = self.clock_reader.preprocess_for_ocr(play_clock_region)
|
| 257 |
-
|
| 258 |
-
try:
|
| 259 |
-
ocr_results = reader.readtext(preprocessed, allowlist="0123456789", detail=1)
|
| 260 |
-
if not ocr_results:
|
| 261 |
-
return 0
|
| 262 |
-
|
| 263 |
-
best = max(ocr_results, key=lambda x: x[2])
|
| 264 |
-
text, confidence = best[1].strip(), best[2]
|
| 265 |
-
|
| 266 |
-
# Parse and validate
|
| 267 |
-
if text and confidence >= 0.5:
|
| 268 |
-
try:
|
| 269 |
-
value = int(text)
|
| 270 |
-
if 0 <= value <= 40:
|
| 271 |
-
self.template_builder.add_sample(play_clock_region, value, current_time, confidence)
|
| 272 |
-
return 1
|
| 273 |
-
except ValueError:
|
| 274 |
-
pass # Invalid text, skip
|
| 275 |
-
|
| 276 |
-
except Exception as e: # pylint: disable=broad-except
|
| 277 |
-
logger.debug("Pass 0: OCR error at %.1fs: %s", current_time, e)
|
| 278 |
-
|
| 279 |
-
return 0
|
|
|
|
| 4 |
This module handles the initial phase of building digit templates by scanning
|
| 5 |
the video for frames with scorebugs and running OCR on the play clock region.
|
| 6 |
These templates are then used for fast template matching in subsequent passes.
|
| 7 |
+
|
| 8 |
+
Note: This module uses functions instead of a class because there is only one
|
| 9 |
+
public entry point (run_template_building_pass). The class pattern was causing
|
| 10 |
+
"too few public methods" warnings.
|
| 11 |
"""
|
| 12 |
|
| 13 |
import logging
|
| 14 |
import time
|
| 15 |
+
from functools import lru_cache
|
| 16 |
from pathlib import Path
|
| 17 |
+
from typing import Optional, Tuple
|
| 18 |
|
| 19 |
import cv2
|
| 20 |
import easyocr
|
| 21 |
+
import numpy as np
|
| 22 |
|
| 23 |
from detection import DetectScoreBug
|
| 24 |
from readers import ReadPlayClock
|
|
|
|
| 27 |
|
| 28 |
logger = logging.getLogger(__name__)
|
| 29 |
|
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
+
@lru_cache(maxsize=1)
|
| 32 |
def get_easyocr_reader() -> easyocr.Reader:
|
| 33 |
+
"""Get or create the EasyOCR reader instance (lazily loaded, cached)."""
|
| 34 |
+
logger.info("Initializing EasyOCR reader for template building...")
|
| 35 |
+
reader = easyocr.Reader(["en"], gpu=False, verbose=False)
|
| 36 |
+
logger.info("EasyOCR reader initialized")
|
| 37 |
+
return reader
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _validate_config(config: DetectionConfig) -> bool:
|
| 41 |
+
"""Validate that required configuration is present for template building."""
|
| 42 |
+
if not config.template_path:
|
| 43 |
+
logger.warning("Pass 0: No scorebug template path provided, cannot detect scorebugs")
|
| 44 |
+
return False
|
| 45 |
+
|
| 46 |
+
template_path = Path(config.template_path)
|
| 47 |
+
if not template_path.exists():
|
| 48 |
+
logger.warning("Pass 0: Scorebug template not found at %s", template_path)
|
| 49 |
+
return False
|
| 50 |
+
|
| 51 |
+
if not config.fixed_scorebug_coords:
|
| 52 |
+
logger.warning("Pass 0: No fixed scorebug coordinates provided")
|
| 53 |
+
return False
|
| 54 |
+
|
| 55 |
+
return True
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _process_scorebug_frame(
|
| 59 |
+
frame: np.ndarray,
|
| 60 |
+
scorebug_bbox: Tuple[int, int, int, int],
|
| 61 |
+
current_time: float,
|
| 62 |
+
clock_reader: PlayClockRegionExtractor,
|
| 63 |
+
template_builder: DigitTemplateBuilder,
|
| 64 |
+
reader: easyocr.Reader,
|
| 65 |
+
) -> int:
|
| 66 |
+
"""
|
| 67 |
+
Process a frame with detected scorebug and extract play clock via OCR.
|
| 68 |
+
|
| 69 |
+
Args:
|
| 70 |
+
frame: Video frame
|
| 71 |
+
scorebug_bbox: Scorebug bounding box (x, y, w, h)
|
| 72 |
+
current_time: Current timestamp
|
| 73 |
+
clock_reader: Play clock region extractor
|
| 74 |
+
template_builder: Digit template builder
|
| 75 |
+
reader: EasyOCR reader
|
| 76 |
+
|
| 77 |
+
Returns:
|
| 78 |
+
1 if a valid sample was collected, 0 otherwise
|
| 79 |
+
"""
|
| 80 |
+
# Extract play clock region
|
| 81 |
+
play_clock_region = clock_reader.extract_region(frame, scorebug_bbox)
|
| 82 |
+
if play_clock_region is None:
|
| 83 |
+
return 0
|
| 84 |
+
|
| 85 |
+
# Preprocess and run OCR
|
| 86 |
+
preprocessed = clock_reader.preprocess_for_ocr(play_clock_region)
|
| 87 |
|
| 88 |
+
try:
|
| 89 |
+
ocr_results = reader.readtext(preprocessed, allowlist="0123456789", detail=1)
|
| 90 |
+
if not ocr_results:
|
| 91 |
+
return 0
|
| 92 |
|
| 93 |
+
best = max(ocr_results, key=lambda x: x[2])
|
| 94 |
+
text, confidence = best[1].strip(), best[2]
|
| 95 |
+
|
| 96 |
+
# Parse and validate
|
| 97 |
+
if text and confidence >= 0.5:
|
| 98 |
+
try:
|
| 99 |
+
value = int(text)
|
| 100 |
+
if 0 <= value <= 40:
|
| 101 |
+
template_builder.add_sample(play_clock_region, value, current_time, confidence)
|
| 102 |
+
return 1
|
| 103 |
+
except ValueError:
|
| 104 |
+
pass # Invalid text, skip
|
| 105 |
+
|
| 106 |
+
except Exception as e: # pylint: disable=broad-except
|
| 107 |
+
logger.debug("Pass 0: OCR error at %.1fs: %s", current_time, e)
|
| 108 |
+
|
| 109 |
+
return 0
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def _scan_video(
|
| 113 |
+
config: DetectionConfig,
|
| 114 |
+
clock_reader: PlayClockRegionExtractor,
|
| 115 |
+
template_builder: DigitTemplateBuilder,
|
| 116 |
+
temp_detector: DetectScoreBug,
|
| 117 |
+
reader: easyocr.Reader,
|
| 118 |
+
min_samples: int,
|
| 119 |
+
max_scan_frames: int,
|
| 120 |
+
target_coverage: float,
|
| 121 |
+
) -> Tuple[int, int, int]:
|
| 122 |
"""
|
| 123 |
+
Scan video frames and collect OCR samples for template building.
|
| 124 |
+
|
| 125 |
+
Args:
|
| 126 |
+
config: Detection configuration
|
| 127 |
+
clock_reader: Play clock region extractor
|
| 128 |
+
template_builder: Digit template builder
|
| 129 |
+
temp_detector: Scorebug detector to use
|
| 130 |
+
reader: EasyOCR reader instance
|
| 131 |
+
min_samples: Minimum valid OCR samples to collect
|
| 132 |
+
max_scan_frames: Maximum frames to scan
|
| 133 |
+
target_coverage: Target template coverage (0.0-1.0)
|
| 134 |
+
|
| 135 |
+
Returns:
|
| 136 |
+
Tuple of (valid_samples, frames_scanned, frames_with_scorebug)
|
| 137 |
+
"""
|
| 138 |
+
# Open video
|
| 139 |
+
cap = cv2.VideoCapture(config.video_path)
|
| 140 |
+
if not cap.isOpened():
|
| 141 |
+
logger.error("Pass 0: Could not open video")
|
| 142 |
+
return 0, 0, 0
|
| 143 |
+
|
| 144 |
+
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 145 |
+
frame_skip = int(config.frame_interval * fps)
|
| 146 |
+
|
| 147 |
+
# Initialize counters
|
| 148 |
+
valid_samples = 0
|
| 149 |
+
frames_scanned = 0
|
| 150 |
+
frames_with_scorebug = 0
|
| 151 |
+
current_frame = 0
|
| 152 |
+
|
| 153 |
+
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
|
| 154 |
+
logger.info(" Scanning from frame 0 (0.0s)...")
|
| 155 |
+
|
| 156 |
+
try:
|
| 157 |
+
while frames_scanned < max_scan_frames:
|
| 158 |
+
ret, frame = cap.read()
|
| 159 |
+
if not ret:
|
| 160 |
+
break
|
| 161 |
+
|
| 162 |
+
current_time = current_frame / fps
|
| 163 |
+
frames_scanned += 1
|
| 164 |
+
|
| 165 |
+
# Detect scorebug using template matching
|
| 166 |
+
scorebug_result = temp_detector.detect(frame)
|
| 167 |
+
|
| 168 |
+
if scorebug_result.detected:
|
| 169 |
+
frames_with_scorebug += 1
|
| 170 |
+
valid_samples += _process_scorebug_frame(frame, scorebug_result.bbox, current_time, clock_reader, template_builder, reader)
|
| 171 |
+
|
| 172 |
+
# Progress logging every 200 frames
|
| 173 |
+
if frames_scanned % 200 == 0:
|
| 174 |
+
coverage = template_builder.get_coverage_estimate()
|
| 175 |
+
logger.info(
|
| 176 |
+
" Pass 0 progress: %d frames scanned, %d with scorebug, %d valid samples, ~%.0f%% coverage",
|
| 177 |
+
frames_scanned,
|
| 178 |
+
frames_with_scorebug,
|
| 179 |
+
valid_samples,
|
| 180 |
+
coverage * 100,
|
| 181 |
+
)
|
| 182 |
+
|
| 183 |
+
# Check completion criteria
|
| 184 |
+
if valid_samples >= min_samples or coverage >= target_coverage:
|
| 185 |
+
logger.info(" Completion criteria met!")
|
| 186 |
+
break
|
| 187 |
+
|
| 188 |
+
# Skip frames
|
| 189 |
+
for _ in range(frame_skip - 1):
|
| 190 |
+
cap.grab()
|
| 191 |
+
current_frame += frame_skip
|
| 192 |
+
|
| 193 |
+
finally:
|
| 194 |
+
cap.release()
|
| 195 |
+
|
| 196 |
+
return valid_samples, frames_scanned, frames_with_scorebug
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
# pylint: disable=too-many-locals
|
| 200 |
+
def run_template_building_pass(
|
| 201 |
+
config: DetectionConfig,
|
| 202 |
+
clock_reader: PlayClockRegionExtractor,
|
| 203 |
+
template_builder: DigitTemplateBuilder,
|
| 204 |
+
min_samples: int = 200,
|
| 205 |
+
max_scan_frames: int = 2000,
|
| 206 |
+
target_coverage: float = 0.70,
|
| 207 |
+
) -> Tuple[Optional[DigitTemplateLibrary], Optional[ReadPlayClock], float]:
|
| 208 |
+
"""
|
| 209 |
+
Run the template building pass (Pass 0).
|
| 210 |
|
| 211 |
This pass runs BEFORE the main detection loop. It:
|
| 212 |
1. Uses the scorebug template (created during user setup) for detection
|
|
|
|
| 216 |
|
| 217 |
This solves the problem of building templates from pre-game content that
|
| 218 |
has no scorebug, which causes all subsequent template matching to fail.
|
| 219 |
+
|
| 220 |
+
Args:
|
| 221 |
+
config: Detection configuration
|
| 222 |
+
clock_reader: Play clock region extractor
|
| 223 |
+
template_builder: Digit template builder
|
| 224 |
+
min_samples: Minimum valid OCR samples to collect (default: 200)
|
| 225 |
+
max_scan_frames: Maximum frames to scan (default: 2000)
|
| 226 |
+
target_coverage: Target template coverage 0.0-1.0 (default: 0.70)
|
| 227 |
+
|
| 228 |
+
Returns:
|
| 229 |
+
Tuple of (template_library, template_reader, build_time).
|
| 230 |
+
template_library and template_reader may be None if building failed.
|
| 231 |
+
"""
|
| 232 |
+
logger.info("Pass 0: Building templates using scorebug template detection...")
|
| 233 |
+
logger.info(" Target: %d samples OR %.0f%% template coverage", min_samples, target_coverage * 100)
|
| 234 |
+
|
| 235 |
+
t_build_start = time.perf_counter()
|
| 236 |
+
|
| 237 |
+
# Validate configuration
|
| 238 |
+
if not _validate_config(config):
|
| 239 |
+
return None, None, time.perf_counter() - t_build_start
|
| 240 |
+
|
| 241 |
+
# Create temporary scorebug detector for Pass 0
|
| 242 |
+
sb_x, sb_y, sb_w, sb_h = config.fixed_scorebug_coords
|
| 243 |
+
logger.info(" Scorebug region: (%d, %d, %d, %d)", sb_x, sb_y, sb_w, sb_h)
|
| 244 |
+
logger.info(" Scorebug template: %s", config.template_path)
|
| 245 |
+
|
| 246 |
+
temp_detector = DetectScoreBug(
|
| 247 |
+
template_path=str(config.template_path),
|
| 248 |
+
fixed_region=(sb_x, sb_y, sb_w, sb_h),
|
| 249 |
+
use_split_detection=config.use_split_detection,
|
| 250 |
+
)
|
| 251 |
+
|
| 252 |
+
# Get EasyOCR reader for labeling
|
| 253 |
+
reader = get_easyocr_reader()
|
| 254 |
+
|
| 255 |
+
# Scan video and collect samples
|
| 256 |
+
valid_samples, frames_scanned, frames_with_scorebug = _scan_video(config, clock_reader, template_builder, temp_detector, reader, min_samples, max_scan_frames, target_coverage)
|
| 257 |
+
|
| 258 |
+
# Log scan results
|
| 259 |
+
logger.info(
|
| 260 |
+
"Pass 0 scan complete: %d frames, %d with scorebug (%.1f%%), %d valid samples",
|
| 261 |
+
frames_scanned,
|
| 262 |
+
frames_with_scorebug,
|
| 263 |
+
100 * frames_with_scorebug / max(1, frames_scanned),
|
| 264 |
+
valid_samples,
|
| 265 |
+
)
|
| 266 |
+
|
| 267 |
+
# Check if we have enough samples
|
| 268 |
+
if valid_samples < 50:
|
| 269 |
+
logger.warning("Pass 0: Insufficient samples (%d < 50), template building may fail", valid_samples)
|
| 270 |
+
if frames_with_scorebug == 0:
|
| 271 |
+
logger.error("Pass 0: No scorebugs detected! Check that the scorebug template matches the video.")
|
| 272 |
+
return None, None, time.perf_counter() - t_build_start
|
| 273 |
+
|
| 274 |
+
# Build templates
|
| 275 |
+
template_library = template_builder.build_templates(min_samples=2)
|
| 276 |
+
coverage = template_library.get_coverage_status()
|
| 277 |
+
logger.info(
|
| 278 |
+
"Pass 0 templates built: %d/%d (%.1f%%) coverage",
|
| 279 |
+
coverage["total_have"],
|
| 280 |
+
coverage["total_needed"],
|
| 281 |
+
100 * coverage["total_have"] / coverage["total_needed"],
|
| 282 |
+
)
|
| 283 |
+
|
| 284 |
+
# Create template reader
|
| 285 |
+
region_w = clock_reader.config.width if clock_reader.config else 50
|
| 286 |
+
region_h = clock_reader.config.height if clock_reader.config else 28
|
| 287 |
+
template_reader = ReadPlayClock(template_library, region_w, region_h)
|
| 288 |
+
|
| 289 |
+
build_time = time.perf_counter() - t_build_start
|
| 290 |
+
logger.info("Pass 0 complete: Template building took %.2fs", build_time)
|
| 291 |
+
|
| 292 |
+
return template_library, template_reader, build_time
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
# Keep the class for backward compatibility, but it now delegates to the function
|
| 296 |
+
class TemplateBuildingPass: # pylint: disable=too-few-public-methods
|
| 297 |
+
"""
|
| 298 |
+
Backward-compatible class wrapper for run_template_building_pass.
|
| 299 |
+
|
| 300 |
+
Deprecated: Use run_template_building_pass() function directly instead.
|
| 301 |
+
This class exists only for backward compatibility with existing code.
|
| 302 |
"""
|
| 303 |
|
| 304 |
def __init__(
|
|
|
|
| 310 |
max_scan_frames: int = 2000,
|
| 311 |
target_coverage: float = 0.70,
|
| 312 |
):
|
| 313 |
+
"""Initialize the template building pass."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 314 |
self.config = config
|
| 315 |
self.clock_reader = clock_reader
|
| 316 |
self.template_builder = template_builder
|
|
|
|
| 319 |
self.target_coverage = target_coverage
|
| 320 |
|
| 321 |
def run(self) -> Tuple[Optional[DigitTemplateLibrary], Optional[ReadPlayClock], float]:
|
| 322 |
+
"""Run the template building pass by delegating to the function."""
|
| 323 |
+
return run_template_building_pass(
|
| 324 |
+
self.config,
|
| 325 |
+
self.clock_reader,
|
| 326 |
+
self.template_builder,
|
| 327 |
+
self.min_samples,
|
| 328 |
+
self.max_scan_frames,
|
| 329 |
+
self.target_coverage,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 330 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/readers/__init__.py
CHANGED
|
@@ -3,6 +3,8 @@
|
|
| 3 |
This package contains components that read values from detected regions:
|
| 4 |
- Play clock digit values (via template matching)
|
| 5 |
- Future: expanded timeout value reading
|
|
|
|
|
|
|
| 6 |
"""
|
| 7 |
|
| 8 |
from .models import (
|
|
@@ -12,8 +14,6 @@ from .models import (
|
|
| 12 |
)
|
| 13 |
from .playclock import (
|
| 14 |
ReadPlayClock,
|
| 15 |
-
detect_red_digits,
|
| 16 |
-
normalize_to_grayscale,
|
| 17 |
backfill_missing_readings,
|
| 18 |
)
|
| 19 |
|
|
@@ -24,7 +24,5 @@ __all__ = [
|
|
| 24 |
"TemplatePlayClockReading",
|
| 25 |
# Play clock reading
|
| 26 |
"ReadPlayClock",
|
| 27 |
-
"detect_red_digits",
|
| 28 |
-
"normalize_to_grayscale",
|
| 29 |
"backfill_missing_readings",
|
| 30 |
]
|
|
|
|
| 3 |
This package contains components that read values from detected regions:
|
| 4 |
- Play clock digit values (via template matching)
|
| 5 |
- Future: expanded timeout value reading
|
| 6 |
+
|
| 7 |
+
Note: detect_red_digits and normalize_to_grayscale are now in utils.color
|
| 8 |
"""
|
| 9 |
|
| 10 |
from .models import (
|
|
|
|
| 14 |
)
|
| 15 |
from .playclock import (
|
| 16 |
ReadPlayClock,
|
|
|
|
|
|
|
| 17 |
backfill_missing_readings,
|
| 18 |
)
|
| 19 |
|
|
|
|
| 24 |
"TemplatePlayClockReading",
|
| 25 |
# Play clock reading
|
| 26 |
"ReadPlayClock",
|
|
|
|
|
|
|
| 27 |
"backfill_missing_readings",
|
| 28 |
]
|
src/readers/playclock.py
CHANGED
|
@@ -1,14 +1,12 @@
|
|
| 1 |
-
# pylint: disable=too-many-lines,duplicate-code
|
| 2 |
"""
|
| 3 |
Play clock reading via template matching.
|
| 4 |
|
| 5 |
This module provides:
|
| 6 |
- ReadPlayClock: Fast template-based clock value reading
|
| 7 |
-
- Color normalization utilities for handling red/white digits
|
| 8 |
- backfill_missing_readings for gap interpolation
|
| 9 |
|
| 10 |
-
|
| 11 |
-
|
| 12 |
|
| 13 |
Performance comparison (from ocr_benchmark.md):
|
| 14 |
- EasyOCR: 48.9ms/frame
|
|
@@ -22,73 +20,12 @@ import cv2
|
|
| 22 |
import numpy as np
|
| 23 |
|
| 24 |
from setup import DigitTemplateLibrary
|
|
|
|
| 25 |
from .models import PlayClockReading, TemplateMatchResult, TemplatePlayClockReading
|
| 26 |
|
| 27 |
logger = logging.getLogger(__name__)
|
| 28 |
|
| 29 |
|
| 30 |
-
# =============================================================================
|
| 31 |
-
# Color Normalization Utilities
|
| 32 |
-
# =============================================================================
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
def detect_red_digits(region: np.ndarray) -> bool:
|
| 36 |
-
"""
|
| 37 |
-
Detect if the play clock digits are red.
|
| 38 |
-
|
| 39 |
-
Red digits appear when the play clock has 5 seconds or less remaining.
|
| 40 |
-
Red digits have high red channel values with very low green and blue.
|
| 41 |
-
|
| 42 |
-
Args:
|
| 43 |
-
region: Play clock region (BGR format)
|
| 44 |
-
|
| 45 |
-
Returns:
|
| 46 |
-
True if red digits detected, False otherwise
|
| 47 |
-
"""
|
| 48 |
-
# Split into BGR channels
|
| 49 |
-
b, g, r = cv2.split(region)
|
| 50 |
-
|
| 51 |
-
# Calculate mean values for each channel
|
| 52 |
-
r_mean = np.mean(r)
|
| 53 |
-
g_mean = np.mean(g)
|
| 54 |
-
b_mean = np.mean(b)
|
| 55 |
-
|
| 56 |
-
# Red digits: high red channel, very low green/blue, red > 2x green
|
| 57 |
-
# pylint: disable=chained-comparison
|
| 58 |
-
is_red = r_mean > 15 and max(g_mean, b_mean) < 15 and r_mean > (g_mean * 2)
|
| 59 |
-
|
| 60 |
-
if is_red:
|
| 61 |
-
logger.debug("Red digits detected: R=%.1f, G=%.1f, B=%.1f", r_mean, g_mean, b_mean)
|
| 62 |
-
|
| 63 |
-
return is_red
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
def normalize_to_grayscale(region: np.ndarray) -> np.ndarray:
|
| 67 |
-
"""
|
| 68 |
-
Normalize a play clock region to grayscale, handling both red and white digits.
|
| 69 |
-
|
| 70 |
-
Red digits (displayed when clock <= 5) are converted to white-like grayscale
|
| 71 |
-
by extracting the red channel. White digits use standard grayscale conversion.
|
| 72 |
-
This allows a single set of templates to match both color variants.
|
| 73 |
-
|
| 74 |
-
Args:
|
| 75 |
-
region: Play clock region (BGR format)
|
| 76 |
-
|
| 77 |
-
Returns:
|
| 78 |
-
Grayscale image where digits appear as bright pixels on dark background
|
| 79 |
-
"""
|
| 80 |
-
is_red = detect_red_digits(region)
|
| 81 |
-
|
| 82 |
-
if is_red:
|
| 83 |
-
# For red digits, use the red channel directly as grayscale
|
| 84 |
-
# This converts red digits to white-like appearance
|
| 85 |
-
_, _, r = cv2.split(region)
|
| 86 |
-
return r
|
| 87 |
-
|
| 88 |
-
# Standard grayscale conversion for white digits
|
| 89 |
-
return cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
|
| 90 |
-
|
| 91 |
-
|
| 92 |
# =============================================================================
|
| 93 |
# Template-Based Play Clock Reader
|
| 94 |
# =============================================================================
|
|
@@ -134,7 +71,7 @@ class ReadPlayClock:
|
|
| 134 |
"""
|
| 135 |
Preprocess play clock region for template matching.
|
| 136 |
|
| 137 |
-
|
| 138 |
|
| 139 |
Args:
|
| 140 |
region: Play clock region (BGR format)
|
|
@@ -142,46 +79,7 @@ class ReadPlayClock:
|
|
| 142 |
Returns:
|
| 143 |
Preprocessed binary image (scaled up)
|
| 144 |
"""
|
| 145 |
-
|
| 146 |
-
gray = normalize_to_grayscale(region)
|
| 147 |
-
|
| 148 |
-
# Scale up to match template resolution
|
| 149 |
-
scaled = cv2.resize(gray, None, fx=self.scale_factor, fy=self.scale_factor, interpolation=cv2.INTER_LINEAR)
|
| 150 |
-
|
| 151 |
-
# Use Otsu's thresholding
|
| 152 |
-
_, binary = cv2.threshold(scaled, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
| 153 |
-
|
| 154 |
-
# Ensure white digits on black background
|
| 155 |
-
mean_intensity = np.mean(binary)
|
| 156 |
-
if mean_intensity > 128:
|
| 157 |
-
binary = cv2.bitwise_not(binary)
|
| 158 |
-
|
| 159 |
-
return binary
|
| 160 |
-
|
| 161 |
-
def _extract_left_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 162 |
-
"""Extract left region for tens digit."""
|
| 163 |
-
_, w = preprocessed.shape[:2]
|
| 164 |
-
mid_x = w // 2
|
| 165 |
-
return preprocessed[:, : mid_x - 2]
|
| 166 |
-
|
| 167 |
-
def _extract_right_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 168 |
-
"""Extract right region for ones digit in double-digit displays."""
|
| 169 |
-
_, w = preprocessed.shape[:2]
|
| 170 |
-
mid_x = w // 2
|
| 171 |
-
return preprocessed[:, mid_x + 2 :]
|
| 172 |
-
|
| 173 |
-
def _extract_center_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 174 |
-
"""Extract center region for ones digit in single-digit displays."""
|
| 175 |
-
_, w = preprocessed.shape[:2]
|
| 176 |
-
center_start = int(w * 0.20)
|
| 177 |
-
center_end = int(w * 0.80)
|
| 178 |
-
return preprocessed[:, center_start:center_end]
|
| 179 |
-
|
| 180 |
-
def _extract_far_left_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 181 |
-
"""Extract far left region for blank detection in single-digit displays."""
|
| 182 |
-
_, w = preprocessed.shape[:2]
|
| 183 |
-
far_left_end = int(w * 0.20)
|
| 184 |
-
return preprocessed[:, :far_left_end]
|
| 185 |
|
| 186 |
def match_digit(self, region: np.ndarray, templates: List) -> TemplateMatchResult:
|
| 187 |
"""
|
|
@@ -251,9 +149,9 @@ class ReadPlayClock:
|
|
| 251 |
method="template_double",
|
| 252 |
)
|
| 253 |
|
| 254 |
-
# Extract regions
|
| 255 |
-
tens_region =
|
| 256 |
-
ones_region =
|
| 257 |
|
| 258 |
# Match digits
|
| 259 |
tens_match = self.match_digit(tens_region, tens_templates)
|
|
@@ -320,9 +218,9 @@ class ReadPlayClock:
|
|
| 320 |
method="template_single",
|
| 321 |
)
|
| 322 |
|
| 323 |
-
# Extract regions
|
| 324 |
-
center_region =
|
| 325 |
-
far_left_region =
|
| 326 |
|
| 327 |
# Match center digit (ones)
|
| 328 |
ones_match = self.match_digit(center_region, ones_templates)
|
|
@@ -490,7 +388,104 @@ class ReadPlayClock:
|
|
| 490 |
# =============================================================================
|
| 491 |
|
| 492 |
|
| 493 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 494 |
"""
|
| 495 |
Backfill missing play clock readings when there's a clear countdown sequence with gaps.
|
| 496 |
|
|
@@ -517,112 +512,31 @@ def backfill_missing_readings(readings: list[PlayClockReading], max_gap_seconds:
|
|
| 517 |
if not readings:
|
| 518 |
return readings
|
| 519 |
|
| 520 |
-
# Create a copy to avoid modifying the original
|
| 521 |
result = list(readings)
|
| 522 |
-
|
| 523 |
-
# Find gaps and try to backfill
|
| 524 |
i = 0
|
|
|
|
| 525 |
while i < len(result):
|
| 526 |
# Skip if this reading is valid
|
| 527 |
if result[i].detected and result[i].value is not None:
|
| 528 |
i += 1
|
| 529 |
continue
|
| 530 |
|
| 531 |
-
# Found a missing reading - find the extent of the gap
|
| 532 |
gap_start = i
|
| 533 |
-
gap_end = i
|
| 534 |
-
while gap_end < len(result) and (not result[gap_end].detected or result[gap_end].value is None):
|
| 535 |
-
gap_end += 1
|
| 536 |
-
|
| 537 |
gap_frame_count = gap_end - gap_start
|
| 538 |
|
| 539 |
-
#
|
| 540 |
-
|
| 541 |
-
|
| 542 |
-
i = gap_end
|
| 543 |
-
continue
|
| 544 |
-
|
| 545 |
-
if gap_end >= len(result):
|
| 546 |
-
logger.debug("Gap at end of sequence (index %d), no right side for backfill", gap_start)
|
| 547 |
-
i = gap_end
|
| 548 |
-
continue
|
| 549 |
-
|
| 550 |
-
# Get the clock values on either side
|
| 551 |
-
left_value = result[gap_start - 1].value
|
| 552 |
-
right_value = result[gap_end].value
|
| 553 |
-
|
| 554 |
-
if left_value is None or right_value is None:
|
| 555 |
-
logger.debug("Adjacent values are None, cannot backfill")
|
| 556 |
-
i = gap_end
|
| 557 |
-
continue
|
| 558 |
-
|
| 559 |
-
# Calculate the gap in seconds (clock values)
|
| 560 |
-
# Left should be higher than right in a countdown
|
| 561 |
-
if left_value <= right_value:
|
| 562 |
-
logger.debug("Invalid countdown: left=%d not greater than right=%d", left_value, right_value)
|
| 563 |
-
i = gap_end
|
| 564 |
-
continue
|
| 565 |
-
|
| 566 |
-
seconds_gap = left_value - right_value - 1 # Number of missing clock values
|
| 567 |
-
|
| 568 |
-
# Check if gap in seconds is within our limit
|
| 569 |
-
if seconds_gap > max_gap_seconds:
|
| 570 |
-
logger.debug(
|
| 571 |
-
"Gap of %d seconds (left=%d, right=%d) exceeds max_gap_seconds=%d, skipping backfill",
|
| 572 |
-
seconds_gap,
|
| 573 |
-
left_value,
|
| 574 |
-
right_value,
|
| 575 |
-
max_gap_seconds,
|
| 576 |
-
)
|
| 577 |
i = gap_end
|
| 578 |
continue
|
| 579 |
|
|
|
|
| 580 |
if seconds_gap <= 0:
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
|
| 584 |
-
for j in range(gap_frame_count):
|
| 585 |
-
# Use left value for first half, right value for second half
|
| 586 |
-
if j < gap_frame_count / 2:
|
| 587 |
-
backfill_value = left_value
|
| 588 |
-
else:
|
| 589 |
-
backfill_value = right_value
|
| 590 |
-
result[gap_start + j] = PlayClockReading(
|
| 591 |
-
detected=True,
|
| 592 |
-
value=backfill_value,
|
| 593 |
-
confidence=0.0,
|
| 594 |
-
raw_text=f"BACKFILLED_{backfill_value}",
|
| 595 |
-
)
|
| 596 |
-
i = gap_end
|
| 597 |
-
continue
|
| 598 |
-
|
| 599 |
-
# Calculate the missing clock values
|
| 600 |
-
missing_values = list(range(left_value - 1, right_value, -1))
|
| 601 |
-
|
| 602 |
-
logger.info(
|
| 603 |
-
"Backfilling gap: frames %d-%d, clock values %d to %d, missing values: %s",
|
| 604 |
-
gap_start,
|
| 605 |
-
gap_end - 1,
|
| 606 |
-
left_value,
|
| 607 |
-
right_value,
|
| 608 |
-
missing_values,
|
| 609 |
-
)
|
| 610 |
-
|
| 611 |
-
# Distribute missing values across the gap frames
|
| 612 |
-
for j in range(gap_frame_count):
|
| 613 |
-
# Calculate which missing value this frame should have
|
| 614 |
-
position = j / gap_frame_count
|
| 615 |
-
value_index = int(position * len(missing_values))
|
| 616 |
-
value_index = min(value_index, len(missing_values) - 1)
|
| 617 |
-
backfill_value = missing_values[value_index]
|
| 618 |
-
|
| 619 |
-
result[gap_start + j] = PlayClockReading(
|
| 620 |
-
detected=True,
|
| 621 |
-
value=backfill_value,
|
| 622 |
-
confidence=0.0,
|
| 623 |
-
raw_text=f"BACKFILLED_{backfill_value}",
|
| 624 |
-
)
|
| 625 |
-
logger.debug(" Backfilled index %d with value %d", gap_start + j, backfill_value)
|
| 626 |
|
| 627 |
i = gap_end
|
| 628 |
|
|
|
|
|
|
|
| 1 |
"""
|
| 2 |
Play clock reading via template matching.
|
| 3 |
|
| 4 |
This module provides:
|
| 5 |
- ReadPlayClock: Fast template-based clock value reading
|
|
|
|
| 6 |
- backfill_missing_readings for gap interpolation
|
| 7 |
|
| 8 |
+
Color normalization utilities (detect_red_digits, normalize_to_grayscale) are
|
| 9 |
+
now shared from utils.color to eliminate code duplication.
|
| 10 |
|
| 11 |
Performance comparison (from ocr_benchmark.md):
|
| 12 |
- EasyOCR: 48.9ms/frame
|
|
|
|
| 20 |
import numpy as np
|
| 21 |
|
| 22 |
from setup import DigitTemplateLibrary
|
| 23 |
+
from utils import extract_center_region, extract_far_left_region, extract_left_region, extract_right_region, preprocess_playclock_region
|
| 24 |
from .models import PlayClockReading, TemplateMatchResult, TemplatePlayClockReading
|
| 25 |
|
| 26 |
logger = logging.getLogger(__name__)
|
| 27 |
|
| 28 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
# =============================================================================
|
| 30 |
# Template-Based Play Clock Reader
|
| 31 |
# =============================================================================
|
|
|
|
| 71 |
"""
|
| 72 |
Preprocess play clock region for template matching.
|
| 73 |
|
| 74 |
+
Delegates to shared utility function in utils.regions.
|
| 75 |
|
| 76 |
Args:
|
| 77 |
region: Play clock region (BGR format)
|
|
|
|
| 79 |
Returns:
|
| 80 |
Preprocessed binary image (scaled up)
|
| 81 |
"""
|
| 82 |
+
return preprocess_playclock_region(region, self.scale_factor)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
|
| 84 |
def match_digit(self, region: np.ndarray, templates: List) -> TemplateMatchResult:
|
| 85 |
"""
|
|
|
|
| 149 |
method="template_double",
|
| 150 |
)
|
| 151 |
|
| 152 |
+
# Extract regions using shared utility functions
|
| 153 |
+
tens_region = extract_left_region(preprocessed)
|
| 154 |
+
ones_region = extract_right_region(preprocessed)
|
| 155 |
|
| 156 |
# Match digits
|
| 157 |
tens_match = self.match_digit(tens_region, tens_templates)
|
|
|
|
| 218 |
method="template_single",
|
| 219 |
)
|
| 220 |
|
| 221 |
+
# Extract regions using shared utility functions
|
| 222 |
+
center_region = extract_center_region(preprocessed)
|
| 223 |
+
far_left_region = extract_far_left_region(preprocessed)
|
| 224 |
|
| 225 |
# Match center digit (ones)
|
| 226 |
ones_match = self.match_digit(center_region, ones_templates)
|
|
|
|
| 388 |
# =============================================================================
|
| 389 |
|
| 390 |
|
| 391 |
+
def _find_gap_extent(readings: List[PlayClockReading], start_idx: int) -> int:
|
| 392 |
+
"""Find the end index of a gap starting at start_idx."""
|
| 393 |
+
gap_end = start_idx
|
| 394 |
+
while gap_end < len(readings) and (not readings[gap_end].detected or readings[gap_end].value is None):
|
| 395 |
+
gap_end += 1
|
| 396 |
+
return gap_end
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def _validate_gap_boundaries(readings: List[PlayClockReading], gap_start: int, gap_end: int, max_gap_seconds: int) -> Tuple[bool, int, int, int]:
|
| 400 |
+
"""
|
| 401 |
+
Validate that a gap can be backfilled.
|
| 402 |
+
|
| 403 |
+
Returns:
|
| 404 |
+
Tuple of (is_valid, left_value, right_value, seconds_gap).
|
| 405 |
+
is_valid is False if gap cannot be backfilled.
|
| 406 |
+
"""
|
| 407 |
+
# Check if we have valid readings on both sides
|
| 408 |
+
if gap_start == 0:
|
| 409 |
+
logger.debug("Gap at start of sequence (index 0), no left side for backfill")
|
| 410 |
+
return False, 0, 0, 0
|
| 411 |
+
|
| 412 |
+
if gap_end >= len(readings):
|
| 413 |
+
logger.debug("Gap at end of sequence (index %d), no right side for backfill", gap_start)
|
| 414 |
+
return False, 0, 0, 0
|
| 415 |
+
|
| 416 |
+
# Get the clock values on either side
|
| 417 |
+
left_value = readings[gap_start - 1].value
|
| 418 |
+
right_value = readings[gap_end].value
|
| 419 |
+
|
| 420 |
+
if left_value is None or right_value is None:
|
| 421 |
+
logger.debug("Adjacent values are None, cannot backfill")
|
| 422 |
+
return False, 0, 0, 0
|
| 423 |
+
|
| 424 |
+
# Left should be higher than right in a countdown
|
| 425 |
+
if left_value <= right_value:
|
| 426 |
+
logger.debug("Invalid countdown: left=%d not greater than right=%d", left_value, right_value)
|
| 427 |
+
return False, 0, 0, 0
|
| 428 |
+
|
| 429 |
+
seconds_gap = left_value - right_value - 1
|
| 430 |
+
|
| 431 |
+
# Check if gap in seconds is within our limit
|
| 432 |
+
if seconds_gap > max_gap_seconds:
|
| 433 |
+
logger.debug(
|
| 434 |
+
"Gap of %d seconds (left=%d, right=%d) exceeds max_gap_seconds=%d, skipping backfill",
|
| 435 |
+
seconds_gap,
|
| 436 |
+
left_value,
|
| 437 |
+
right_value,
|
| 438 |
+
max_gap_seconds,
|
| 439 |
+
)
|
| 440 |
+
return False, 0, 0, 0
|
| 441 |
+
|
| 442 |
+
return True, left_value, right_value, seconds_gap
|
| 443 |
+
|
| 444 |
+
|
| 445 |
+
def _backfill_interpolate(result: List[PlayClockReading], gap_start: int, gap_frame_count: int, left_value: int, right_value: int) -> None:
|
| 446 |
+
"""Backfill a gap using nearest value interpolation (no missing clock values)."""
|
| 447 |
+
logger.debug("No missing clock values between %d and %d, using nearest value interpolation", left_value, right_value)
|
| 448 |
+
for j in range(gap_frame_count):
|
| 449 |
+
# Use left value for first half, right value for second half
|
| 450 |
+
backfill_value = left_value if j < gap_frame_count / 2 else right_value
|
| 451 |
+
result[gap_start + j] = PlayClockReading(
|
| 452 |
+
detected=True,
|
| 453 |
+
value=backfill_value,
|
| 454 |
+
confidence=0.0,
|
| 455 |
+
raw_text=f"BACKFILLED_{backfill_value}",
|
| 456 |
+
)
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
def _backfill_with_missing_values(result: List[PlayClockReading], gap_start: int, gap_end: int, left_value: int, right_value: int) -> None:
|
| 460 |
+
"""Backfill a gap by distributing missing clock values across frames."""
|
| 461 |
+
gap_frame_count = gap_end - gap_start
|
| 462 |
+
missing_values = list(range(left_value - 1, right_value, -1))
|
| 463 |
+
|
| 464 |
+
logger.info(
|
| 465 |
+
"Backfilling gap: frames %d-%d, clock values %d to %d, missing values: %s",
|
| 466 |
+
gap_start,
|
| 467 |
+
gap_end - 1,
|
| 468 |
+
left_value,
|
| 469 |
+
right_value,
|
| 470 |
+
missing_values,
|
| 471 |
+
)
|
| 472 |
+
|
| 473 |
+
for j in range(gap_frame_count):
|
| 474 |
+
# Calculate which missing value this frame should have
|
| 475 |
+
position = j / gap_frame_count
|
| 476 |
+
value_index = min(int(position * len(missing_values)), len(missing_values) - 1)
|
| 477 |
+
backfill_value = missing_values[value_index]
|
| 478 |
+
|
| 479 |
+
result[gap_start + j] = PlayClockReading(
|
| 480 |
+
detected=True,
|
| 481 |
+
value=backfill_value,
|
| 482 |
+
confidence=0.0,
|
| 483 |
+
raw_text=f"BACKFILLED_{backfill_value}",
|
| 484 |
+
)
|
| 485 |
+
logger.debug(" Backfilled index %d with value %d", gap_start + j, backfill_value)
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
def backfill_missing_readings(readings: List[PlayClockReading], max_gap_seconds: int = 3) -> List[PlayClockReading]:
|
| 489 |
"""
|
| 490 |
Backfill missing play clock readings when there's a clear countdown sequence with gaps.
|
| 491 |
|
|
|
|
| 512 |
if not readings:
|
| 513 |
return readings
|
| 514 |
|
|
|
|
| 515 |
result = list(readings)
|
|
|
|
|
|
|
| 516 |
i = 0
|
| 517 |
+
|
| 518 |
while i < len(result):
|
| 519 |
# Skip if this reading is valid
|
| 520 |
if result[i].detected and result[i].value is not None:
|
| 521 |
i += 1
|
| 522 |
continue
|
| 523 |
|
| 524 |
+
# Found a missing reading - find the extent of the gap
|
| 525 |
gap_start = i
|
| 526 |
+
gap_end = _find_gap_extent(result, i)
|
|
|
|
|
|
|
|
|
|
| 527 |
gap_frame_count = gap_end - gap_start
|
| 528 |
|
| 529 |
+
# Validate that we can backfill this gap
|
| 530 |
+
is_valid, left_value, right_value, seconds_gap = _validate_gap_boundaries(result, gap_start, gap_end, max_gap_seconds)
|
| 531 |
+
if not is_valid:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 532 |
i = gap_end
|
| 533 |
continue
|
| 534 |
|
| 535 |
+
# Perform the backfill
|
| 536 |
if seconds_gap <= 0:
|
| 537 |
+
_backfill_interpolate(result, gap_start, gap_frame_count, left_value, right_value)
|
| 538 |
+
else:
|
| 539 |
+
_backfill_with_missing_values(result, gap_start, gap_end, left_value, right_value)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 540 |
|
| 541 |
i = gap_end
|
| 542 |
|
src/setup/coverage.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Shared coverage calculation utilities for digit templates.
|
| 3 |
+
|
| 4 |
+
This module provides utilities for calculating template coverage status,
|
| 5 |
+
shared between DigitTemplateBuilder and DigitTemplateLibrary.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import Any, Dict, Iterable, Set, Tuple
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Standard digit sets for play clock displays
|
| 12 |
+
ONES_DIGITS = set(range(10)) # 0-9
|
| 13 |
+
TENS_DIGITS = {1, 2, 3, 4} # 10, 20, 30, 40
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def categorize_template_keys(keys: Iterable[Tuple[bool, int, str]]) -> Tuple[Set[int], Set[int], Set[int], bool]:
|
| 17 |
+
"""
|
| 18 |
+
Categorize template keys into digit sets.
|
| 19 |
+
|
| 20 |
+
Args:
|
| 21 |
+
keys: Iterable of (is_tens, digit, position) tuples
|
| 22 |
+
|
| 23 |
+
Returns:
|
| 24 |
+
Tuple of (ones_center_have, ones_right_have, tens_have, has_blank)
|
| 25 |
+
"""
|
| 26 |
+
ones_center_have: Set[int] = set()
|
| 27 |
+
ones_right_have: Set[int] = set()
|
| 28 |
+
tens_have: Set[int] = set()
|
| 29 |
+
has_blank = False
|
| 30 |
+
|
| 31 |
+
for is_tens, digit, position in keys:
|
| 32 |
+
if is_tens:
|
| 33 |
+
if digit == -1:
|
| 34 |
+
has_blank = True
|
| 35 |
+
else:
|
| 36 |
+
tens_have.add(digit)
|
| 37 |
+
else:
|
| 38 |
+
if position == "center":
|
| 39 |
+
ones_center_have.add(digit)
|
| 40 |
+
elif position == "right":
|
| 41 |
+
ones_right_have.add(digit)
|
| 42 |
+
|
| 43 |
+
return ones_center_have, ones_right_have, tens_have, has_blank
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def calculate_coverage_status(
|
| 47 |
+
ones_center_have: Set[int],
|
| 48 |
+
ones_right_have: Set[int],
|
| 49 |
+
tens_have: Set[int],
|
| 50 |
+
has_blank: bool,
|
| 51 |
+
total_items: int = 0,
|
| 52 |
+
) -> Dict[str, Any]:
|
| 53 |
+
"""
|
| 54 |
+
Calculate coverage status from digit sets.
|
| 55 |
+
|
| 56 |
+
Args:
|
| 57 |
+
ones_center_have: Set of ones digits that have center templates
|
| 58 |
+
ones_right_have: Set of ones digits that have right templates
|
| 59 |
+
tens_have: Set of tens digits that have templates
|
| 60 |
+
has_blank: Whether blank template exists
|
| 61 |
+
total_items: Total number of items (templates or samples)
|
| 62 |
+
|
| 63 |
+
Returns:
|
| 64 |
+
Dictionary with coverage information
|
| 65 |
+
"""
|
| 66 |
+
ones_center_missing = ONES_DIGITS - ones_center_have
|
| 67 |
+
ones_right_missing = ONES_DIGITS - ones_right_have
|
| 68 |
+
tens_missing = TENS_DIGITS - tens_have
|
| 69 |
+
|
| 70 |
+
# Total needed: 10 ones_center + 10 ones_right + 4 tens + 1 blank = 25
|
| 71 |
+
total_needed = 25
|
| 72 |
+
|
| 73 |
+
return {
|
| 74 |
+
"total_needed": total_needed,
|
| 75 |
+
"total_have": total_items,
|
| 76 |
+
"is_complete": total_items >= total_needed,
|
| 77 |
+
"ones_center_have": sorted(ones_center_have),
|
| 78 |
+
"ones_center_missing": sorted(ones_center_missing),
|
| 79 |
+
"ones_right_have": sorted(ones_right_have),
|
| 80 |
+
"ones_right_missing": sorted(ones_right_missing),
|
| 81 |
+
"tens_have": sorted(tens_have),
|
| 82 |
+
"tens_missing": sorted(tens_missing),
|
| 83 |
+
"has_blank": has_blank,
|
| 84 |
+
}
|
src/setup/playclock_region.py
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
-
# pylint: disable=duplicate-code
|
| 2 |
"""
|
| 3 |
Play clock region extraction and OCR preprocessing for template building.
|
| 4 |
|
|
@@ -9,8 +8,7 @@ This module provides:
|
|
| 9 |
The region extraction logic determines WHERE to look in the frame,
|
| 10 |
while the OCR preprocessing prepares images for EasyOCR labeling.
|
| 11 |
|
| 12 |
-
|
| 13 |
-
to avoid circular imports. Both modules need these utilities independently.
|
| 14 |
"""
|
| 15 |
|
| 16 |
import json
|
|
@@ -21,47 +19,12 @@ from typing import Optional, Tuple
|
|
| 21 |
import cv2
|
| 22 |
import numpy as np
|
| 23 |
|
|
|
|
| 24 |
from .models import PlayClockRegionConfig
|
| 25 |
|
| 26 |
logger = logging.getLogger(__name__)
|
| 27 |
|
| 28 |
|
| 29 |
-
# =============================================================================
|
| 30 |
-
# Color Detection Utilities (duplicated to avoid circular imports)
|
| 31 |
-
# =============================================================================
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
def detect_red_digits(region: np.ndarray) -> bool:
|
| 35 |
-
"""
|
| 36 |
-
Detect if the play clock digits are red.
|
| 37 |
-
|
| 38 |
-
Red digits appear when the play clock has 5 seconds or less remaining.
|
| 39 |
-
Red digits have high red channel values with very low green and blue.
|
| 40 |
-
|
| 41 |
-
Args:
|
| 42 |
-
region: Play clock region (BGR format)
|
| 43 |
-
|
| 44 |
-
Returns:
|
| 45 |
-
True if red digits detected, False otherwise
|
| 46 |
-
"""
|
| 47 |
-
# Split into BGR channels
|
| 48 |
-
b, g, r = cv2.split(region)
|
| 49 |
-
|
| 50 |
-
# Calculate mean values for each channel
|
| 51 |
-
r_mean = np.mean(r)
|
| 52 |
-
g_mean = np.mean(g)
|
| 53 |
-
b_mean = np.mean(b)
|
| 54 |
-
|
| 55 |
-
# Red digits: high red channel, very low green/blue, red > 2x green
|
| 56 |
-
# pylint: disable=chained-comparison
|
| 57 |
-
is_red = r_mean > 15 and max(g_mean, b_mean) < 15 and r_mean > (g_mean * 2)
|
| 58 |
-
|
| 59 |
-
if is_red:
|
| 60 |
-
logger.debug("Red digits detected: R=%.1f, G=%.1f, B=%.1f", r_mean, g_mean, b_mean)
|
| 61 |
-
|
| 62 |
-
return is_red
|
| 63 |
-
|
| 64 |
-
|
| 65 |
# =============================================================================
|
| 66 |
# Play Clock Region Extractor
|
| 67 |
# =============================================================================
|
|
|
|
|
|
|
| 1 |
"""
|
| 2 |
Play clock region extraction and OCR preprocessing for template building.
|
| 3 |
|
|
|
|
| 8 |
The region extraction logic determines WHERE to look in the frame,
|
| 9 |
while the OCR preprocessing prepares images for EasyOCR labeling.
|
| 10 |
|
| 11 |
+
Color detection utilities are shared from utils.color to eliminate code duplication.
|
|
|
|
| 12 |
"""
|
| 13 |
|
| 14 |
import json
|
|
|
|
| 19 |
import cv2
|
| 20 |
import numpy as np
|
| 21 |
|
| 22 |
+
from utils import detect_red_digits
|
| 23 |
from .models import PlayClockRegionConfig
|
| 24 |
|
| 25 |
logger = logging.getLogger(__name__)
|
| 26 |
|
| 27 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
# =============================================================================
|
| 29 |
# Play Clock Region Extractor
|
| 30 |
# =============================================================================
|
src/setup/template_builder.py
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
-
# pylint: disable=duplicate-code
|
| 2 |
"""
|
| 3 |
Digit template builder for creating play clock digit templates from OCR samples.
|
| 4 |
|
|
@@ -6,8 +5,7 @@ This module provides the DigitTemplateBuilder class which collects samples from
|
|
| 6 |
the play clock region, extracts individual digits, and builds averaged templates
|
| 7 |
for each unique digit value.
|
| 8 |
|
| 9 |
-
|
| 10 |
-
to avoid circular imports. Both modules need these utilities independently.
|
| 11 |
"""
|
| 12 |
|
| 13 |
import logging
|
|
@@ -16,69 +14,20 @@ from typing import Dict, List, Optional, Tuple
|
|
| 16 |
import cv2
|
| 17 |
import numpy as np
|
| 18 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
from .models import DigitSample, DigitTemplate
|
| 20 |
from .template_library import DigitTemplateLibrary
|
| 21 |
|
| 22 |
logger = logging.getLogger(__name__)
|
| 23 |
|
| 24 |
|
| 25 |
-
def detect_red_digits(region: np.ndarray) -> bool:
|
| 26 |
-
"""
|
| 27 |
-
Detect if the play clock digits are red.
|
| 28 |
-
|
| 29 |
-
Red digits appear when the play clock has 5 seconds or less remaining.
|
| 30 |
-
Red digits have high red channel values with very low green and blue.
|
| 31 |
-
|
| 32 |
-
Args:
|
| 33 |
-
region: Play clock region (BGR format)
|
| 34 |
-
|
| 35 |
-
Returns:
|
| 36 |
-
True if red digits detected, False otherwise
|
| 37 |
-
"""
|
| 38 |
-
# Split into BGR channels
|
| 39 |
-
b, g, r = cv2.split(region)
|
| 40 |
-
|
| 41 |
-
# Calculate mean values for each channel
|
| 42 |
-
r_mean = np.mean(r)
|
| 43 |
-
g_mean = np.mean(g)
|
| 44 |
-
b_mean = np.mean(b)
|
| 45 |
-
|
| 46 |
-
# Red digits: high red channel, very low green/blue, red > 2x green
|
| 47 |
-
# pylint: disable=chained-comparison
|
| 48 |
-
is_red = r_mean > 15 and max(g_mean, b_mean) < 15 and r_mean > (g_mean * 2)
|
| 49 |
-
|
| 50 |
-
if is_red:
|
| 51 |
-
logger.debug("Red digits detected: R=%.1f, G=%.1f, B=%.1f", r_mean, g_mean, b_mean)
|
| 52 |
-
|
| 53 |
-
return is_red
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
def normalize_to_grayscale(region: np.ndarray) -> np.ndarray:
|
| 57 |
-
"""
|
| 58 |
-
Normalize a play clock region to grayscale, handling both red and white digits.
|
| 59 |
-
|
| 60 |
-
Red digits (displayed when clock <= 5) are converted to white-like grayscale
|
| 61 |
-
by extracting the red channel. White digits use standard grayscale conversion.
|
| 62 |
-
This allows a single set of templates to match both color variants.
|
| 63 |
-
|
| 64 |
-
Args:
|
| 65 |
-
region: Play clock region (BGR format)
|
| 66 |
-
|
| 67 |
-
Returns:
|
| 68 |
-
Grayscale image where digits appear as bright pixels on dark background
|
| 69 |
-
"""
|
| 70 |
-
is_red = detect_red_digits(region)
|
| 71 |
-
|
| 72 |
-
if is_red:
|
| 73 |
-
# For red digits, use the red channel directly as grayscale
|
| 74 |
-
# This converts red digits to white-like appearance
|
| 75 |
-
_, _, r = cv2.split(region)
|
| 76 |
-
return r
|
| 77 |
-
|
| 78 |
-
# Standard grayscale conversion for white digits
|
| 79 |
-
return cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
|
| 80 |
-
|
| 81 |
-
|
| 82 |
class DigitTemplateBuilder:
|
| 83 |
"""
|
| 84 |
Builds digit templates from OCR-labeled play clock samples.
|
|
@@ -116,7 +65,7 @@ class DigitTemplateBuilder:
|
|
| 116 |
"""
|
| 117 |
Preprocess play clock region for template extraction.
|
| 118 |
|
| 119 |
-
|
| 120 |
|
| 121 |
Args:
|
| 122 |
region: Play clock region (BGR format)
|
|
@@ -124,97 +73,7 @@ class DigitTemplateBuilder:
|
|
| 124 |
Returns:
|
| 125 |
Preprocessed binary image (white digits on black background)
|
| 126 |
"""
|
| 127 |
-
|
| 128 |
-
gray = normalize_to_grayscale(region)
|
| 129 |
-
|
| 130 |
-
# Scale up for better template quality
|
| 131 |
-
scale_factor = 4
|
| 132 |
-
scaled = cv2.resize(gray, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_LINEAR)
|
| 133 |
-
|
| 134 |
-
# Use Otsu's thresholding
|
| 135 |
-
_, binary = cv2.threshold(scaled, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
| 136 |
-
|
| 137 |
-
# Ensure white digits on black background (digits should be bright)
|
| 138 |
-
mean_intensity = np.mean(binary)
|
| 139 |
-
if mean_intensity > 128:
|
| 140 |
-
binary = cv2.bitwise_not(binary)
|
| 141 |
-
|
| 142 |
-
return binary
|
| 143 |
-
|
| 144 |
-
def extract_left_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 145 |
-
"""
|
| 146 |
-
Extract the left (tens digit) region from preprocessed play clock.
|
| 147 |
-
|
| 148 |
-
Used for tens digits in double-digit displays (10-40).
|
| 149 |
-
|
| 150 |
-
Args:
|
| 151 |
-
preprocessed: Preprocessed play clock image (scaled 4x)
|
| 152 |
-
|
| 153 |
-
Returns:
|
| 154 |
-
Left region image for tens digit
|
| 155 |
-
"""
|
| 156 |
-
_, w = preprocessed.shape[:2]
|
| 157 |
-
mid_x = w // 2
|
| 158 |
-
return preprocessed[:, : mid_x - 2] # Small gap in middle
|
| 159 |
-
|
| 160 |
-
def extract_far_left_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 161 |
-
"""
|
| 162 |
-
Extract the far left region from preprocessed play clock.
|
| 163 |
-
|
| 164 |
-
Used for blank detection in single-digit displays (0-9). This narrow
|
| 165 |
-
region (0%-20% of width) doesn't overlap with a centered digit,
|
| 166 |
-
so it should be truly empty when the clock shows a single digit.
|
| 167 |
-
|
| 168 |
-
Args:
|
| 169 |
-
preprocessed: Preprocessed play clock image (scaled 4x)
|
| 170 |
-
|
| 171 |
-
Returns:
|
| 172 |
-
Far left region image (should be mostly black for single digits)
|
| 173 |
-
"""
|
| 174 |
-
_, w = preprocessed.shape[:2]
|
| 175 |
-
# Far left: 0% to 20% of width - avoids overlap with centered digit
|
| 176 |
-
far_left_end = int(w * 0.20)
|
| 177 |
-
return preprocessed[:, :far_left_end]
|
| 178 |
-
|
| 179 |
-
def extract_right_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 180 |
-
"""
|
| 181 |
-
Extract the right (ones digit) region from preprocessed play clock.
|
| 182 |
-
|
| 183 |
-
Used for ones digits in double-digit displays (10-40).
|
| 184 |
-
|
| 185 |
-
Args:
|
| 186 |
-
preprocessed: Preprocessed play clock image (scaled 4x)
|
| 187 |
-
|
| 188 |
-
Returns:
|
| 189 |
-
Right region image for ones digit
|
| 190 |
-
"""
|
| 191 |
-
_, w = preprocessed.shape[:2]
|
| 192 |
-
mid_x = w // 2
|
| 193 |
-
return preprocessed[:, mid_x + 2 :]
|
| 194 |
-
|
| 195 |
-
def extract_center_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 196 |
-
"""
|
| 197 |
-
Extract the center region from preprocessed play clock.
|
| 198 |
-
|
| 199 |
-
Used for ones digits in single-digit displays (0-9) where the
|
| 200 |
-
digit is center-aligned rather than right-aligned.
|
| 201 |
-
|
| 202 |
-
The center region spans approximately 60% of the width, centered.
|
| 203 |
-
|
| 204 |
-
Args:
|
| 205 |
-
preprocessed: Preprocessed play clock image (scaled 4x)
|
| 206 |
-
|
| 207 |
-
Returns:
|
| 208 |
-
Center region image for centered single digit
|
| 209 |
-
"""
|
| 210 |
-
_, w = preprocessed.shape[:2]
|
| 211 |
-
|
| 212 |
-
# Center region: ~20% padding on each side, capturing middle 60%
|
| 213 |
-
# This encompasses where a centered single digit would appear
|
| 214 |
-
center_start = int(w * 0.20)
|
| 215 |
-
center_end = int(w * 0.80)
|
| 216 |
-
|
| 217 |
-
return preprocessed[:, center_start:center_end]
|
| 218 |
|
| 219 |
def extract_digits(self, preprocessed: np.ndarray, clock_value: int) -> Tuple[Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray]]:
|
| 220 |
"""
|
|
@@ -234,9 +93,9 @@ class DigitTemplateBuilder:
|
|
| 234 |
"""
|
| 235 |
if clock_value >= 10:
|
| 236 |
# Double-digit: standard left/right split
|
| 237 |
-
return
|
| 238 |
# Single-digit: far-left is blank (truly empty), ones is centered
|
| 239 |
-
return None, None,
|
| 240 |
|
| 241 |
def add_sample(self, region: np.ndarray, clock_value: int, timestamp: float, confidence: float = 1.0) -> None:
|
| 242 |
"""
|
|
@@ -439,32 +298,20 @@ class DigitTemplateBuilder:
|
|
| 439 |
|
| 440 |
def get_coverage_status(self) -> Dict[str, any]:
|
| 441 |
"""Get current sample coverage status."""
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
tens_have = set()
|
| 445 |
-
has_blank = False
|
| 446 |
|
| 447 |
-
|
| 448 |
-
|
| 449 |
-
if is_tens:
|
| 450 |
-
if digit == -1:
|
| 451 |
-
has_blank = True
|
| 452 |
-
else:
|
| 453 |
-
tens_have.add(digit)
|
| 454 |
-
else:
|
| 455 |
-
if position == "center":
|
| 456 |
-
ones_center_have.add(digit)
|
| 457 |
-
elif position == "right":
|
| 458 |
-
ones_right_have.add(digit)
|
| 459 |
|
| 460 |
return {
|
| 461 |
"ones_center": sorted(ones_center_have),
|
| 462 |
"ones_right": sorted(ones_right_have),
|
| 463 |
"tens": sorted(tens_have),
|
| 464 |
"has_blank": has_blank,
|
| 465 |
-
"ones_center_missing": sorted(
|
| 466 |
-
"ones_right_missing": sorted(
|
| 467 |
-
"tens_missing": sorted(
|
| 468 |
}
|
| 469 |
|
| 470 |
def get_coverage_estimate(self) -> float:
|
|
|
|
|
|
|
| 1 |
"""
|
| 2 |
Digit template builder for creating play clock digit templates from OCR samples.
|
| 3 |
|
|
|
|
| 5 |
the play clock region, extracts individual digits, and builds averaged templates
|
| 6 |
for each unique digit value.
|
| 7 |
|
| 8 |
+
Region extraction and preprocessing utilities are shared from utils to eliminate code duplication.
|
|
|
|
| 9 |
"""
|
| 10 |
|
| 11 |
import logging
|
|
|
|
| 14 |
import cv2
|
| 15 |
import numpy as np
|
| 16 |
|
| 17 |
+
from utils import (
|
| 18 |
+
extract_center_region,
|
| 19 |
+
extract_far_left_region,
|
| 20 |
+
extract_left_region,
|
| 21 |
+
extract_right_region,
|
| 22 |
+
preprocess_playclock_region,
|
| 23 |
+
)
|
| 24 |
+
from .coverage import ONES_DIGITS, categorize_template_keys
|
| 25 |
from .models import DigitSample, DigitTemplate
|
| 26 |
from .template_library import DigitTemplateLibrary
|
| 27 |
|
| 28 |
logger = logging.getLogger(__name__)
|
| 29 |
|
| 30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
class DigitTemplateBuilder:
|
| 32 |
"""
|
| 33 |
Builds digit templates from OCR-labeled play clock samples.
|
|
|
|
| 65 |
"""
|
| 66 |
Preprocess play clock region for template extraction.
|
| 67 |
|
| 68 |
+
Delegates to shared utility function in utils.regions.
|
| 69 |
|
| 70 |
Args:
|
| 71 |
region: Play clock region (BGR format)
|
|
|
|
| 73 |
Returns:
|
| 74 |
Preprocessed binary image (white digits on black background)
|
| 75 |
"""
|
| 76 |
+
return preprocess_playclock_region(region, scale_factor=4)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
|
| 78 |
def extract_digits(self, preprocessed: np.ndarray, clock_value: int) -> Tuple[Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray], Optional[np.ndarray]]:
|
| 79 |
"""
|
|
|
|
| 93 |
"""
|
| 94 |
if clock_value >= 10:
|
| 95 |
# Double-digit: standard left/right split
|
| 96 |
+
return extract_left_region(preprocessed), extract_right_region(preprocessed), None, None
|
| 97 |
# Single-digit: far-left is blank (truly empty), ones is centered
|
| 98 |
+
return None, None, extract_center_region(preprocessed), extract_far_left_region(preprocessed)
|
| 99 |
|
| 100 |
def add_sample(self, region: np.ndarray, clock_value: int, timestamp: float, confidence: float = 1.0) -> None:
|
| 101 |
"""
|
|
|
|
| 298 |
|
| 299 |
def get_coverage_status(self) -> Dict[str, any]:
|
| 300 |
"""Get current sample coverage status."""
|
| 301 |
+
# Get keys for samples that have at least one entry
|
| 302 |
+
keys_with_samples = [key for key, samples in self.samples.items() if len(samples) >= 1]
|
|
|
|
|
|
|
| 303 |
|
| 304 |
+
# Use shared utility to categorize
|
| 305 |
+
ones_center_have, ones_right_have, tens_have, has_blank = categorize_template_keys(keys_with_samples)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 306 |
|
| 307 |
return {
|
| 308 |
"ones_center": sorted(ones_center_have),
|
| 309 |
"ones_right": sorted(ones_right_have),
|
| 310 |
"tens": sorted(tens_have),
|
| 311 |
"has_blank": has_blank,
|
| 312 |
+
"ones_center_missing": sorted(ONES_DIGITS - ones_center_have),
|
| 313 |
+
"ones_right_missing": sorted(ONES_DIGITS - ones_right_have),
|
| 314 |
+
"tens_missing": sorted({1, 2, 3, 4} - tens_have),
|
| 315 |
}
|
| 316 |
|
| 317 |
def get_coverage_estimate(self) -> float:
|
src/setup/template_library.py
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
-
# pylint: disable=duplicate-code
|
| 2 |
"""
|
| 3 |
Digit template library for storing and managing play clock digit templates.
|
| 4 |
|
|
@@ -9,10 +8,11 @@ managing digit templates used for play clock reading.
|
|
| 9 |
import json
|
| 10 |
import logging
|
| 11 |
from pathlib import Path
|
| 12 |
-
from typing import Dict, List, Optional, Tuple
|
| 13 |
|
| 14 |
import cv2
|
| 15 |
|
|
|
|
| 16 |
from .models import DigitTemplate
|
| 17 |
|
| 18 |
logger = logging.getLogger(__name__)
|
|
@@ -93,60 +93,30 @@ class DigitTemplateLibrary:
|
|
| 93 |
templates.append(template)
|
| 94 |
return templates
|
| 95 |
|
| 96 |
-
def get_coverage_status(self) -> Dict[str,
|
| 97 |
"""
|
| 98 |
Get the current template coverage status.
|
| 99 |
|
| 100 |
Returns:
|
| 101 |
Dictionary with coverage information
|
| 102 |
"""
|
| 103 |
-
|
| 104 |
-
ones_right_have =
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
for (is_tens, digit, position), _ in self.templates.items():
|
| 109 |
-
if is_tens:
|
| 110 |
-
if digit == -1:
|
| 111 |
-
has_blank = True
|
| 112 |
-
else:
|
| 113 |
-
tens_have.add(digit)
|
| 114 |
-
else:
|
| 115 |
-
if position == "center":
|
| 116 |
-
ones_center_have.add(digit)
|
| 117 |
-
elif position == "right":
|
| 118 |
-
ones_right_have.add(digit)
|
| 119 |
-
|
| 120 |
-
ones_center_missing = set(self.ONES_DIGITS) - ones_center_have
|
| 121 |
-
ones_right_missing = set(self.ONES_DIGITS) - ones_right_have
|
| 122 |
-
tens_missing = set([1, 2, 3, 4]) - tens_have
|
| 123 |
-
|
| 124 |
-
# Calculate totals
|
| 125 |
-
# ones_center (10) + ones_right (10) + tens (4) + blank (1) = 25
|
| 126 |
-
total_needed = 25
|
| 127 |
-
total_have = len(self.templates)
|
| 128 |
|
| 129 |
# Convert -1 to "blank" for display
|
| 130 |
def format_tens(digits):
|
| 131 |
return sorted(["blank" if d == -1 else d for d in digits], key=lambda x: (isinstance(x, str), x))
|
| 132 |
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
"ones_right_missing": sorted(ones_right_missing),
|
| 141 |
-
"tens_have": sorted(tens_have),
|
| 142 |
-
"tens_missing": sorted(tens_missing),
|
| 143 |
-
"has_blank": has_blank,
|
| 144 |
-
# Legacy fields for backward compatibility
|
| 145 |
-
"ones_have": sorted(ones_center_have | ones_right_have),
|
| 146 |
-
"ones_missing": sorted(ones_center_missing & ones_right_missing),
|
| 147 |
-
"tens_have_formatted": format_tens(tens_have | ({-1} if has_blank else set())),
|
| 148 |
-
"tens_missing_formatted": format_tens(tens_missing | (set() if has_blank else {-1})),
|
| 149 |
-
}
|
| 150 |
|
| 151 |
def is_complete(self) -> bool:
|
| 152 |
"""Check if all required templates are present."""
|
|
|
|
|
|
|
| 1 |
"""
|
| 2 |
Digit template library for storing and managing play clock digit templates.
|
| 3 |
|
|
|
|
| 8 |
import json
|
| 9 |
import logging
|
| 10 |
from pathlib import Path
|
| 11 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 12 |
|
| 13 |
import cv2
|
| 14 |
|
| 15 |
+
from .coverage import ONES_DIGITS, calculate_coverage_status, categorize_template_keys
|
| 16 |
from .models import DigitTemplate
|
| 17 |
|
| 18 |
logger = logging.getLogger(__name__)
|
|
|
|
| 93 |
templates.append(template)
|
| 94 |
return templates
|
| 95 |
|
| 96 |
+
def get_coverage_status(self) -> Dict[str, Any]:
|
| 97 |
"""
|
| 98 |
Get the current template coverage status.
|
| 99 |
|
| 100 |
Returns:
|
| 101 |
Dictionary with coverage information
|
| 102 |
"""
|
| 103 |
+
# Use shared utility to categorize template keys
|
| 104 |
+
ones_center_have, ones_right_have, tens_have, has_blank = categorize_template_keys(self.templates.keys())
|
| 105 |
+
|
| 106 |
+
# Get base coverage status from shared utility
|
| 107 |
+
status = calculate_coverage_status(ones_center_have, ones_right_have, tens_have, has_blank, total_items=len(self.templates))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
|
| 109 |
# Convert -1 to "blank" for display
|
| 110 |
def format_tens(digits):
|
| 111 |
return sorted(["blank" if d == -1 else d for d in digits], key=lambda x: (isinstance(x, str), x))
|
| 112 |
|
| 113 |
+
# Add legacy fields for backward compatibility
|
| 114 |
+
status["ones_have"] = sorted(ones_center_have | ones_right_have)
|
| 115 |
+
status["ones_missing"] = sorted((ONES_DIGITS - ones_center_have) & (ONES_DIGITS - ones_right_have))
|
| 116 |
+
status["tens_have_formatted"] = format_tens(tens_have | ({-1} if has_blank else set()))
|
| 117 |
+
status["tens_missing_formatted"] = format_tens((status["tens_missing"]) | (set() if has_blank else {-1}))
|
| 118 |
+
|
| 119 |
+
return status
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 120 |
|
| 121 |
def is_complete(self) -> bool:
|
| 122 |
"""Check if all required templates are present."""
|
src/tracking/play_merger.py
CHANGED
|
@@ -13,10 +13,13 @@ from .models import PlayEvent
|
|
| 13 |
logger = logging.getLogger(__name__)
|
| 14 |
|
| 15 |
|
| 16 |
-
class PlayMerger:
|
| 17 |
"""
|
| 18 |
Merge plays from multiple sources with deduplication and filtering.
|
| 19 |
|
|
|
|
|
|
|
|
|
|
| 20 |
Handles:
|
| 21 |
- Overlapping plays (start_time < last.end_time)
|
| 22 |
- Close plays (start times within proximity_threshold) representing same event
|
|
|
|
| 13 |
logger = logging.getLogger(__name__)
|
| 14 |
|
| 15 |
|
| 16 |
+
class PlayMerger: # pylint: disable=too-few-public-methods
|
| 17 |
"""
|
| 18 |
Merge plays from multiple sources with deduplication and filtering.
|
| 19 |
|
| 20 |
+
This is intentionally a single-purpose utility class with one public method (merge).
|
| 21 |
+
The filtering methods are internal implementation details.
|
| 22 |
+
|
| 23 |
Handles:
|
| 24 |
- Overlapping plays (start_time < last.end_time)
|
| 25 |
- Close plays (start times within proximity_threshold) representing same event
|
src/tracking/play_state.py
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
-
# pylint: disable=duplicate-code
|
| 2 |
"""
|
| 3 |
Play state machine module for detecting play start and end times.
|
| 4 |
|
|
@@ -23,7 +22,7 @@ from .models import PlayEvent, PlayState, PlayTrackingState, TrackPlayStateConfi
|
|
| 23 |
logger = logging.getLogger(__name__)
|
| 24 |
|
| 25 |
|
| 26 |
-
class TrackPlayState:
|
| 27 |
"""
|
| 28 |
State machine for detecting play boundaries using play clock behavior.
|
| 29 |
|
|
@@ -315,7 +314,8 @@ class TrackPlayState: # pylint: disable=too-many-instance-attributes,too-many-p
|
|
| 315 |
self._state.current_play_type = "special"
|
| 316 |
self._start_play(timestamp, "clock_reset_special", 40)
|
| 317 |
|
| 318 |
-
|
|
|
|
| 319 |
|
| 320 |
def _check_timeout_change(self, timeout_info: Optional[TimeoutInfo] = None) -> Optional[str]:
|
| 321 |
"""
|
|
@@ -341,17 +341,41 @@ class TrackPlayState: # pylint: disable=too-many-instance-attributes,too-many-p
|
|
| 341 |
|
| 342 |
return None
|
| 343 |
|
| 344 |
-
# pylint: disable=too-many-return-statements,too-many-branches
|
| 345 |
def _handle_play_in_progress(self, timestamp: float, clock_value: int) -> Optional[PlayEvent]:
|
| 346 |
-
"""Handle clock reading during PLAY_IN_PROGRESS state.
|
|
|
|
|
|
|
|
|
|
| 347 |
if self._state.current_play_start_time is None:
|
| 348 |
return None
|
| 349 |
|
| 350 |
# Check for play duration timeout
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 351 |
play_duration = timestamp - self._state.current_play_start_time
|
| 352 |
if play_duration > self.config.max_play_duration:
|
| 353 |
# Cap the end time at start + max_duration to avoid inflated durations
|
| 354 |
-
# This prevents long gaps (commercials, etc.) from extending play end times
|
| 355 |
capped_end_time = self._state.current_play_start_time + self.config.max_play_duration
|
| 356 |
logger.warning(
|
| 357 |
"Play duration (%.1fs) exceeded max (%.1fs), forcing end at %.1fs (capped from %.1fs)",
|
|
@@ -361,133 +385,129 @@ class TrackPlayState: # pylint: disable=too-many-instance-attributes,too-many-p
|
|
| 361 |
timestamp,
|
| 362 |
)
|
| 363 |
self._state.direct_end_time = capped_end_time
|
| 364 |
-
self._state.countdown_history = []
|
| 365 |
return self._end_play_capped(capped_end_time, clock_value, "max_duration")
|
|
|
|
| 366 |
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
if
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
|
| 378 |
-
|
| 379 |
-
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
time_at_40 = timestamp - self._state.first_40_timestamp
|
| 391 |
-
max_time_for_possession_change = 5.0 # 40→25 within 5 seconds indicates possession change
|
| 392 |
-
min_time_at_40 = 0.5 # Must be at 40 briefly to avoid OCR noise (lowered from 1.0 for timing precision)
|
| 393 |
-
|
| 394 |
-
if min_time_at_40 <= time_at_40 <= max_time_for_possession_change and len(self._state.countdown_history) == 0:
|
| 395 |
-
# Possession change detected - play continues (e.g., punt return in progress)
|
| 396 |
-
# DO NOT end the play here - let it continue until proper end detection
|
| 397 |
-
logger.info(
|
| 398 |
-
"Possession change detected at %.1fs: 40 → 25 after %.1fs. Play continues (return may be in progress).",
|
| 399 |
-
timestamp,
|
| 400 |
-
time_at_40,
|
| 401 |
-
)
|
| 402 |
-
# Mark as special play and update clock base for proper end detection
|
| 403 |
-
self._state.current_play_type = "special"
|
| 404 |
-
self._state.current_play_clock_base = 25
|
| 405 |
-
self._state.first_40_timestamp = None # Reset since we're now tracking at 25
|
| 406 |
-
self._state.countdown_history = [] # Reset countdown tracking for fresh detection at 25
|
| 407 |
-
return None # Continue tracking - DO NOT end the play!
|
| 408 |
-
|
| 409 |
-
# ABNORMAL CLOCK DROP DETECTION:
|
| 410 |
-
# If the first reading after seeing 40 drops by more than 5 seconds (e.g., 40 → 25),
|
| 411 |
-
# this could be either:
|
| 412 |
-
# 1. A timeout/injury reset (no play happened) - clock was at 40 briefly
|
| 413 |
-
# 2. A turnover/possession change (play DID happen) - clock was at 40 for several seconds during play
|
| 414 |
-
# We distinguish by checking how long the clock was at 40.
|
| 415 |
-
if len(self._state.countdown_history) == 0:
|
| 416 |
-
# This is the first reading after 40
|
| 417 |
-
clock_drop = 40 - clock_value
|
| 418 |
-
max_normal_drop = 5 # Allow up to 5 second drop on first reading (accounts for timing/OCR variance)
|
| 419 |
-
if clock_drop > max_normal_drop:
|
| 420 |
-
# Check how long clock was at 40 to distinguish turnover from timeout
|
| 421 |
-
time_at_40 = (timestamp - self._state.first_40_timestamp) if self._state.first_40_timestamp else 0
|
| 422 |
-
min_time_for_play = 2.0 # If clock was at 40 for > 2 seconds, a play likely happened
|
| 423 |
-
|
| 424 |
-
if time_at_40 > min_time_for_play:
|
| 425 |
-
# Ball was snapped, play happened - this is likely a turnover/possession change
|
| 426 |
-
# Estimate play ended shortly before we saw the abnormal reading
|
| 427 |
-
play_end_time = timestamp - 1.0
|
| 428 |
-
logger.info(
|
| 429 |
-
"Turnover/possession change detected at %.1fs: 40 → %d after %.1fs at 40. Recording play end at %.1fs.",
|
| 430 |
-
timestamp,
|
| 431 |
-
clock_value,
|
| 432 |
-
time_at_40,
|
| 433 |
-
play_end_time,
|
| 434 |
-
)
|
| 435 |
-
return self._end_play_with_backward_calc(timestamp, clock_value, play_end_time)
|
| 436 |
-
# Brief time at 40, likely timeout/reset, not a real play
|
| 437 |
-
logger.warning(
|
| 438 |
-
"Abnormal clock drop detected at %.1fs: 40 → %d (drop of %d seconds, only %.1fs at 40). "
|
| 439 |
-
"Likely timeout/injury reset, not a real play. Resetting to PRE_SNAP.",
|
| 440 |
-
timestamp,
|
| 441 |
-
clock_value,
|
| 442 |
-
clock_drop,
|
| 443 |
-
time_at_40,
|
| 444 |
-
)
|
| 445 |
-
self._reset_play_tracking()
|
| 446 |
-
self._state.state = PlayState.PRE_SNAP
|
| 447 |
-
return None
|
| 448 |
|
| 449 |
-
|
| 450 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 451 |
self._state.countdown_history.append((timestamp, clock_value))
|
| 452 |
|
| 453 |
# Check if we have enough consecutive descending values
|
| 454 |
-
if len(self._state.countdown_history)
|
| 455 |
-
|
| 456 |
-
recent = self._state.countdown_history[-self.config.required_countdown_ticks :]
|
| 457 |
-
values = [v for _, v in recent]
|
| 458 |
-
|
| 459 |
-
# Check if values are strictly descending (or stable which means same second)
|
| 460 |
-
is_valid_countdown = True
|
| 461 |
-
for i in range(1, len(values)):
|
| 462 |
-
# Allow same value (within same second) or descending
|
| 463 |
-
if values[i] > values[i - 1]:
|
| 464 |
-
is_valid_countdown = False
|
| 465 |
-
break
|
| 466 |
-
|
| 467 |
-
if is_valid_countdown:
|
| 468 |
-
# Use the first reading in our confirmed sequence for backward calculation
|
| 469 |
-
first_timestamp, first_value = recent[0]
|
| 470 |
-
# Use the correct clock base (40 for normal plays, 25 for special teams)
|
| 471 |
-
calculated_end_time = first_timestamp - (self._state.current_play_clock_base - first_value)
|
| 472 |
-
logger.info(
|
| 473 |
-
"Play END confirmed via %d-tick countdown: %.1fs (clock=%d→%d, base=%d, observed %.1fs-%.1fs)",
|
| 474 |
-
self.config.required_countdown_ticks,
|
| 475 |
-
calculated_end_time,
|
| 476 |
-
values[0],
|
| 477 |
-
values[-1],
|
| 478 |
-
self._state.current_play_clock_base,
|
| 479 |
-
recent[0][0],
|
| 480 |
-
recent[-1][0],
|
| 481 |
-
)
|
| 482 |
-
self._state.direct_end_time = timestamp # When we confirmed the countdown
|
| 483 |
-
self._state.countdown_history = [] # Reset for next play
|
| 484 |
-
return self._end_play_with_backward_calc(timestamp, first_value, calculated_end_time)
|
| 485 |
|
| 486 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 487 |
|
| 488 |
-
def _handle_post_play(self, timestamp: float,
|
| 489 |
"""Handle clock reading during POST_PLAY state."""
|
| 490 |
-
# Note: clock_value unused for now, but kept for
|
| 491 |
# Transition back to PRE_SNAP
|
| 492 |
logger.debug("Transitioning from POST_PLAY to PRE_SNAP at %.1fs", timestamp)
|
| 493 |
self._state.state = PlayState.PRE_SNAP
|
|
|
|
|
|
|
| 1 |
"""
|
| 2 |
Play state machine module for detecting play start and end times.
|
| 3 |
|
|
|
|
| 22 |
logger = logging.getLogger(__name__)
|
| 23 |
|
| 24 |
|
| 25 |
+
class TrackPlayState:
|
| 26 |
"""
|
| 27 |
State machine for detecting play boundaries using play clock behavior.
|
| 28 |
|
|
|
|
| 314 |
self._state.current_play_type = "special"
|
| 315 |
self._start_play(timestamp, "clock_reset_special", 40)
|
| 316 |
|
| 317 |
+
# Play tracking is started, not completed - no PlayEvent to return
|
| 318 |
+
# (implicitly returns None)
|
| 319 |
|
| 320 |
def _check_timeout_change(self, timeout_info: Optional[TimeoutInfo] = None) -> Optional[str]:
|
| 321 |
"""
|
|
|
|
| 341 |
|
| 342 |
return None
|
| 343 |
|
|
|
|
| 344 |
def _handle_play_in_progress(self, timestamp: float, clock_value: int) -> Optional[PlayEvent]:
|
| 345 |
+
"""Handle clock reading during PLAY_IN_PROGRESS state.
|
| 346 |
+
|
| 347 |
+
Delegates to helper methods for each detection scenario to reduce complexity.
|
| 348 |
+
"""
|
| 349 |
if self._state.current_play_start_time is None:
|
| 350 |
return None
|
| 351 |
|
| 352 |
# Check for play duration timeout
|
| 353 |
+
result = self._check_play_timeout(timestamp, clock_value)
|
| 354 |
+
if result is not None:
|
| 355 |
+
return result
|
| 356 |
+
|
| 357 |
+
# Handle clock still at 40 (waiting for countdown)
|
| 358 |
+
if clock_value == 40:
|
| 359 |
+
self._handle_clock_at_40(timestamp)
|
| 360 |
+
return None
|
| 361 |
+
|
| 362 |
+
# Check for possession change (40→25 transition)
|
| 363 |
+
# Note: This function only updates state, never returns a PlayEvent
|
| 364 |
+
self._check_possession_change(timestamp, clock_value)
|
| 365 |
+
|
| 366 |
+
# Check for abnormal clock drop (first reading after 40)
|
| 367 |
+
result = self._check_abnormal_clock_drop(timestamp, clock_value)
|
| 368 |
+
if result is not None:
|
| 369 |
+
return result
|
| 370 |
+
|
| 371 |
+
# Check for countdown confirmation (play end detection)
|
| 372 |
+
return self._check_countdown_confirmation(timestamp, clock_value)
|
| 373 |
+
|
| 374 |
+
def _check_play_timeout(self, timestamp: float, clock_value: int) -> Optional[PlayEvent]:
|
| 375 |
+
"""Check if play duration has exceeded maximum allowed time."""
|
| 376 |
play_duration = timestamp - self._state.current_play_start_time
|
| 377 |
if play_duration > self.config.max_play_duration:
|
| 378 |
# Cap the end time at start + max_duration to avoid inflated durations
|
|
|
|
| 379 |
capped_end_time = self._state.current_play_start_time + self.config.max_play_duration
|
| 380 |
logger.warning(
|
| 381 |
"Play duration (%.1fs) exceeded max (%.1fs), forcing end at %.1fs (capped from %.1fs)",
|
|
|
|
| 385 |
timestamp,
|
| 386 |
)
|
| 387 |
self._state.direct_end_time = capped_end_time
|
| 388 |
+
self._state.countdown_history = []
|
| 389 |
return self._end_play_capped(capped_end_time, clock_value, "max_duration")
|
| 390 |
+
return None
|
| 391 |
|
| 392 |
+
def _handle_clock_at_40(self, timestamp: float) -> None:
|
| 393 |
+
"""Handle case when clock is still at 40 (play just started, waiting for countdown)."""
|
| 394 |
+
if self._state.first_40_timestamp is None:
|
| 395 |
+
self._state.first_40_timestamp = timestamp
|
| 396 |
+
logger.debug(
|
| 397 |
+
"Play in progress at %.1fs, clock still at 40 (%.1fs at 40)",
|
| 398 |
+
timestamp,
|
| 399 |
+
timestamp - self._state.first_40_timestamp,
|
| 400 |
+
)
|
| 401 |
+
self._state.countdown_history = []
|
| 402 |
|
| 403 |
+
def _check_possession_change(self, timestamp: float, clock_value: int) -> None:
|
| 404 |
+
"""Check for rapid 40→25 transition indicating possession change during play.
|
| 405 |
+
|
| 406 |
+
This happens during punts, kickoffs, and after XPs/FGs. The play continues
|
| 407 |
+
(e.g., punt return in progress) - we don't end the play here.
|
| 408 |
+
"""
|
| 409 |
+
if clock_value != 25 or self._state.first_40_timestamp is None:
|
| 410 |
+
return # Not a 40→25 transition
|
| 411 |
+
|
| 412 |
+
time_at_40 = timestamp - self._state.first_40_timestamp
|
| 413 |
+
max_time_for_possession_change = 5.0
|
| 414 |
+
min_time_at_40 = 0.5
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 415 |
|
| 416 |
+
if min_time_at_40 <= time_at_40 <= max_time_for_possession_change and len(self._state.countdown_history) == 0:
|
| 417 |
+
logger.info(
|
| 418 |
+
"Possession change detected at %.1fs: 40 → 25 after %.1fs. Play continues (return may be in progress).",
|
| 419 |
+
timestamp,
|
| 420 |
+
time_at_40,
|
| 421 |
+
)
|
| 422 |
+
# Mark as special play and update clock base for proper end detection
|
| 423 |
+
self._state.current_play_type = "special"
|
| 424 |
+
self._state.current_play_clock_base = 25
|
| 425 |
+
self._state.first_40_timestamp = None
|
| 426 |
+
self._state.countdown_history = []
|
| 427 |
+
|
| 428 |
+
def _check_abnormal_clock_drop(self, timestamp: float, clock_value: int) -> Optional[PlayEvent]:
|
| 429 |
+
"""Check for abnormal clock drop on first reading after 40.
|
| 430 |
+
|
| 431 |
+
Distinguishes between:
|
| 432 |
+
- Turnover/possession change (play happened, clock was at 40 for several seconds)
|
| 433 |
+
- Timeout/injury reset (no play, clock was at 40 briefly)
|
| 434 |
+
"""
|
| 435 |
+
if len(self._state.countdown_history) != 0:
|
| 436 |
+
return None # Not the first reading after 40
|
| 437 |
+
|
| 438 |
+
clock_drop = 40 - clock_value
|
| 439 |
+
max_normal_drop = 5
|
| 440 |
+
if clock_drop <= max_normal_drop:
|
| 441 |
+
return None # Normal drop, not abnormal
|
| 442 |
+
|
| 443 |
+
time_at_40 = (timestamp - self._state.first_40_timestamp) if self._state.first_40_timestamp else 0
|
| 444 |
+
min_time_for_play = 2.0
|
| 445 |
+
|
| 446 |
+
if time_at_40 > min_time_for_play:
|
| 447 |
+
# Ball was snapped, play happened - likely a turnover/possession change
|
| 448 |
+
play_end_time = timestamp - 1.0
|
| 449 |
+
logger.info(
|
| 450 |
+
"Turnover/possession change detected at %.1fs: 40 → %d after %.1fs at 40. Recording play end at %.1fs.",
|
| 451 |
+
timestamp,
|
| 452 |
+
clock_value,
|
| 453 |
+
time_at_40,
|
| 454 |
+
play_end_time,
|
| 455 |
+
)
|
| 456 |
+
return self._end_play_with_backward_calc(timestamp, clock_value, play_end_time)
|
| 457 |
+
|
| 458 |
+
# Brief time at 40, likely timeout/reset, not a real play
|
| 459 |
+
logger.warning(
|
| 460 |
+
"Abnormal clock drop detected at %.1fs: 40 → %d (drop of %d seconds, only %.1fs at 40). Likely timeout/injury reset, not a real play. Resetting to PRE_SNAP.",
|
| 461 |
+
timestamp,
|
| 462 |
+
clock_value,
|
| 463 |
+
clock_drop,
|
| 464 |
+
time_at_40,
|
| 465 |
+
)
|
| 466 |
+
self._reset_play_tracking()
|
| 467 |
+
self._state.state = PlayState.PRE_SNAP
|
| 468 |
+
return None
|
| 469 |
+
|
| 470 |
+
def _check_countdown_confirmation(self, timestamp: float, clock_value: int) -> Optional[PlayEvent]:
    """Check for countdown confirmation to detect play end.

    Requires K consecutive descending ticks to confirm play has ended.
    """
    # Record this reading in the countdown history.
    self._state.countdown_history.append((timestamp, clock_value))

    # Wait until the history holds at least K readings.
    window = self.config.required_countdown_ticks
    if len(self._state.countdown_history) < window:
        return None

    # Examine only the most recent K readings.
    recent = self._state.countdown_history[-window:]
    values = [clock for _, clock in recent]

    # Reject the window if any value rises; equal neighbors are fine since
    # two samples can land inside the same clock second.
    if any(later > earlier for earlier, later in zip(values, values[1:])):
        return None

    # Countdown confirmed — back-calculate when the play actually ended.
    first_timestamp, first_value = recent[0]
    calculated_end_time = first_timestamp - (self._state.current_play_clock_base - first_value)
    logger.info(
        "Play END confirmed via %d-tick countdown: %.1fs (clock=%d→%d, base=%d, observed %.1fs-%.1fs)",
        self.config.required_countdown_ticks,
        calculated_end_time,
        values[0],
        values[-1],
        self._state.current_play_clock_base,
        recent[0][0],
        recent[-1][0],
    )
    self._state.direct_end_time = timestamp
    self._state.countdown_history = []
    return self._end_play_with_backward_calc(timestamp, first_value, calculated_end_time)
|
| 507 |
|
| 508 |
+
def _handle_post_play(self, timestamp: float, _clock_value: int) -> None:
    """Handle clock reading during POST_PLAY state.

    The clock value itself is ignored here (the underscore-prefixed parameter
    is kept only so all state handlers share the same signature); any reading
    received while in POST_PLAY moves the state machine back to PRE_SNAP.
    """
    logger.debug("Transitioning from POST_PLAY to PRE_SNAP at %.1fs", timestamp)
    self._state.state = PlayState.PRE_SNAP
|
src/ui/sessions.py
CHANGED
|
@@ -17,8 +17,16 @@ from .selector import KeyHandler, RegionSelector
|
|
| 17 |
|
| 18 |
|
| 19 |
def _extract_sample_frames_for_selection(video_path: str, start_time: float, num_frames: int = 5, interval: float = 2.0) -> List[Tuple[float, np.ndarray]]:
|
| 20 |
-
"""Extract sample frames
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
return extract_sample_frames(video_path, start_time, num_frames, interval)
|
| 24 |
|
|
|
|
| 17 |
|
| 18 |
|
| 19 |
def _extract_sample_frames_for_selection(video_path: str, start_time: float, num_frames: int = 5, interval: float = 2.0) -> List[Tuple[float, np.ndarray]]:
    """Extract sample frames for interactive selection.

    Thin wrapper around ``video.frame_extractor.extract_sample_frames``.

    Note: the import is deliberately deferred into the function body.
    ``video.frame_extractor`` imports from ``ui``, so a module-level import
    here would form the cycle ui.sessions -> video.frame_extractor -> ui.
    """
    # pylint: disable=import-outside-toplevel
    # Deferred import — see docstring for the circular-dependency rationale.
    from video.frame_extractor import extract_sample_frames

    return extract_sample_frames(video_path, start_time, num_frames, interval)
|
| 32 |
|
src/utils/__init__.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Shared utility modules for the cfb40 project.
|
| 3 |
+
|
| 4 |
+
This package contains utilities that are used across multiple modules
|
| 5 |
+
to avoid code duplication and circular imports.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from .color import detect_red_digits, normalize_to_grayscale
|
| 9 |
+
from .regions import (
|
| 10 |
+
extract_left_region,
|
| 11 |
+
extract_right_region,
|
| 12 |
+
extract_center_region,
|
| 13 |
+
extract_far_left_region,
|
| 14 |
+
preprocess_playclock_region,
|
| 15 |
+
)
|
| 16 |
+
from .frame_result import create_frame_result
|
| 17 |
+
|
| 18 |
+
__all__ = [
|
| 19 |
+
"detect_red_digits",
|
| 20 |
+
"normalize_to_grayscale",
|
| 21 |
+
"extract_left_region",
|
| 22 |
+
"extract_right_region",
|
| 23 |
+
"extract_center_region",
|
| 24 |
+
"extract_far_left_region",
|
| 25 |
+
"preprocess_playclock_region",
|
| 26 |
+
"create_frame_result",
|
| 27 |
+
]
|
src/utils/color.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Color detection and normalization utilities for play clock digit processing.
|
| 3 |
+
|
| 4 |
+
This module provides utilities for detecting red digits (shown when play clock <= 5)
|
| 5 |
+
and normalizing play clock regions to grayscale for consistent template matching.
|
| 6 |
+
|
| 7 |
+
These utilities are shared across:
|
| 8 |
+
- readers/playclock.py (template matching)
|
| 9 |
+
- setup/template_builder.py (template building)
|
| 10 |
+
- setup/playclock_region.py (OCR preprocessing)
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
import cv2
|
| 16 |
+
import numpy as np
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def detect_red_digits(region: np.ndarray) -> bool:
    """
    Detect if the play clock digits are red.

    Red digits appear when the play clock has 5 seconds or less remaining.
    Red digits have high red channel values with very low green and blue.

    Args:
        region: Play clock region (BGR format)

    Returns:
        True if red digits detected, False otherwise
    """
    # Per-channel mean brightness; OpenCV stores pixels in BGR order.
    blue, green, red = cv2.split(region)
    red_mean = np.mean(red)
    green_mean = np.mean(green)
    blue_mean = np.mean(blue)

    # Red digits: red channel lit above the threshold, both green and blue
    # below it, and red dominating green by at least 2x.
    brightest_non_red = max(green_mean, blue_mean)
    is_red = red_mean > 15 and brightest_non_red < 15 and red_mean > green_mean * 2

    if is_red:
        logger.debug("Red digits detected: R=%.1f, G=%.1f, B=%.1f", red_mean, green_mean, blue_mean)

    return is_red
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def normalize_to_grayscale(region: np.ndarray) -> np.ndarray:
    """
    Normalize a play clock region to grayscale, handling both red and white digits.

    Red digits (displayed when clock <= 5) are converted to white-like grayscale
    by extracting the red channel. White digits use standard grayscale conversion.
    This allows a single set of templates to match both color variants.

    Args:
        region: Play clock region (BGR format)

    Returns:
        Grayscale image where digits appear as bright pixels on dark background
    """
    if detect_red_digits(region):
        # Red digits: the red channel alone already shows the digits as
        # bright-on-dark, giving them a white-like grayscale appearance.
        # cv2.split returns (B, G, R); index 2 is the red channel.
        return cv2.split(region)[2]

    # White digits: plain BGR -> grayscale conversion.
    return cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
|
src/utils/frame_result.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Frame result factory for creating standardized detection result dictionaries.
|
| 3 |
+
|
| 4 |
+
This module provides a factory function for creating frame-level detection results
|
| 5 |
+
used in both sequential and parallel processing pipelines.
|
| 6 |
+
|
| 7 |
+
These utilities are shared across:
|
| 8 |
+
- pipeline/parallel.py
|
| 9 |
+
- pipeline/play_detector.py
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from typing import Any, Dict, Optional, Tuple
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def create_frame_result(
    timestamp: float,
    scorebug_detected: bool,
    scorebug_bbox: Optional[Tuple[int, int, int, int]] = None,
    home_timeouts: Optional[int] = None,
    away_timeouts: Optional[int] = None,
    clock_value: Optional[int] = None,
    clock_detected: bool = False,
) -> Dict[str, Any]:
    """
    Create a standardized frame detection result dictionary.

    This factory function ensures consistent structure for frame results
    across both sequential and parallel processing pipelines.

    Args:
        timestamp: Frame timestamp in seconds.
        scorebug_detected: Whether scorebug was detected in this frame.
        scorebug_bbox: Scorebug bounding box (x, y, w, h) if detected.
        home_timeouts: Number of home team timeouts remaining.
        away_timeouts: Number of away team timeouts remaining.
        clock_value: Detected play clock value (0-40).
        clock_detected: Whether clock value was successfully read.

    Returns:
        Dictionary with frame detection results.
    """
    # dict() keyword form keeps each result key identical to its parameter
    # name, so the schema stays in lockstep with the signature.
    return dict(
        timestamp=timestamp,
        scorebug_detected=scorebug_detected,
        scorebug_bbox=scorebug_bbox,
        home_timeouts=home_timeouts,
        away_timeouts=away_timeouts,
        clock_value=clock_value,
        clock_detected=clock_detected,
    )
|
src/utils/regions.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Region extraction utilities for play clock digit processing.
|
| 3 |
+
|
| 4 |
+
This module provides utilities for extracting digit regions from preprocessed
|
| 5 |
+
play clock images. The regions are used for both template building and matching.
|
| 6 |
+
|
| 7 |
+
Play clock display layouts:
|
| 8 |
+
- Single-digit (0-9): Digit is CENTER-aligned, tens position is blank
|
| 9 |
+
- Double-digit (10-40): Tens on LEFT, ones on RIGHT
|
| 10 |
+
|
| 11 |
+
These utilities are shared across:
|
| 12 |
+
- readers/playclock.py (template matching)
|
| 13 |
+
- setup/template_builder.py (template building)
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import cv2
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
from .color import normalize_to_grayscale
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def extract_left_region(preprocessed: np.ndarray) -> np.ndarray:
    """
    Extract left region for tens digit in double-digit displays.

    Args:
        preprocessed: Preprocessed play clock image (scaled up)

    Returns:
        Left region image for tens digit
    """
    width = preprocessed.shape[1]
    # Stop 2px short of the midpoint so the two digit regions never touch.
    return preprocessed[:, : width // 2 - 2]
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def extract_right_region(preprocessed: np.ndarray) -> np.ndarray:
    """
    Extract right region for ones digit in double-digit displays.

    Args:
        preprocessed: Preprocessed play clock image (scaled up)

    Returns:
        Right region image for ones digit
    """
    width = preprocessed.shape[1]
    # Start 2px past the midpoint, mirroring the gap left by the tens region.
    return preprocessed[:, width // 2 + 2 :]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def extract_center_region(preprocessed: np.ndarray) -> np.ndarray:
    """
    Extract center region for ones digit in single-digit displays.

    The center region spans approximately 60% of the width, centered.

    Args:
        preprocessed: Preprocessed play clock image (scaled up)

    Returns:
        Center region image for centered single digit
    """
    width = preprocessed.shape[1]
    # Keep the middle 20%-80% band where a centered single digit sits.
    start, stop = int(width * 0.20), int(width * 0.80)
    return preprocessed[:, start:stop]
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def extract_far_left_region(preprocessed: np.ndarray) -> np.ndarray:
    """
    Extract far left region for blank detection in single-digit displays.

    This narrow region (0%-20% of width) doesn't overlap with a centered digit,
    so it should be truly empty when the clock shows a single digit.

    Args:
        preprocessed: Preprocessed play clock image (scaled up)

    Returns:
        Far left region image (should be mostly black for single digits)
    """
    width = preprocessed.shape[1]
    # First 20% of the width — blank whenever the digit is center-aligned.
    return preprocessed[:, : int(width * 0.20)]
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def preprocess_playclock_region(region: np.ndarray, scale_factor: int = 4) -> np.ndarray:
    """
    Preprocess play clock region for template matching or building.

    Uses color normalization to handle both red and white digits uniformly,
    then scales and binarizes the image.

    Args:
        region: Play clock region (BGR format)
        scale_factor: Scale factor for upscaling (default: 4)

    Returns:
        Preprocessed binary image (white digits on black background)
    """
    # Color normalization maps red digits into the same grayscale space as
    # white ones (red → white conversion happens here).
    gray = normalize_to_grayscale(region)

    # Upscale before thresholding for better template quality/matching.
    upscaled = cv2.resize(gray, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_LINEAR)

    # Otsu's method picks the binarization threshold automatically.
    _, binary = cv2.threshold(upscaled, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # If the result is mostly white, the polarity came out inverted; flip it
    # so digits are the bright pixels on a dark background.
    if np.mean(binary) > 128:
        binary = cv2.bitwise_not(binary)

    return binary
|