Spaces:
Sleeping
Sleeping
Merge pull request #6 from andytaylor-smg/code-quality
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- README.md +258 -204
- main.py +7 -11
- pyproject.toml +14 -0
- scripts/archive/v2/benchmark_ocr.py +5 -7
- scripts/archive/v2/benchmark_ocr_batching.py +10 -9
- scripts/archive/v2/detect_plays.py +3 -5
- scripts/archive/v2/diagnose_play_clock.py +11 -12
- scripts/archive/v2/diagnose_scorebug_threshold.py +1 -2
- scripts/archive/v2/identify_play_clock_region.py +6 -8
- scripts/archive/v2/test_play_clock_reader.py +13 -13
- scripts/archive/v2/test_state_machine.py +5 -10
- scripts/archive/v3/cache_detections.py +7 -10
- scripts/archive/v3/configure_timeout_tracker.py +6 -11
- scripts/archive/v3/debug_red_preprocessing.py +0 -4
- scripts/archive/v3/diagnose_red_play_clock.py +6 -9
- scripts/archive/v3/replay_state_machine.py +4 -6
- scripts/investigate_missed_plays.py +5 -8
- scripts/test_digit_extraction.py +5 -7
- scripts/test_fast_full_video.py +0 -3
- scripts/test_full_segment.py +0 -3
- scripts/test_full_video_evaluation.py +3 -6
- scripts/test_missed_play.py +0 -3
- scripts/test_template_accuracy.py +6 -8
- src/config/__init__.py +1 -1
- src/config/models.py +55 -0
- src/config/session.py +159 -122
- src/detection/__init__.py +26 -0
- src/detection/models.py +51 -0
- src/{detectors/scorebug_detector.py → detection/scorebug.py} +21 -12
- src/{detectors/timeout_tracker.py → detection/timeouts.py} +12 -8
- src/detectors/__init__.py +0 -55
- src/detectors/digit_template_reader.py +0 -1113
- src/detectors/models.py +0 -165
- src/detectors/play_state_machine.py +0 -602
- src/pipeline/__init__.py +8 -6
- src/pipeline/models.py +63 -48
- src/pipeline/orchestrator.py +23 -29
- src/pipeline/parallel.py +124 -121
- src/pipeline/{play_detector.py → play_extractor.py} +164 -705
- src/pipeline/template_builder_pass.py +334 -0
- src/readers/__init__.py +28 -0
- src/readers/models.py +38 -0
- src/readers/playclock.py +543 -0
- src/setup/__init__.py +25 -0
- src/setup/coverage.py +84 -0
- src/setup/models.py +51 -0
- src/{detectors/play_clock_reader.py → setup/playclock_region.py} +45 -172
- src/setup/template_builder.py +340 -0
- src/setup/template_library.py +214 -0
- src/tracking/__init__.py +37 -0
README.md
CHANGED
|
@@ -4,14 +4,20 @@ Automatically detect and extract live plays from college football game videos us
|
|
| 4 |
|
| 5 |
## Project Status
|
| 6 |
|
| 7 |
-
**Status:**
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
### Detection Capabilities
|
| 10 |
- ✅ **Normal plays** - Regular offensive plays (149+ per game)
|
| 11 |
- ✅ **Extra points (XPs)** - 100% detection rate
|
| 12 |
- ✅ **Field goals (FGs)** - 50% detection rate
|
| 13 |
- ✅ **Punts** - 90% detection rate
|
| 14 |
-
- ✅ **Timeout tracking** - Team timeout detection
|
| 15 |
- ⚠️ **Opening kickoffs** - Not yet supported
|
| 16 |
|
| 17 |
## Overview
|
|
@@ -34,196 +40,231 @@ python main.py --video "full_videos/OSU vs Tenn 12.21.24.mkv"
|
|
| 34 |
|
| 35 |
# Testing mode (uses 10-minute segment at 38:40-48:40)
|
| 36 |
python main.py --testing
|
|
|
|
|
|
|
|
|
|
| 37 |
```
|
| 38 |
|
| 39 |
## Project Structure
|
| 40 |
|
| 41 |
```
|
| 42 |
cfb40/
|
| 43 |
-
├── main.py #
|
| 44 |
├── src/
|
| 45 |
-
│ ├──
|
| 46 |
-
│ │ ├── models.py # Pydantic
|
| 47 |
-
│ │ ├──
|
| 48 |
-
│ │ ├── sessions.py # Interactive selection session classes
|
| 49 |
-
│ │ ├── api.py # Public API functions
|
| 50 |
│ │ └── __init__.py
|
| 51 |
-
│ ├──
|
| 52 |
-
│ │ ├──
|
| 53 |
-
│ │ ├──
|
|
|
|
| 54 |
│ │ └── __init__.py
|
| 55 |
-
│ ├──
|
| 56 |
-
│ │ ├──
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
│ │ └── __init__.py
|
| 58 |
-
│ ├──
|
| 59 |
-
│ │ ├──
|
| 60 |
-
│ │ ├──
|
| 61 |
-
│ │ ├──
|
| 62 |
-
│ │ ├──
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
│ │ └── __init__.py
|
| 64 |
-
│ └──
|
| 65 |
-
│ ├──
|
| 66 |
-
│ ├──
|
|
|
|
| 67 |
│ └── __init__.py
|
| 68 |
-
├── scripts/ #
|
| 69 |
-
│ ├──
|
| 70 |
-
│ ├──
|
| 71 |
-
│ ├──
|
| 72 |
-
│
|
| 73 |
-
│ ├── diagnose_red_play_clock.py # Debug red play clock OCR
|
| 74 |
-
│ └── debug_red_preprocessing.py # Debug red digit preprocessing
|
| 75 |
-
├── scripts/archive/ # Archived/legacy scripts
|
| 76 |
-
│ ├── identify_play_clock_region.py # Interactive tool to locate play clock
|
| 77 |
-
│ ├── detect_plays.py # CLI for running detection
|
| 78 |
-
│ ├── visualize_detections.py # Visualize and validate results
|
| 79 |
-
│ ├── test_play_clock_reader.py # Test OCR accuracy
|
| 80 |
-
│ ├── test_state_machine.py # Test state machine logic
|
| 81 |
-
│ ├── diagnose_play_clock.py # Debug play clock detection
|
| 82 |
-
│ ├── diagnose_missed_plays.py # Analyze missed play detection
|
| 83 |
-
│ ├── diagnose_scorebug_threshold.py # Debug scorebug detection
|
| 84 |
-
│ ├── diagnose_confidence_distribution.py # Analyze confidence distribution
|
| 85 |
-
│ ├── benchmark_ffmpeg_concat.py # Benchmark clip concatenation methods
|
| 86 |
-
│ ├── benchmark_ocr.py # Benchmark OCR performance
|
| 87 |
-
│ └── benchmark_ocr_batching.py # Benchmark OCR batching strategies
|
| 88 |
├── data/
|
| 89 |
-
│ ├── config/
|
| 90 |
-
│ │ └── play_clock_region.json # Play clock location config
|
| 91 |
│ └── templates/ # Scorebug template images
|
| 92 |
├── output/ # Detection results and clips
|
| 93 |
│ ├── clips/ # Generated video clips
|
| 94 |
-
│ ├── benchmarks/ # Benchmark results (v1
|
| 95 |
│ ├── cache/ # Detection cache files
|
| 96 |
-
│ └──
|
| 97 |
├── full_videos/ # Source video files (gitignored)
|
| 98 |
├── docs/ # Documentation
|
| 99 |
-
│ ├──
|
| 100 |
-
│ ├──
|
| 101 |
-
│ ├──
|
| 102 |
-
│
|
| 103 |
-
│ └── play_detection_improvements.md
|
| 104 |
-
├── tests/
|
| 105 |
-
│ └── test_data/
|
| 106 |
-
│ └── ground_truth_plays.json # Ground truth for validation
|
| 107 |
├── pyproject.toml
|
| 108 |
├── AGENTS.md # AI assistant guidelines
|
| 109 |
└── README.md
|
| 110 |
```
|
| 111 |
|
| 112 |
-
##
|
| 113 |
|
| 114 |
-
|
| 115 |
-
- Python 3.13+
|
| 116 |
-
- ffmpeg installed (`brew install ffmpeg` on macOS)
|
| 117 |
-
- Virtual environment (`.venv`)
|
| 118 |
|
| 119 |
-
###
|
| 120 |
|
| 121 |
-
|
| 122 |
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
``
|
|
|
|
| 128 |
|
| 129 |
-
|
| 130 |
-
```bash
|
| 131 |
-
pip install -e .
|
| 132 |
-
# Or manually:
|
| 133 |
-
pip install opencv-python numpy pillow easyocr torch
|
| 134 |
-
```
|
| 135 |
|
| 136 |
-
|
| 137 |
|
| 138 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 139 |
|
| 140 |
-
|
|
|
|
|
|
|
|
|
|
| 141 |
|
| 142 |
-
``
|
| 143 |
-
# Interactive mode - select video from list
|
| 144 |
-
python main.py
|
| 145 |
|
| 146 |
-
|
| 147 |
-
python main.py --video "full_videos/OSU vs ND 01.20.25.mp4"
|
| 148 |
|
| 149 |
-
|
| 150 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 151 |
|
| 152 |
-
|
| 153 |
-
|
|
|
|
|
|
|
| 154 |
|
| 155 |
-
#
|
| 156 |
-
python main.py --testing --skip-clips
|
| 157 |
|
| 158 |
-
|
| 159 |
-
python main.py --testing --all-plays-debug
|
| 160 |
-
```
|
| 161 |
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
6. **Clip extraction** - Generate video clips for each play
|
| 169 |
|
| 170 |
-
|
|
|
|
|
|
|
| 171 |
|
| 172 |
-
|
| 173 |
|
| 174 |
-
|
| 175 |
-
|--------|-------|-----------|----------|
|
| 176 |
-
| `stream_copy` (default) | Fastest | Same as source | Keyframe-aligned |
|
| 177 |
-
| `ultrafast` | Fast | Larger | Frame-accurate |
|
| 178 |
-
| `reencode` | Slowest | Smaller | Frame-accurate |
|
| 179 |
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
``
|
|
|
|
|
|
|
| 184 |
|
| 185 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
|
| 187 |
-
|
| 188 |
|
| 189 |
-
|
| 190 |
-
# Analyze special play detection
|
| 191 |
-
python scripts/diagnose_special_plays.py
|
| 192 |
|
| 193 |
-
|
| 194 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 195 |
|
| 196 |
-
|
| 197 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 198 |
|
| 199 |
-
#
|
| 200 |
-
python scripts/replay_state_machine.py
|
| 201 |
-
```
|
| 202 |
|
| 203 |
-
|
| 204 |
-
- `visualize_detections.py` - Visualize detection results
|
| 205 |
-
- `diagnose_play_clock.py` - Debug play clock reading
|
| 206 |
-
- `benchmark_*.py` - Various benchmarking scripts
|
| 207 |
|
| 208 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 209 |
|
| 210 |
-
|
| 211 |
|
| 212 |
-
|
| 213 |
-
|--------|---------|
|
| 214 |
-
| `src/ui/` | Interactive user interface components (OpenCV-based region selection) |
|
| 215 |
-
| `src/video/` | Video processing (FFmpeg operations, frame extraction) |
|
| 216 |
-
| `src/config/` | Configuration management (SessionConfig, project constants) |
|
| 217 |
-
| `src/detectors/` | Detection components (scorebug, play clock, timeout tracking) |
|
| 218 |
-
| `src/pipeline/` | Pipeline orchestration (detection flow, result handling) |
|
| 219 |
|
| 220 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 221 |
|
| 222 |
-
|
| 223 |
-
-
|
| 224 |
-
-
|
| 225 |
-
-
|
| 226 |
-
- **`PlayStateMachine`** (`detectors/play_state_machine.py`) - Play boundary detection logic
|
| 227 |
|
| 228 |
## Detection Algorithm
|
| 229 |
|
|
@@ -235,20 +276,11 @@ The codebase is organized into modular components under `src/`:
|
|
| 235 |
| **Special** | 40→25 transition | XPs, FGs, punts, injury stoppages |
|
| 236 |
| **Timeout** | Timeout indicator change | Team-called timeouts |
|
| 237 |
|
| 238 |
-
### Play Clock State Machine
|
| 239 |
-
|
| 240 |
-
The system tracks the play clock through these states:
|
| 241 |
-
1. **IDLE** - Waiting for scorebug
|
| 242 |
-
2. **PRE_SNAP** - Clock ticking down before snap
|
| 243 |
-
3. **PLAY_IN_PROGRESS** - Ball snapped, play is live
|
| 244 |
-
4. **POST_PLAY** - Play ended, transitioning
|
| 245 |
-
5. **NO_SCOREBUG** - Scorebug lost (e.g., during replay)
|
| 246 |
-
|
| 247 |
### Backward Counting (Primary Method for Play End)
|
| 248 |
|
| 249 |
When the play clock reappears showing value X (where X < 40):
|
| 250 |
```
|
| 251 |
-
play_end_time = current_time - (
|
| 252 |
```
|
| 253 |
|
| 254 |
Example: If at timestamp 100.0s we see play clock at 35, play ended at:
|
|
@@ -256,15 +288,6 @@ Example: If at timestamp 100.0s we see play clock at 35, play ended at:
|
|
| 256 |
100.0 - (40 - 35) = 95.0s
|
| 257 |
```
|
| 258 |
|
| 259 |
-
This method is reliable even when broadcasts cut to replays.
|
| 260 |
-
|
| 261 |
-
### Special Play Detection (40→25 Clock Reset)
|
| 262 |
-
|
| 263 |
-
After possession-changing plays (XPs, FGs, punts), the clock resets to 25 instead of 40:
|
| 264 |
-
1. Clock shows 40 briefly after the snap
|
| 265 |
-
2. Clock quickly transitions to 25 (possession change)
|
| 266 |
-
3. Play classified as "special" with 10-second max duration
|
| 267 |
-
|
| 268 |
### 3-Class Clock Reset Classification
|
| 269 |
|
| 270 |
When a 40→25 transition is detected:
|
|
@@ -274,13 +297,50 @@ When a 40→25 transition is detected:
|
|
| 274 |
|
| 275 |
### Quiet Time Filter
|
| 276 |
|
| 277 |
-
A 10-second quiet time is enforced after normal plays end. During this window, special/timeout plays are rejected
|
| 278 |
-
- Penalties during plays (false starts, delay of game)
|
| 279 |
-
- Clock resets from ongoing play situations
|
| 280 |
|
| 281 |
### Play Filtering
|
| 282 |
|
| 283 |
-
Plays shorter than 3 seconds are automatically filtered out (typically clock operator errors
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 284 |
|
| 285 |
## Output Files
|
| 286 |
|
|
@@ -291,12 +351,12 @@ For a video named `OSU vs Tenn 12.21.24.mkv`:
|
|
| 291 |
| `output/OSU_vs_Tenn_12_21_24_config.json` | Session configuration |
|
| 292 |
| `output/OSU_vs_Tenn_12_21_24_plays.json` | Detected plays with timestamps |
|
| 293 |
| `output/OSU_vs_Tenn_12_21_24_template.png` | Scorebug template image |
|
| 294 |
-
| `output/
|
|
|
|
| 295 |
| `output/clips/OSU_vs_Tenn_12_21_24_all_plays.mp4` | Concatenated highlight video |
|
| 296 |
|
| 297 |
### Play JSON Format
|
| 298 |
|
| 299 |
-
Each play includes:
|
| 300 |
```json
|
| 301 |
{
|
| 302 |
"play_number": 1,
|
|
@@ -312,75 +372,69 @@ Each play includes:
|
|
| 312 |
|
| 313 |
Play types: `normal`, `special`, `timeout`
|
| 314 |
|
| 315 |
-
##
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 316 |
|
| 317 |
-
|
| 318 |
-
|---------|-------------|--------|---------|---------|------------------------|
|
| 319 |
-
| V1 (Baseline) | ~130 | ~130 | 0 | 0 | 0% |
|
| 320 |
-
| V2 (Split Detection) | 149 | 149 | 0 | 0 | 0% |
|
| 321 |
-
| V3 (Special Plays) | 176 | 150 | 22 | 4 | 85% (17/20) |
|
| 322 |
|
| 323 |
-
|
|
|
|
|
|
|
|
|
|
| 324 |
|
| 325 |
-
##
|
|
|
|
|
|
|
|
|
|
|
|
|
| 326 |
|
| 327 |
-
###
|
| 328 |
-
- **File**: `full_videos/OSU vs Tenn 12.21.24.mkv`
|
| 329 |
-
- **Resolution**: 1920×1080 @ 59.94fps
|
| 330 |
-
- **Final Score**: Ohio State 42, Tennessee 17
|
| 331 |
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
|
|
|
| 335 |
|
| 336 |
-
#
|
| 337 |
-
|
| 338 |
-
|
| 339 |
|
| 340 |
## Development
|
| 341 |
|
| 342 |
### Code Style
|
| 343 |
-
- Use Black formatter with `line-length=180`
|
| 344 |
-
- Add descriptive inline comments
|
| 345 |
-
- Include logging for debugging
|
| 346 |
|
| 347 |
```bash
|
|
|
|
| 348 |
python -m black src/ scripts/ main.py --line-length=180
|
| 349 |
-
```
|
| 350 |
|
| 351 |
-
#
|
| 352 |
-
```bash
|
| 353 |
-
# Lint main modules
|
| 354 |
pylint src/ main.py
|
| 355 |
-
|
| 356 |
-
# Lint active scripts
|
| 357 |
-
pylint scripts/*.py
|
| 358 |
-
|
| 359 |
-
# Lint archived scripts (from archive directory)
|
| 360 |
-
cd scripts/archive && pylint *.py
|
| 361 |
```
|
| 362 |
|
| 363 |
-
###
|
|
|
|
| 364 |
```bash
|
| 365 |
-
#
|
| 366 |
-
python
|
| 367 |
|
| 368 |
-
#
|
| 369 |
-
python
|
| 370 |
```
|
| 371 |
|
| 372 |
## Dependencies
|
| 373 |
|
| 374 |
- `opencv-python>=4.8.0` - Video processing
|
| 375 |
- `numpy>=1.24.0` - Array operations
|
| 376 |
-
- `
|
| 377 |
-
- `easyocr>=1.7.0` - OCR for
|
| 378 |
- `torch>=2.0.0` - Required by EasyOCR
|
| 379 |
|
| 380 |
-
### Development Dependencies
|
| 381 |
-
- `black>=24.0.0` - Code formatting
|
| 382 |
-
- `pylint>=3.0.0` - Linting
|
| 383 |
-
|
| 384 |
## License
|
| 385 |
|
| 386 |
Private project - All rights reserved
|
|
|
|
| 4 |
|
| 5 |
## Project Status
|
| 6 |
|
| 7 |
+
**Status:** V4 - Production-ready with template-based detection
|
| 8 |
+
|
| 9 |
+
### Key Features
|
| 10 |
+
- ✅ **Template-based clock reading** - 34x faster than OCR (~0.3ms vs ~49ms per frame)
|
| 11 |
+
- ✅ **Streaming processing** - Read frame → process immediately (no intermediate storage)
|
| 12 |
+
- ✅ **Parallel processing** - ~26% speedup with 2 workers
|
| 13 |
+
- ✅ **Threaded video I/O** - Background reading overlaps with processing
|
| 14 |
|
| 15 |
### Detection Capabilities
|
| 16 |
- ✅ **Normal plays** - Regular offensive plays (149+ per game)
|
| 17 |
- ✅ **Extra points (XPs)** - 100% detection rate
|
| 18 |
- ✅ **Field goals (FGs)** - 50% detection rate
|
| 19 |
- ✅ **Punts** - 90% detection rate
|
| 20 |
+
- ✅ **Timeout tracking** - Team timeout detection via indicator analysis
|
| 21 |
- ⚠️ **Opening kickoffs** - Not yet supported
|
| 22 |
|
| 23 |
## Overview
|
|
|
|
| 40 |
|
| 41 |
# Testing mode (uses 10-minute segment at 38:40-48:40)
|
| 42 |
python main.py --testing
|
| 43 |
+
|
| 44 |
+
# Parallel processing (faster)
|
| 45 |
+
python main.py --testing --workers 2
|
| 46 |
```
|
| 47 |
|
| 48 |
## Project Structure
|
| 49 |
|
| 50 |
```
|
| 51 |
cfb40/
|
| 52 |
+
├── main.py # Pipeline entry point
|
| 53 |
├── src/
|
| 54 |
+
│ ├── config/ # Configuration management
|
| 55 |
+
│ │ ├── models.py # SessionConfig Pydantic model
|
| 56 |
+
│ │ ├── session.py # Save/load config, project constants
|
|
|
|
|
|
|
| 57 |
│ │ └── __init__.py
|
| 58 |
+
│ ├── detection/ # Per-frame detection components
|
| 59 |
+
│ │ ├── models.py # ScorebugDetection, TimeoutReading models
|
| 60 |
+
│ │ ├── scorebug.py # DetectScoreBug (template matching)
|
| 61 |
+
│ │ ├── timeouts.py # DetectTimeouts (oval brightness analysis)
|
| 62 |
│ │ └── __init__.py
|
| 63 |
+
│ ├── pipeline/ # Video processing orchestration
|
| 64 |
+
│ │ ├── models.py # DetectionConfig, DetectionResult, etc.
|
| 65 |
+
│ │ ├── orchestrator.py # run_extraction(), print_results_summary()
|
| 66 |
+
│ │ ├── parallel.py # Parallel chunk processing
|
| 67 |
+
│ │ ├── play_extractor.py # PlayExtractor main pipeline class
|
| 68 |
+
│ │ ├── template_builder_pass.py # Pass 0: OCR-based template building
|
| 69 |
+
│ │ └── __init__.py
|
| 70 |
+
│ ├── readers/ # Value reading from detected regions
|
| 71 |
+
│ │ ├── models.py # PlayClockReading, TemplateMatchResult
|
| 72 |
+
│ │ ├── playclock.py # ReadPlayClock (template matching)
|
| 73 |
+
│ │ └── __init__.py
|
| 74 |
+
│ ├── setup/ # One-time preparation before processing
|
| 75 |
+
│ │ ├── models.py # DigitSample, DigitTemplate, PlayClockRegionConfig
|
| 76 |
+
│ │ ├── coverage.py # Template coverage calculation utilities
|
| 77 |
+
│ │ ├── playclock_region.py # PlayClockRegionExtractor
|
| 78 |
+
│ │ ├── template_builder.py # DigitTemplateBuilder
|
| 79 |
+
│ │ ├── template_library.py # DigitTemplateLibrary
|
| 80 |
+
│ │ └── __init__.py
|
| 81 |
+
│ ├── tracking/ # Cross-frame state management
|
| 82 |
+
│ │ ├── models.py # PlayEvent, PlayState, PlayTrackingState
|
| 83 |
+
│ │ ├── play_state.py # TrackPlayState (main state machine facade)
|
| 84 |
+
│ │ ├── play_lifecycle.py # PlayLifecycle (start/end/reset plays)
|
| 85 |
+
│ │ ├── play_identification_checks.py # Clock analysis for play boundaries
|
| 86 |
+
│ │ ├── state_handlers.py # State machine transition handlers
|
| 87 |
+
│ │ ├── play_merger.py # PlayMerger (deduplicate/filter plays)
|
| 88 |
+
│ │ ├── clock_reset_identifier.py # Post-hoc 40→25 transition analysis
|
| 89 |
│ │ └── __init__.py
|
| 90 |
+
│ ├── ui/ # Interactive region selection
|
| 91 |
+
│ │ ├── models.py # BBox, SelectionState, SelectionViewConfig
|
| 92 |
+
│ │ ├── selector.py # RegionSelector (mouse callbacks)
|
| 93 |
+
│ │ ├── sessions.py # Selection session classes
|
| 94 |
+
│ │ ├── api.py # Public API functions
|
| 95 |
+
│ │ └── __init__.py
|
| 96 |
+
│ ├── utils/ # Shared utilities
|
| 97 |
+
│ │ ├── color.py # Red digit detection, color normalization
|
| 98 |
+
│ │ ├── frame_result.py # Standardized frame result factory
|
| 99 |
+
│ │ ├── regions.py # Digit region extraction utilities
|
| 100 |
│ │ └── __init__.py
|
| 101 |
+
│ └── video/ # Video I/O and FFmpeg operations
|
| 102 |
+
│ ├── ffmpeg_ops.py # Clip extraction/concatenation
|
| 103 |
+
│ ├── frame_extractor.py # Frame extraction utilities
|
| 104 |
+
│ ├── frame_reader.py # ThreadedFrameReader
|
| 105 |
│ └── __init__.py
|
| 106 |
+
├── scripts/ # Utility scripts
|
| 107 |
+
│ ├── test_full_segment.py # Test on video segment
|
| 108 |
+
│ ├── test_full_video_evaluation.py # Full video evaluation
|
| 109 |
+
│ ├── test_template_accuracy.py # Template matching accuracy test
|
| 110 |
+
│ └── archive/ # Archived scripts (v2, v3)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
├── data/
|
| 112 |
+
│ ├── config/ # Region configuration files
|
|
|
|
| 113 |
│ └── templates/ # Scorebug template images
|
| 114 |
├── output/ # Detection results and clips
|
| 115 |
│ ├── clips/ # Generated video clips
|
| 116 |
+
│ ├── benchmarks/ # Benchmark results (v1-v4 baselines)
|
| 117 |
│ ├── cache/ # Detection cache files
|
| 118 |
+
│ └── debug/ # Debug output (digit templates, etc.)
|
| 119 |
├── full_videos/ # Source video files (gitignored)
|
| 120 |
├── docs/ # Documentation
|
| 121 |
+
│ ├── ocr_to_template_migration.md # Template matching migration details
|
| 122 |
+
│ ├── ocr_benchmark.md # OCR vs template performance comparison
|
| 123 |
+
│ ├── video_processing_speedup.md # Streaming/parallel optimization docs
|
| 124 |
+
│ └── ...
|
|
|
|
|
|
|
|
|
|
|
|
|
| 125 |
├── pyproject.toml
|
| 126 |
├── AGENTS.md # AI assistant guidelines
|
| 127 |
└── README.md
|
| 128 |
```
|
| 129 |
|
| 130 |
+
## Module Architecture
|
| 131 |
|
| 132 |
+
The codebase is organized into specialized modules under `src/`:
|
|
|
|
|
|
|
|
|
|
| 133 |
|
| 134 |
+
### `src/config/` - Configuration Management
|
| 135 |
|
| 136 |
+
Session configuration and project constants for the detection pipeline.
|
| 137 |
|
| 138 |
+
| Component | Description |
|
| 139 |
+
|-----------|-------------|
|
| 140 |
+
| `SessionConfig` | Pydantic model with video path, region coordinates, generated paths |
|
| 141 |
+
| `save_session_config()` | Saves config JSON, template image, playclock/timeout configs |
|
| 142 |
+
| `load_session_config()` | Loads session from JSON file |
|
| 143 |
+
| Project constants | `PROJECT_ROOT`, `OUTPUT_DIR`, test segment bounds |
|
| 144 |
|
| 145 |
+
### `src/detection/` - Per-Frame Detection
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 146 |
|
| 147 |
+
Components that process individual video frames to detect visual elements.
|
| 148 |
|
| 149 |
+
| Component | Description |
|
| 150 |
+
|-----------|-------------|
|
| 151 |
+
| `DetectScoreBug` | Template matching with split-detection for partial overlays |
|
| 152 |
+
| `DetectTimeouts` | Reads timeout indicators via oval brightness analysis |
|
| 153 |
+
| `ScorebugDetection` | Detection result model (detected, confidence, bbox) |
|
| 154 |
+
| `TimeoutReading` | Timeout reading result (home/away counts, oval states) |
|
| 155 |
|
| 156 |
+
**Detection Modes:**
|
| 157 |
+
- **Full-frame search**: Template matching across entire frame (initial detection)
|
| 158 |
+
- **Fixed-region check**: Fast verification at known location (subsequent frames)
|
| 159 |
+
- **Split detection**: Left/right half matching for robustness to overlays
|
| 160 |
|
| 161 |
+
### `src/pipeline/` - Video Processing Orchestration
|
|
|
|
|
|
|
| 162 |
|
| 163 |
+
Orchestrates the complete play extraction pipeline with streaming processing.
|
|
|
|
| 164 |
|
| 165 |
+
| Component | Description |
|
| 166 |
+
|-----------|-------------|
|
| 167 |
+
| `PlayExtractor` | Main pipeline class coordinating all extraction components |
|
| 168 |
+
| `run_extraction()` | High-level extraction function with filtering |
|
| 169 |
+
| `TemplateBuildingPass` | Pass 0: Builds digit templates using OCR on scorebug-verified frames |
|
| 170 |
+
| `process_video_parallel()` | Parallel chunk processing for speedup |
|
| 171 |
|
| 172 |
+
**Pipeline Stages:**
|
| 173 |
+
1. **Pass 0** (if needed): Build digit templates using OCR
|
| 174 |
+
2. **Streaming pass**: Read frame → detect scorebug → extract region → template match → state machine
|
| 175 |
+
3. **Finalize**: Post-hoc clock reset identification, play merging, result building
|
| 176 |
|
| 177 |
+
### `src/readers/` - Value Reading
|
|
|
|
| 178 |
|
| 179 |
+
Reads values from detected regions using template matching.
|
|
|
|
|
|
|
| 180 |
|
| 181 |
+
| Component | Description |
|
| 182 |
+
|-----------|-------------|
|
| 183 |
+
| `ReadPlayClock` | Template-based clock reading with dual-mode detection |
|
| 184 |
+
| `backfill_missing_readings()` | Gap interpolation for missing readings |
|
| 185 |
+
| `PlayClockReading` | Reading result (detected, value, confidence) |
|
| 186 |
+
| `TemplatePlayClockReading` | Detailed result with per-digit match info |
|
|
|
|
| 187 |
|
| 188 |
+
**Dual-Mode Detection:**
|
| 189 |
+
- **Single-digit (0-9)**: Digit is CENTER-aligned
|
| 190 |
+
- **Double-digit (10-40)**: Tens on LEFT, ones on RIGHT
|
| 191 |
|
| 192 |
+
### `src/setup/` - One-Time Preparation
|
| 193 |
|
| 194 |
+
Components for building digit templates before video processing.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 195 |
|
| 196 |
+
| Component | Description |
|
| 197 |
+
|-----------|-------------|
|
| 198 |
+
| `DigitTemplateBuilder` | Collects OCR samples, extracts digits, builds averaged templates |
|
| 199 |
+
| `DigitTemplateLibrary` | Stores/loads templates (25 total for full coverage) |
|
| 200 |
+
| `PlayClockRegionExtractor` | Extracts play clock region, preprocesses for OCR |
|
| 201 |
+
| `DigitSample` / `DigitTemplate` | Data models for samples and templates |
|
| 202 |
|
| 203 |
+
**Template Coverage (25 templates needed):**
|
| 204 |
+
- 10 ones-center templates (single-digit: 0-9)
|
| 205 |
+
- 10 ones-right templates (double-digit: 0-9)
|
| 206 |
+
- 4 tens-left templates (1, 2, 3, 4)
|
| 207 |
+
- 1 blank template (tens position for single digits)
|
| 208 |
|
| 209 |
+
### `src/tracking/` - Cross-Frame State Management
|
| 210 |
|
| 211 |
+
Tracks state across frames to identify play boundaries.
|
|
|
|
|
|
|
| 212 |
|
| 213 |
+
| Component | Description |
|
| 214 |
+
|-----------|-------------|
|
| 215 |
+
| `TrackPlayState` | Main state machine facade (delegates to helpers) |
|
| 216 |
+
| `PlayLifecycle` | Manages play start, end, and state reset |
|
| 217 |
+
| `PlayIdentificationChecks` | Clock analysis for play boundary detection |
|
| 218 |
+
| `StateHandlers` | State machine transition handlers |
|
| 219 |
+
| `PlayMerger` | Merges plays from multiple sources, deduplicates |
|
| 220 |
+
| `ClockResetIdentifier` | Post-hoc 40→25 transition analysis |
|
| 221 |
|
| 222 |
+
**State Machine States:**
|
| 223 |
+
1. **IDLE** - Waiting for scorebug
|
| 224 |
+
2. **PRE_SNAP** - Clock ticking down before snap
|
| 225 |
+
3. **PLAY_IN_PROGRESS** - Ball snapped, play is live
|
| 226 |
+
4. **POST_PLAY** - Play ended, transitioning
|
| 227 |
+
5. **NO_SCOREBUG** - Scorebug lost (e.g., during replay)
|
| 228 |
|
| 229 |
+
### `src/ui/` - Interactive Region Selection
|
|
|
|
|
|
|
| 230 |
|
| 231 |
+
Tools for interactively selecting regions in video frames.
|
|
|
|
|
|
|
|
|
|
| 232 |
|
| 233 |
+
| Component | Description |
|
| 234 |
+
|-----------|-------------|
|
| 235 |
+
| `RegionSelector` | Mouse callback handler (two-click or drag modes) |
|
| 236 |
+
| `ScorebugSelectionSession` | Full-frame scorebug selection with frame navigation |
|
| 237 |
+
| `PlayClockSelectionSession` | Zoomed scorebug view for precise clock selection |
|
| 238 |
+
| `TimeoutSelectionSession` | Padded view for timeout indicator selection |
|
| 239 |
+
| `BBox` | Bounding box model with scaling/offset utilities |
|
| 240 |
+
|
| 241 |
+
### `src/utils/` - Shared Utilities
|
| 242 |
+
|
| 243 |
+
Utilities shared across modules to avoid code duplication.
|
| 244 |
+
|
| 245 |
+
| Component | Description |
|
| 246 |
+
|-----------|-------------|
|
| 247 |
+
| `detect_red_digits()` | Detects red digits (clock ≤ 5 seconds) |
|
| 248 |
+
| `normalize_to_grayscale()` | Converts red digits to white for uniform templates |
|
| 249 |
+
| `preprocess_playclock_region()` | Full preprocessing pipeline for clock regions |
|
| 250 |
+
| `extract_*_region()` | Digit region extraction (left, right, center, far-left) |
|
| 251 |
+
| `create_frame_result()` | Standardized frame result dictionary factory |
|
| 252 |
|
| 253 |
+
### `src/video/` - Video I/O and FFmpeg Operations
|
| 254 |
|
| 255 |
+
Video processing, frame extraction, and clip generation.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 256 |
|
| 257 |
+
| Component | Description |
|
| 258 |
+
|-----------|-------------|
|
| 259 |
+
| `ThreadedFrameReader` | Background thread for overlapped video I/O |
|
| 260 |
+
| `extract_sample_frames()` | Extract frames at intervals for selection |
|
| 261 |
+
| `generate_clips()` | Generate play clips with configurable method |
|
| 262 |
+
| `concatenate_clips()` | Combine clips into highlight video |
|
| 263 |
|
| 264 |
+
**Clip Methods:**
|
| 265 |
+
- `stream_copy`: Fastest, keyframe-aligned cuts
|
| 266 |
+
- `reencode`: Frame-accurate, best compression
|
| 267 |
+
- `ultrafast`: Frame-accurate, faster encoding
|
|
|
|
| 268 |
|
| 269 |
## Detection Algorithm
|
| 270 |
|
|
|
|
| 276 |
| **Special** | 40→25 transition | XPs, FGs, punts, injury stoppages |
|
| 277 |
| **Timeout** | Timeout indicator change | Team-called timeouts |
|
| 278 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 279 |
### Backward Counting (Primary Method for Play End)
|
| 280 |
|
| 281 |
When the play clock reappears showing value X (where X < 40):
|
| 282 |
```
|
| 283 |
+
play_end_time = current_time - (clock_base - X)
|
| 284 |
```
|
| 285 |
|
| 286 |
Example: If at timestamp 100.0s we see play clock at 35, play ended at:
|
|
|
|
| 288 |
100.0 - (40 - 35) = 95.0s
|
| 289 |
```
|
| 290 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 291 |
### 3-Class Clock Reset Classification
|
| 292 |
|
| 293 |
When a 40→25 transition is detected:
|
|
|
|
| 297 |
|
| 298 |
### Quiet Time Filter
|
| 299 |
|
| 300 |
+
A 10-second quiet time is enforced after normal plays end. During this window, special/timeout plays are rejected to filter false positives from penalties.
|
|
|
|
|
|
|
| 301 |
|
| 302 |
### Play Filtering
|
| 303 |
|
| 304 |
+
Plays shorter than 3 seconds are automatically filtered out (typically clock operator errors).
|
| 305 |
+
|
| 306 |
+
## Usage
|
| 307 |
+
|
| 308 |
+
### Main Pipeline
|
| 309 |
+
|
| 310 |
+
```bash
|
| 311 |
+
# Interactive mode - select video from list
|
| 312 |
+
python main.py
|
| 313 |
+
|
| 314 |
+
# Specify video directly
|
| 315 |
+
python main.py --video "full_videos/OSU vs ND 01.20.25.mp4"
|
| 316 |
+
|
| 317 |
+
# Process specific segment
|
| 318 |
+
python main.py --video "full_videos/game.mp4" --start 30:00 --end 60:00
|
| 319 |
+
|
| 320 |
+
# Testing mode (10-minute segment from test video)
|
| 321 |
+
python main.py --testing
|
| 322 |
+
|
| 323 |
+
# Parallel processing (faster)
|
| 324 |
+
python main.py --testing --workers 2
|
| 325 |
+
|
| 326 |
+
# Skip clip generation (detection only)
|
| 327 |
+
python main.py --testing --skip-clips
|
| 328 |
+
|
| 329 |
+
# Generate individual play clips
|
| 330 |
+
python main.py --testing --all-plays-debug
|
| 331 |
+
|
| 332 |
+
# Use frame-accurate encoding
|
| 333 |
+
python main.py --testing --clip-method reencode
|
| 334 |
+
```
|
| 335 |
+
|
| 336 |
+
### Pipeline Phases
|
| 337 |
+
|
| 338 |
+
1. **Video selection** - Choose video file to process
|
| 339 |
+
2. **Scorebug region selection** - Click to define scorebug location
|
| 340 |
+
3. **Play clock region selection** - Click to define play clock within scorebug
|
| 341 |
+
4. **Timeout region selection** - Click to define timeout indicators for each team
|
| 342 |
+
5. **Play detection** - Template building + streaming extraction
|
| 343 |
+
6. **Clip extraction** - Generate video clips for each play
|
| 344 |
|
| 345 |
## Output Files
|
| 346 |
|
|
|
|
| 351 |
| `output/OSU_vs_Tenn_12_21_24_config.json` | Session configuration |
|
| 352 |
| `output/OSU_vs_Tenn_12_21_24_plays.json` | Detected plays with timestamps |
|
| 353 |
| `output/OSU_vs_Tenn_12_21_24_template.png` | Scorebug template image |
|
| 354 |
+
| `output/OSU_vs_Tenn_12_21_24_playclock_config.json` | Play clock region config |
|
| 355 |
+
| `output/OSU_vs_Tenn_12_21_24_timeout_config.json` | Timeout tracker config |
|
| 356 |
| `output/clips/OSU_vs_Tenn_12_21_24_all_plays.mp4` | Concatenated highlight video |
|
| 357 |
|
| 358 |
### Play JSON Format
|
| 359 |
|
|
|
|
| 360 |
```json
|
| 361 |
{
|
| 362 |
"play_number": 1,
|
|
|
|
| 372 |
|
| 373 |
Play types: `normal`, `special`, `timeout`
|
| 374 |
|
| 375 |
+
## Performance
|
| 376 |
+
|
| 377 |
+
### Template Matching vs OCR
|
| 378 |
+
|
| 379 |
+
| Metric | EasyOCR | Template Matching |
|
| 380 |
+
|--------|---------|-------------------|
|
| 381 |
+
| Time per frame | 48.9ms | 0.3ms |
|
| 382 |
+
| Speedup | 1x | **163x** |
|
| 383 |
|
| 384 |
+
### Processing Modes
|
|
|
|
|
|
|
|
|
|
|
|
|
| 385 |
|
| 386 |
+
| Mode | Description | Use Case |
|
| 387 |
+
|------|-------------|----------|
|
| 388 |
+
| Sequential | Single-threaded streaming | Default, simpler |
|
| 389 |
+
| Parallel (2 workers) | Multi-process chunks | ~26% faster |
|
| 390 |
|
| 391 |
+
## Setup
|
| 392 |
+
|
| 393 |
+
### Prerequisites
|
| 394 |
+
- Python 3.13+
|
| 395 |
+
- ffmpeg installed (`brew install ffmpeg` on macOS)
|
| 396 |
|
| 397 |
+
### Installation
|
|
|
|
|
|
|
|
|
|
| 398 |
|
| 399 |
+
```bash
|
| 400 |
+
# Create and activate virtual environment
|
| 401 |
+
python -m venv .venv
|
| 402 |
+
source .venv/bin/activate
|
| 403 |
|
| 404 |
+
# Install dependencies
|
| 405 |
+
pip install -e .
|
| 406 |
+
```
|
| 407 |
|
| 408 |
## Development
|
| 409 |
|
| 410 |
### Code Style
|
|
|
|
|
|
|
|
|
|
| 411 |
|
| 412 |
```bash
|
| 413 |
+
# Format with Black (line-length=180)
|
| 414 |
python -m black src/ scripts/ main.py --line-length=180
|
|
|
|
| 415 |
|
| 416 |
+
# Lint
|
|
|
|
|
|
|
| 417 |
pylint src/ main.py
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 418 |
```
|
| 419 |
|
| 420 |
+
### Testing
|
| 421 |
+
|
| 422 |
```bash
|
| 423 |
+
# Quick test (3 minutes, ~5 plays)
|
| 424 |
+
python main.py --testing --start 38:40 --end 41:40
|
| 425 |
|
| 426 |
+
# Extended test (10 minutes, ~12 plays)
|
| 427 |
+
python main.py --testing
|
| 428 |
```
|
| 429 |
|
| 430 |
## Dependencies
|
| 431 |
|
| 432 |
- `opencv-python>=4.8.0` - Video processing
|
| 433 |
- `numpy>=1.24.0` - Array operations
|
| 434 |
+
- `pydantic>=2.0.0` - Data validation
|
| 435 |
+
- `easyocr>=1.7.0` - OCR for template building (Pass 0 only)
|
| 436 |
- `torch>=2.0.0` - Required by EasyOCR
|
| 437 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 438 |
## License
|
| 439 |
|
| 440 |
Private project - All rights reserved
|
main.py
CHANGED
|
@@ -36,10 +36,6 @@ import sys
|
|
| 36 |
from pathlib import Path
|
| 37 |
from typing import Any, Dict, Optional, Tuple
|
| 38 |
|
| 39 |
-
# Add src to path for imports
|
| 40 |
-
sys.path.insert(0, str(Path(__file__).parent / "src"))
|
| 41 |
-
|
| 42 |
-
# pylint: disable=wrong-import-position
|
| 43 |
from config import (
|
| 44 |
SessionConfig,
|
| 45 |
save_session_config,
|
|
@@ -62,7 +58,7 @@ from ui import (
|
|
| 62 |
)
|
| 63 |
from ui.api import extract_sample_frames_for_selection
|
| 64 |
from video import generate_clips
|
| 65 |
-
from pipeline import
|
| 66 |
|
| 67 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 68 |
logger = logging.getLogger(__name__)
|
|
@@ -372,19 +368,19 @@ def _print_region_summary(config: SessionConfig) -> None:
|
|
| 372 |
# =============================================================================
|
| 373 |
|
| 374 |
|
| 375 |
-
def
|
| 376 |
"""
|
| 377 |
-
Phase 3: Run play
|
| 378 |
|
| 379 |
Args:
|
| 380 |
session_config: Configuration with video path and regions.
|
| 381 |
num_workers: Number of parallel workers.
|
| 382 |
|
| 383 |
Returns:
|
| 384 |
-
|
| 385 |
"""
|
| 386 |
print("\n" + "=" * 60)
|
| 387 |
-
return
|
| 388 |
|
| 389 |
|
| 390 |
# =============================================================================
|
|
@@ -496,8 +492,8 @@ def main() -> int:
|
|
| 496 |
if session_config is None:
|
| 497 |
return 1
|
| 498 |
|
| 499 |
-
# Phase 3:
|
| 500 |
-
results =
|
| 501 |
|
| 502 |
# Phase 4: Clip Generation
|
| 503 |
clip_timing = {}
|
|
|
|
| 36 |
from pathlib import Path
|
| 37 |
from typing import Any, Dict, Optional, Tuple
|
| 38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
from config import (
|
| 40 |
SessionConfig,
|
| 41 |
save_session_config,
|
|
|
|
| 58 |
)
|
| 59 |
from ui.api import extract_sample_frames_for_selection
|
| 60 |
from video import generate_clips
|
| 61 |
+
from pipeline import run_extraction, print_results_summary
|
| 62 |
|
| 63 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 64 |
logger = logging.getLogger(__name__)
|
|
|
|
| 368 |
# =============================================================================
|
| 369 |
|
| 370 |
|
| 371 |
+
def _phase3_extraction(session_config: SessionConfig, num_workers: int) -> Dict[str, Any]:
|
| 372 |
"""
|
| 373 |
+
Phase 3: Run play extraction on the video.
|
| 374 |
|
| 375 |
Args:
|
| 376 |
session_config: Configuration with video path and regions.
|
| 377 |
num_workers: Number of parallel workers.
|
| 378 |
|
| 379 |
Returns:
|
| 380 |
+
Extraction results dictionary.
|
| 381 |
"""
|
| 382 |
print("\n" + "=" * 60)
|
| 383 |
+
return run_extraction(session_config, OUTPUT_DIR, num_workers=num_workers)
|
| 384 |
|
| 385 |
|
| 386 |
# =============================================================================
|
|
|
|
| 492 |
if session_config is None:
|
| 493 |
return 1
|
| 494 |
|
| 495 |
+
# Phase 3: Extraction
|
| 496 |
+
results = _phase3_extraction(session_config, args.parallel)
|
| 497 |
|
| 498 |
# Phase 4: Clip Generation
|
| 499 |
clip_timing = {}
|
pyproject.toml
CHANGED
|
@@ -10,6 +10,7 @@ dependencies = [
|
|
| 10 |
"opencv-python>=4.8.0",
|
| 11 |
"numpy>=1.24.0",
|
| 12 |
"pillow>=10.0.0",
|
|
|
|
| 13 |
"pytesseract>=0.3.10",
|
| 14 |
]
|
| 15 |
|
|
@@ -18,17 +19,30 @@ dev = [
|
|
| 18 |
"black>=24.0.0",
|
| 19 |
"pylint>=3.0.0",
|
| 20 |
"pylint-pydantic>=0.3.0",
|
|
|
|
| 21 |
]
|
| 22 |
|
| 23 |
[build-system]
|
| 24 |
requires = ["setuptools>=61.0", "wheel"]
|
| 25 |
build-backend = "setuptools.build_meta"
|
| 26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
[tool.mypy]
|
| 28 |
python_version = "3.13"
|
| 29 |
strict = true
|
| 30 |
ignore_missing_imports = true
|
| 31 |
exclude = ["tests", "scripts", "examples", "static"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
[tool.black]
|
| 34 |
line-length = 180
|
|
|
|
| 10 |
"opencv-python>=4.8.0",
|
| 11 |
"numpy>=1.24.0",
|
| 12 |
"pillow>=10.0.0",
|
| 13 |
+
"pydantic>=2.0.0",
|
| 14 |
"pytesseract>=0.3.10",
|
| 15 |
]
|
| 16 |
|
|
|
|
| 19 |
"black>=24.0.0",
|
| 20 |
"pylint>=3.0.0",
|
| 21 |
"pylint-pydantic>=0.3.0",
|
| 22 |
+
"mypy>=1.0.0",
|
| 23 |
]
|
| 24 |
|
| 25 |
[build-system]
|
| 26 |
requires = ["setuptools>=61.0", "wheel"]
|
| 27 |
build-backend = "setuptools.build_meta"
|
| 28 |
|
| 29 |
+
[tool.setuptools.package-dir]
|
| 30 |
+
"" = "src"
|
| 31 |
+
|
| 32 |
+
[tool.setuptools.packages.find]
|
| 33 |
+
where = ["src"]
|
| 34 |
+
|
| 35 |
[tool.mypy]
|
| 36 |
python_version = "3.13"
|
| 37 |
strict = true
|
| 38 |
ignore_missing_imports = true
|
| 39 |
exclude = ["tests", "scripts", "examples", "static"]
|
| 40 |
+
plugins = ["pydantic.mypy"]
|
| 41 |
+
|
| 42 |
+
[tool.pydantic-mypy]
|
| 43 |
+
init_forbid_extra = true
|
| 44 |
+
init_typed = true
|
| 45 |
+
warn_required_dynamic_aliases = true
|
| 46 |
|
| 47 |
[tool.black]
|
| 48 |
line-length = 180
|
scripts/archive/v2/benchmark_ocr.py
CHANGED
|
@@ -20,12 +20,10 @@ from typing import List, Tuple, Optional, Dict
|
|
| 20 |
import cv2
|
| 21 |
import numpy as np
|
| 22 |
|
| 23 |
-
|
| 24 |
-
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 25 |
-
sys.path.insert(0, str(PROJECT_ROOT / "src"))
|
| 26 |
|
| 27 |
-
#
|
| 28 |
-
|
| 29 |
|
| 30 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 31 |
logger = logging.getLogger(__name__)
|
|
@@ -51,7 +49,7 @@ def load_play_clock_config() -> Tuple[int, int, int, int]:
|
|
| 51 |
return (data["x_offset"], data["y_offset"], data["width"], data["height"])
|
| 52 |
|
| 53 |
|
| 54 |
-
def extract_test_frames(video_path: Path, detector:
|
| 55 |
"""Extract frames with scorebug for testing."""
|
| 56 |
cap = cv2.VideoCapture(str(video_path))
|
| 57 |
if not cap.isOpened():
|
|
@@ -521,7 +519,7 @@ def main():
|
|
| 521 |
logger.info(f"Play clock config: {config}")
|
| 522 |
|
| 523 |
# Initialize scorebug detector
|
| 524 |
-
detector =
|
| 525 |
|
| 526 |
# Extract test frames
|
| 527 |
logger.info(f"Extracting {len(TEST_TIMESTAMPS)} test frames...")
|
|
|
|
| 20 |
import cv2
|
| 21 |
import numpy as np
|
| 22 |
|
| 23 |
+
from detection import DetectScoreBug
|
|
|
|
|
|
|
| 24 |
|
| 25 |
+
# Path reference for constants
|
| 26 |
+
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 27 |
|
| 28 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 29 |
logger = logging.getLogger(__name__)
|
|
|
|
| 49 |
return (data["x_offset"], data["y_offset"], data["width"], data["height"])
|
| 50 |
|
| 51 |
|
| 52 |
+
def extract_test_frames(video_path: Path, detector: DetectScoreBug, timestamps: List[float]) -> List[Tuple[float, np.ndarray, Tuple[int, int, int, int]]]:
|
| 53 |
"""Extract frames with scorebug for testing."""
|
| 54 |
cap = cv2.VideoCapture(str(video_path))
|
| 55 |
if not cap.isOpened():
|
|
|
|
| 519 |
logger.info(f"Play clock config: {config}")
|
| 520 |
|
| 521 |
# Initialize scorebug detector
|
| 522 |
+
detector = DetectScoreBug(template_path=str(TEMPLATE_PATH))
|
| 523 |
|
| 524 |
# Extract test frames
|
| 525 |
logger.info(f"Extracting {len(TEST_TIMESTAMPS)} test frames...")
|
scripts/archive/v2/benchmark_ocr_batching.py
CHANGED
|
@@ -25,12 +25,13 @@ from typing import List, Tuple, Optional
|
|
| 25 |
import cv2
|
| 26 |
import numpy as np
|
| 27 |
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
|
| 32 |
-
|
| 33 |
-
|
|
|
|
| 34 |
|
| 35 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 36 |
logger = logging.getLogger(__name__)
|
|
@@ -57,8 +58,8 @@ def extract_frames_sequential(
|
|
| 57 |
start_time: float,
|
| 58 |
end_time: float,
|
| 59 |
frame_interval: float,
|
| 60 |
-
scorebug_detector:
|
| 61 |
-
clock_reader:
|
| 62 |
) -> Tuple[List[FrameData], dict]:
|
| 63 |
"""
|
| 64 |
Extract and preprocess frames using sequential reading (new optimized approach).
|
|
@@ -293,11 +294,11 @@ def main():
|
|
| 293 |
config = json.load(f)
|
| 294 |
|
| 295 |
# Initialize detectors
|
| 296 |
-
scorebug_detector =
|
| 297 |
fixed_region = (config["scorebug_x"], config["scorebug_y"], config["scorebug_width"], config["scorebug_height"])
|
| 298 |
scorebug_detector.set_fixed_region(fixed_region)
|
| 299 |
|
| 300 |
-
clock_reader =
|
| 301 |
|
| 302 |
# Extract and preprocess all frames
|
| 303 |
logger.info("\n--- Frame Extraction Phase ---")
|
|
|
|
| 25 |
import cv2
|
| 26 |
import numpy as np
|
| 27 |
|
| 28 |
+
from detection import DetectScoreBug
|
| 29 |
+
from readers import PlayClockReading
|
| 30 |
+
from setup import PlayClockRegionExtractor
|
| 31 |
|
| 32 |
+
# Path reference for constants
|
| 33 |
+
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 34 |
+
# Note: _get_easyocr_reader was removed during migration to template matching
|
| 35 |
|
| 36 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 37 |
logger = logging.getLogger(__name__)
|
|
|
|
| 58 |
start_time: float,
|
| 59 |
end_time: float,
|
| 60 |
frame_interval: float,
|
| 61 |
+
scorebug_detector: DetectScoreBug,
|
| 62 |
+
clock_reader: PlayClockRegionExtractor,
|
| 63 |
) -> Tuple[List[FrameData], dict]:
|
| 64 |
"""
|
| 65 |
Extract and preprocess frames using sequential reading (new optimized approach).
|
|
|
|
| 294 |
config = json.load(f)
|
| 295 |
|
| 296 |
# Initialize detectors
|
| 297 |
+
scorebug_detector = DetectScoreBug(template_path=str(template_path))
|
| 298 |
fixed_region = (config["scorebug_x"], config["scorebug_y"], config["scorebug_width"], config["scorebug_height"])
|
| 299 |
scorebug_detector.set_fixed_region(fixed_region)
|
| 300 |
|
| 301 |
+
clock_reader = PlayClockRegionExtractor(region_config_path=str(playclock_config_path))
|
| 302 |
|
| 303 |
# Extract and preprocess all frames
|
| 304 |
logger.info("\n--- Frame Extraction Phase ---")
|
scripts/archive/v2/detect_plays.py
CHANGED
|
@@ -24,12 +24,10 @@ import logging
|
|
| 24 |
import sys
|
| 25 |
from pathlib import Path
|
| 26 |
|
| 27 |
-
# Add src to path for imports (scripts/archive/ -> project root)
|
| 28 |
-
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 29 |
-
sys.path.insert(0, str(PROJECT_ROOT / "src"))
|
| 30 |
-
|
| 31 |
-
# pylint: disable=wrong-import-position
|
| 32 |
from pipeline import PlayDetector
|
|
|
|
|
|
|
|
|
|
| 33 |
from pipeline.play_detector import DetectionConfig
|
| 34 |
|
| 35 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
|
|
|
| 24 |
import sys
|
| 25 |
from pathlib import Path
|
| 26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
from pipeline import PlayDetector
|
| 28 |
+
|
| 29 |
+
# Path reference for constants
|
| 30 |
+
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 31 |
from pipeline.play_detector import DetectionConfig
|
| 32 |
|
| 33 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
scripts/archive/v2/diagnose_play_clock.py
CHANGED
|
@@ -18,12 +18,11 @@ from pathlib import Path
|
|
| 18 |
import cv2
|
| 19 |
import numpy as np
|
| 20 |
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
sys.path.insert(0, str(PROJECT_ROOT / "src"))
|
| 24 |
|
| 25 |
-
#
|
| 26 |
-
|
| 27 |
|
| 28 |
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 29 |
logger = logging.getLogger(__name__)
|
|
@@ -38,14 +37,14 @@ OUTPUT_DIR = PROJECT_ROOT / "output" / "debug"
|
|
| 38 |
TEST_TIMESTAMPS = [2320.0, 2321.0, 2322.0, 2325.0, 2328.0] # Sample timestamps in seconds
|
| 39 |
|
| 40 |
|
| 41 |
-
def extract_debug_info(video_path: Path, detector:
|
| 42 |
"""
|
| 43 |
Extract frames and save debug visualizations.
|
| 44 |
|
| 45 |
Args:
|
| 46 |
video_path: Path to video file
|
| 47 |
-
detector:
|
| 48 |
-
reader:
|
| 49 |
timestamps: List of timestamps to analyze
|
| 50 |
"""
|
| 51 |
cap = cv2.VideoCapture(str(video_path))
|
|
@@ -92,7 +91,7 @@ def extract_debug_info(video_path: Path, detector: ScorebugDetector, reader: Pla
|
|
| 92 |
# Extract play clock region
|
| 93 |
play_clock_region = frame[pc_y : pc_y + pc_h, pc_x : pc_x + pc_w].copy()
|
| 94 |
|
| 95 |
-
# Preprocess for OCR (same as
|
| 96 |
preprocessed = preprocess_for_debug(play_clock_region)
|
| 97 |
|
| 98 |
# Run OCR and get result
|
|
@@ -144,7 +143,7 @@ def extract_debug_info(video_path: Path, detector: ScorebugDetector, reader: Pla
|
|
| 144 |
|
| 145 |
def preprocess_for_debug(region: np.ndarray) -> np.ndarray:
|
| 146 |
"""
|
| 147 |
-
Preprocess the play clock region for OCR (same as
|
| 148 |
Returns the preprocessed image for debugging.
|
| 149 |
"""
|
| 150 |
# Convert to grayscale
|
|
@@ -195,8 +194,8 @@ def main():
|
|
| 195 |
|
| 196 |
# Initialize
|
| 197 |
logger.info("Initializing detectors...")
|
| 198 |
-
detector =
|
| 199 |
-
reader =
|
| 200 |
|
| 201 |
# Run diagnostic
|
| 202 |
logger.info("Extracting debug info for %d timestamps...", len(TEST_TIMESTAMPS))
|
|
|
|
| 18 |
import cv2
|
| 19 |
import numpy as np
|
| 20 |
|
| 21 |
+
from detection import DetectScoreBug
|
| 22 |
+
from setup import PlayClockRegionExtractor
|
|
|
|
| 23 |
|
| 24 |
+
# Path reference for constants
|
| 25 |
+
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 26 |
|
| 27 |
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 28 |
logger = logging.getLogger(__name__)
|
|
|
|
| 37 |
TEST_TIMESTAMPS = [2320.0, 2321.0, 2322.0, 2325.0, 2328.0] # Sample timestamps in seconds
|
| 38 |
|
| 39 |
|
| 40 |
+
def extract_debug_info(video_path: Path, detector: DetectScoreBug, reader: PlayClockRegionExtractor, timestamps: list) -> None:
|
| 41 |
"""
|
| 42 |
Extract frames and save debug visualizations.
|
| 43 |
|
| 44 |
Args:
|
| 45 |
video_path: Path to video file
|
| 46 |
+
detector: DetectScoreBug instance
|
| 47 |
+
reader: PlayClockRegionExtractor instance
|
| 48 |
timestamps: List of timestamps to analyze
|
| 49 |
"""
|
| 50 |
cap = cv2.VideoCapture(str(video_path))
|
|
|
|
| 91 |
# Extract play clock region
|
| 92 |
play_clock_region = frame[pc_y : pc_y + pc_h, pc_x : pc_x + pc_w].copy()
|
| 93 |
|
| 94 |
+
# Preprocess for OCR (same as PlayClockRegionExtractor)
|
| 95 |
preprocessed = preprocess_for_debug(play_clock_region)
|
| 96 |
|
| 97 |
# Run OCR and get result
|
|
|
|
| 143 |
|
| 144 |
def preprocess_for_debug(region: np.ndarray) -> np.ndarray:
|
| 145 |
"""
|
| 146 |
+
Preprocess the play clock region for OCR (same as PlayClockRegionExtractor).
|
| 147 |
Returns the preprocessed image for debugging.
|
| 148 |
"""
|
| 149 |
# Convert to grayscale
|
|
|
|
| 194 |
|
| 195 |
# Initialize
|
| 196 |
logger.info("Initializing detectors...")
|
| 197 |
+
detector = DetectScoreBug(template_path=str(TEMPLATE_PATH))
|
| 198 |
+
reader = PlayClockRegionExtractor(region_config_path=str(CONFIG_PATH))
|
| 199 |
|
| 200 |
# Run diagnostic
|
| 201 |
logger.info("Extracting debug info for %d timestamps...", len(TEST_TIMESTAMPS))
|
scripts/archive/v2/diagnose_scorebug_threshold.py
CHANGED
|
@@ -17,9 +17,8 @@ from typing import List, Tuple, Dict
|
|
| 17 |
import cv2
|
| 18 |
import numpy as np
|
| 19 |
|
| 20 |
-
#
|
| 21 |
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 22 |
-
sys.path.insert(0, str(PROJECT_ROOT / "src"))
|
| 23 |
|
| 24 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 25 |
logger = logging.getLogger(__name__)
|
|
|
|
| 17 |
import cv2
|
| 18 |
import numpy as np
|
| 19 |
|
| 20 |
+
# Path reference for constants
|
| 21 |
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
|
|
|
| 22 |
|
| 23 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 24 |
logger = logging.getLogger(__name__)
|
scripts/archive/v2/identify_play_clock_region.py
CHANGED
|
@@ -26,12 +26,10 @@ from typing import Optional, Tuple, List, Any
|
|
| 26 |
|
| 27 |
import cv2
|
| 28 |
|
| 29 |
-
|
| 30 |
-
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 31 |
-
sys.path.insert(0, str(PROJECT_ROOT / "src"))
|
| 32 |
|
| 33 |
-
#
|
| 34 |
-
|
| 35 |
|
| 36 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 37 |
logger = logging.getLogger(__name__)
|
|
@@ -118,14 +116,14 @@ class RegionSelector:
|
|
| 118 |
|
| 119 |
|
| 120 |
def extract_frames_with_scorebug(
|
| 121 |
-
video_path: Path, detector:
|
| 122 |
) -> List[Tuple[float, Any, Tuple[int, int, int, int]]]:
|
| 123 |
"""
|
| 124 |
Extract frames from video where scorebug is detected.
|
| 125 |
|
| 126 |
Args:
|
| 127 |
video_path: Path to video file
|
| 128 |
-
detector:
|
| 129 |
start_time: Start time in seconds
|
| 130 |
end_time: End time in seconds
|
| 131 |
interval: Sampling interval in seconds
|
|
@@ -327,7 +325,7 @@ def main():
|
|
| 327 |
|
| 328 |
# Initialize scorebug detector
|
| 329 |
logger.info("Loading scorebug template: %s", TEMPLATE_PATH)
|
| 330 |
-
detector =
|
| 331 |
|
| 332 |
# Extract frames with scorebug
|
| 333 |
logger.info("Extracting frames from %ds to %ds...", START_TIME_SECONDS, END_TIME_SECONDS)
|
|
|
|
| 26 |
|
| 27 |
import cv2
|
| 28 |
|
| 29 |
+
from detection import DetectScoreBug
|
|
|
|
|
|
|
| 30 |
|
| 31 |
+
# Path reference for constants
|
| 32 |
+
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 33 |
|
| 34 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 35 |
logger = logging.getLogger(__name__)
|
|
|
|
| 116 |
|
| 117 |
|
| 118 |
def extract_frames_with_scorebug(
|
| 119 |
+
video_path: Path, detector: DetectScoreBug, start_time: float, end_time: float, interval: float
|
| 120 |
) -> List[Tuple[float, Any, Tuple[int, int, int, int]]]:
|
| 121 |
"""
|
| 122 |
Extract frames from video where scorebug is detected.
|
| 123 |
|
| 124 |
Args:
|
| 125 |
video_path: Path to video file
|
| 126 |
+
detector: DetectScoreBug instance
|
| 127 |
start_time: Start time in seconds
|
| 128 |
end_time: End time in seconds
|
| 129 |
interval: Sampling interval in seconds
|
|
|
|
| 325 |
|
| 326 |
# Initialize scorebug detector
|
| 327 |
logger.info("Loading scorebug template: %s", TEMPLATE_PATH)
|
| 328 |
+
detector = DetectScoreBug(template_path=str(TEMPLATE_PATH))
|
| 329 |
|
| 330 |
# Extract frames with scorebug
|
| 331 |
logger.info("Extracting frames from %ds to %ds...", START_TIME_SECONDS, END_TIME_SECONDS)
|
scripts/archive/v2/test_play_clock_reader.py
CHANGED
|
@@ -21,12 +21,12 @@ from typing import List, Tuple, Any
|
|
| 21 |
import cv2
|
| 22 |
import numpy as np
|
| 23 |
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
|
| 28 |
-
#
|
| 29 |
-
|
| 30 |
|
| 31 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 32 |
logger = logging.getLogger(__name__)
|
|
@@ -44,14 +44,14 @@ SAMPLE_INTERVAL_SECONDS = 0.5 # Sample every 0.5 seconds for detailed analysis
|
|
| 44 |
|
| 45 |
|
| 46 |
def extract_test_frames(
|
| 47 |
-
video_path: Path, detector:
|
| 48 |
) -> List[Tuple[float, Any, Tuple[int, int, int, int]]]:
|
| 49 |
"""
|
| 50 |
Extract frames from video where scorebug is detected for testing.
|
| 51 |
|
| 52 |
Args:
|
| 53 |
video_path: Path to video file
|
| 54 |
-
detector:
|
| 55 |
start_time: Start time in seconds
|
| 56 |
end_time: End time in seconds
|
| 57 |
interval: Sampling interval in seconds
|
|
@@ -92,13 +92,13 @@ def extract_test_frames(
|
|
| 92 |
return frames
|
| 93 |
|
| 94 |
|
| 95 |
-
def run_reading_tests(frames: List[Tuple[float, Any, Tuple[int, int, int, int]]], reader:
|
| 96 |
"""
|
| 97 |
Run play clock reader on all extracted frames.
|
| 98 |
|
| 99 |
Args:
|
| 100 |
frames: List of (timestamp, frame, scorebug_bbox) tuples
|
| 101 |
-
reader:
|
| 102 |
|
| 103 |
Returns:
|
| 104 |
List of (timestamp, reading, frame) tuples
|
|
@@ -119,7 +119,7 @@ def run_reading_tests(frames: List[Tuple[float, Any, Tuple[int, int, int, int]]]
|
|
| 119 |
|
| 120 |
def create_visualization_grid(
|
| 121 |
results: List[Tuple[float, PlayClockReading, Any]],
|
| 122 |
-
reader:
|
| 123 |
scorebug_bboxes: List[Tuple[int, int, int, int]],
|
| 124 |
output_path: Path,
|
| 125 |
grid_cols: int = 5,
|
|
@@ -129,7 +129,7 @@ def create_visualization_grid(
|
|
| 129 |
|
| 130 |
Args:
|
| 131 |
results: List of (timestamp, reading, frame) tuples
|
| 132 |
-
reader:
|
| 133 |
scorebug_bboxes: List of scorebug bounding boxes
|
| 134 |
output_path: Path to save the visualization
|
| 135 |
grid_cols: Number of columns in the grid
|
|
@@ -243,8 +243,8 @@ def main():
|
|
| 243 |
|
| 244 |
# Initialize detectors
|
| 245 |
logger.info("Initializing detectors...")
|
| 246 |
-
scorebug_detector =
|
| 247 |
-
play_clock_reader =
|
| 248 |
|
| 249 |
# Extract test frames
|
| 250 |
logger.info("Extracting test frames from %.1fs to %.1fs...", START_TIME_SECONDS, END_TIME_SECONDS)
|
|
|
|
| 21 |
import cv2
|
| 22 |
import numpy as np
|
| 23 |
|
| 24 |
+
from detection import DetectScoreBug
|
| 25 |
+
from readers import PlayClockReading
|
| 26 |
+
from setup import PlayClockRegionExtractor
|
| 27 |
|
| 28 |
+
# Path reference for constants
|
| 29 |
+
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
| 30 |
|
| 31 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 32 |
logger = logging.getLogger(__name__)
|
|
|
|
| 44 |
|
| 45 |
|
| 46 |
def extract_test_frames(
|
| 47 |
+
video_path: Path, detector: DetectScoreBug, start_time: float, end_time: float, interval: float, max_frames: int = 50
|
| 48 |
) -> List[Tuple[float, Any, Tuple[int, int, int, int]]]:
|
| 49 |
"""
|
| 50 |
Extract frames from video where scorebug is detected for testing.
|
| 51 |
|
| 52 |
Args:
|
| 53 |
video_path: Path to video file
|
| 54 |
+
detector: DetectScoreBug instance
|
| 55 |
start_time: Start time in seconds
|
| 56 |
end_time: End time in seconds
|
| 57 |
interval: Sampling interval in seconds
|
|
|
|
| 92 |
return frames
|
| 93 |
|
| 94 |
|
| 95 |
+
def run_reading_tests(frames: List[Tuple[float, Any, Tuple[int, int, int, int]]], reader: PlayClockRegionExtractor) -> List[Tuple[float, PlayClockReading, Any]]:
|
| 96 |
"""
|
| 97 |
Run play clock reader on all extracted frames.
|
| 98 |
|
| 99 |
Args:
|
| 100 |
frames: List of (timestamp, frame, scorebug_bbox) tuples
|
| 101 |
+
reader: PlayClockRegionExtractor instance
|
| 102 |
|
| 103 |
Returns:
|
| 104 |
List of (timestamp, reading, frame) tuples
|
|
|
|
| 119 |
|
| 120 |
def create_visualization_grid(
|
| 121 |
results: List[Tuple[float, PlayClockReading, Any]],
|
| 122 |
+
reader: PlayClockRegionExtractor,
|
| 123 |
scorebug_bboxes: List[Tuple[int, int, int, int]],
|
| 124 |
output_path: Path,
|
| 125 |
grid_cols: int = 5,
|
|
|
|
| 129 |
|
| 130 |
Args:
|
| 131 |
results: List of (timestamp, reading, frame) tuples
|
| 132 |
+
reader: PlayClockRegionExtractor for visualization
|
| 133 |
scorebug_bboxes: List of scorebug bounding boxes
|
| 134 |
output_path: Path to save the visualization
|
| 135 |
grid_cols: Number of columns in the grid
|
|
|
|
| 243 |
|
| 244 |
# Initialize detectors
|
| 245 |
logger.info("Initializing detectors...")
|
| 246 |
+
scorebug_detector = DetectScoreBug(template_path=str(TEMPLATE_PATH))
|
| 247 |
+
play_clock_reader = PlayClockRegionExtractor(region_config_path=str(CONFIG_PATH))
|
| 248 |
|
| 249 |
# Extract test frames
|
| 250 |
logger.info("Extracting test frames from %.1fs to %.1fs...", START_TIME_SECONDS, END_TIME_SECONDS)
|
scripts/archive/v2/test_state_machine.py
CHANGED
|
@@ -23,14 +23,9 @@ from dataclasses import dataclass
|
|
| 23 |
from pathlib import Path
|
| 24 |
from typing import List, Tuple, Optional
|
| 25 |
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
# pylint: disable=wrong-import-position
|
| 31 |
-
from detectors import PlayStateMachine, PlayEvent
|
| 32 |
-
from detectors.scorebug_detector import ScorebugDetection
|
| 33 |
-
from detectors.play_clock_reader import PlayClockReading
|
| 34 |
|
| 35 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 36 |
logger = logging.getLogger(__name__)
|
|
@@ -74,7 +69,7 @@ def run_test_sequence(name: str, frames: List[TestFrame], expected_plays: int) -
|
|
| 74 |
logger.info("TEST: %s", name)
|
| 75 |
logger.info("=" * 60)
|
| 76 |
|
| 77 |
-
state_machine =
|
| 78 |
detected_plays = []
|
| 79 |
|
| 80 |
for frame in frames:
|
|
@@ -299,7 +294,7 @@ def test_direct_vs_backward_comparison() -> bool:
|
|
| 299 |
|
| 300 |
def main():
|
| 301 |
"""Run all state machine tests."""
|
| 302 |
-
logger.info("
|
| 303 |
logger.info("=" * 60)
|
| 304 |
|
| 305 |
tests = [
|
|
|
|
| 23 |
from pathlib import Path
|
| 24 |
from typing import List, Tuple, Optional
|
| 25 |
|
| 26 |
+
from tracking import TrackPlayState, PlayEvent
|
| 27 |
+
from detection import ScorebugDetection
|
| 28 |
+
from readers import PlayClockReading
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 31 |
logger = logging.getLogger(__name__)
|
|
|
|
| 69 |
logger.info("TEST: %s", name)
|
| 70 |
logger.info("=" * 60)
|
| 71 |
|
| 72 |
+
state_machine = TrackPlayState()
|
| 73 |
detected_plays = []
|
| 74 |
|
| 75 |
for frame in frames:
|
|
|
|
| 294 |
|
| 295 |
def main():
|
| 296 |
"""Run all state machine tests."""
|
| 297 |
+
logger.info("TrackPlayState Test Suite")
|
| 298 |
logger.info("=" * 60)
|
| 299 |
|
| 300 |
tests = [
|
scripts/archive/v3/cache_detections.py
CHANGED
|
@@ -28,12 +28,9 @@ from typing import List, Dict, Any, Optional, Tuple
|
|
| 28 |
import cv2
|
| 29 |
import numpy as np
|
| 30 |
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
# pylint: disable=wrong-import-position
|
| 35 |
-
from detectors import ScorebugDetector, PlayClockReader, TimeoutTracker
|
| 36 |
-
from detectors.play_clock_reader import _get_easyocr_reader
|
| 37 |
|
| 38 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 39 |
logger = logging.getLogger(__name__)
|
|
@@ -85,13 +82,13 @@ def cache_detections(
|
|
| 85 |
|
| 86 |
# Initialize detectors
|
| 87 |
logger.info("Initializing detectors...")
|
| 88 |
-
scorebug_detector =
|
| 89 |
-
clock_reader =
|
| 90 |
|
| 91 |
# Initialize timeout tracker if config provided
|
| 92 |
-
timeout_tracker: Optional[
|
| 93 |
if timeout_config_path and timeout_config_path.exists():
|
| 94 |
-
timeout_tracker =
|
| 95 |
logger.info("Timeout tracker initialized from: %s", timeout_config_path)
|
| 96 |
else:
|
| 97 |
logger.info("Timeout tracker not configured (no config or file not found)")
|
|
|
|
| 28 |
import cv2
|
| 29 |
import numpy as np
|
| 30 |
|
| 31 |
+
from detection import DetectScoreBug, TrackTimeouts
|
| 32 |
+
from setup import PlayClockRegionExtractor
|
| 33 |
+
# Note: _get_easyocr_reader was removed during migration to template matching
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 36 |
logger = logging.getLogger(__name__)
|
|
|
|
| 82 |
|
| 83 |
# Initialize detectors
|
| 84 |
logger.info("Initializing detectors...")
|
| 85 |
+
scorebug_detector = DetectScoreBug(template_path=str(template_path), use_split_detection=True)
|
| 86 |
+
clock_reader = PlayClockRegionExtractor(region_config_path=str(clock_config_path))
|
| 87 |
|
| 88 |
# Initialize timeout tracker if config provided
|
| 89 |
+
timeout_tracker: Optional[TrackTimeouts] = None
|
| 90 |
if timeout_config_path and timeout_config_path.exists():
|
| 91 |
+
timeout_tracker = TrackTimeouts(config_path=str(timeout_config_path))
|
| 92 |
logger.info("Timeout tracker initialized from: %s", timeout_config_path)
|
| 93 |
else:
|
| 94 |
logger.info("Timeout tracker not configured (no config or file not found)")
|
scripts/archive/v3/configure_timeout_tracker.py
CHANGED
|
@@ -30,12 +30,7 @@ from typing import Optional, Tuple, List, Any
|
|
| 30 |
import cv2
|
| 31 |
import numpy as np
|
| 32 |
|
| 33 |
-
|
| 34 |
-
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
|
| 35 |
-
|
| 36 |
-
# pylint: disable=wrong-import-position
|
| 37 |
-
from detectors import ScorebugDetector
|
| 38 |
-
from detectors.timeout_tracker import TimeoutTracker, TimeoutRegionConfig
|
| 39 |
from ui import RegionSelector
|
| 40 |
|
| 41 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
|
@@ -54,14 +49,14 @@ SAMPLE_INTERVAL_SECONDS = 3 # Sample every 3 seconds
|
|
| 54 |
|
| 55 |
|
| 56 |
def extract_frames_with_scorebug(
|
| 57 |
-
video_path: Path, detector:
|
| 58 |
) -> List[Tuple[float, Any, Tuple[int, int, int, int]]]:
|
| 59 |
"""
|
| 60 |
Extract frames from video where scorebug is detected.
|
| 61 |
|
| 62 |
Args:
|
| 63 |
video_path: Path to video file
|
| 64 |
-
detector:
|
| 65 |
start_time: Start time in seconds
|
| 66 |
end_time: End time in seconds
|
| 67 |
interval: Sampling interval in seconds
|
|
@@ -122,7 +117,7 @@ def convert_to_absolute_bbox(relative_bbox: Tuple[int, int, int, int], scorebug_
|
|
| 122 |
return (abs_x, abs_y, original_rel_w, original_rel_h)
|
| 123 |
|
| 124 |
|
| 125 |
-
def test_timeout_detection(frame: np.ndarray, tracker:
|
| 126 |
"""Test and display timeout detection on a frame."""
|
| 127 |
if not tracker.is_configured():
|
| 128 |
logger.warning("Tracker not fully configured yet")
|
|
@@ -167,7 +162,7 @@ def run_region_selection(frames_with_scorebug: List[Tuple[float, Any, Tuple[int,
|
|
| 167 |
away_region: Optional[TimeoutRegionConfig] = None
|
| 168 |
|
| 169 |
# Create tracker for testing
|
| 170 |
-
tracker =
|
| 171 |
|
| 172 |
frame_idx = 0
|
| 173 |
scale_factor = 2 # Scale up for easier selection
|
|
@@ -313,7 +308,7 @@ def main():
|
|
| 313 |
|
| 314 |
# Initialize scorebug detector
|
| 315 |
logger.info("Loading scorebug template: %s", TEMPLATE_PATH)
|
| 316 |
-
detector =
|
| 317 |
|
| 318 |
# Extract frames with scorebug
|
| 319 |
logger.info("Extracting frames from %ds to %ds...", START_TIME_SECONDS, END_TIME_SECONDS)
|
|
|
|
| 30 |
import cv2
|
| 31 |
import numpy as np
|
| 32 |
|
| 33 |
+
from detection import DetectScoreBug, TrackTimeouts, TimeoutRegionConfig
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
from ui import RegionSelector
|
| 35 |
|
| 36 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
|
|
|
| 49 |
|
| 50 |
|
| 51 |
def extract_frames_with_scorebug(
|
| 52 |
+
video_path: Path, detector: DetectScoreBug, start_time: float, end_time: float, interval: float
|
| 53 |
) -> List[Tuple[float, Any, Tuple[int, int, int, int]]]:
|
| 54 |
"""
|
| 55 |
Extract frames from video where scorebug is detected.
|
| 56 |
|
| 57 |
Args:
|
| 58 |
video_path: Path to video file
|
| 59 |
+
detector: DetectScoreBug instance
|
| 60 |
start_time: Start time in seconds
|
| 61 |
end_time: End time in seconds
|
| 62 |
interval: Sampling interval in seconds
|
|
|
|
| 117 |
return (abs_x, abs_y, original_rel_w, original_rel_h)
|
| 118 |
|
| 119 |
|
| 120 |
+
def test_timeout_detection(frame: np.ndarray, tracker: TrackTimeouts) -> None:
|
| 121 |
"""Test and display timeout detection on a frame."""
|
| 122 |
if not tracker.is_configured():
|
| 123 |
logger.warning("Tracker not fully configured yet")
|
|
|
|
| 162 |
away_region: Optional[TimeoutRegionConfig] = None
|
| 163 |
|
| 164 |
# Create tracker for testing
|
| 165 |
+
tracker = TrackTimeouts()
|
| 166 |
|
| 167 |
frame_idx = 0
|
| 168 |
scale_factor = 2 # Scale up for easier selection
|
|
|
|
| 308 |
|
| 309 |
# Initialize scorebug detector
|
| 310 |
logger.info("Loading scorebug template: %s", TEMPLATE_PATH)
|
| 311 |
+
detector = DetectScoreBug(template_path=str(TEMPLATE_PATH))
|
| 312 |
|
| 313 |
# Extract frames with scorebug
|
| 314 |
logger.info("Extracting frames from %ds to %ds...", START_TIME_SECONDS, END_TIME_SECONDS)
|
scripts/archive/v3/debug_red_preprocessing.py
CHANGED
|
@@ -2,15 +2,11 @@
|
|
| 2 |
Debug script to visualize each preprocessing step for red play clock digits.
|
| 3 |
"""
|
| 4 |
|
| 5 |
-
import sys
|
| 6 |
from pathlib import Path
|
| 7 |
|
| 8 |
import cv2
|
| 9 |
import numpy as np
|
| 10 |
|
| 11 |
-
# Add src to path
|
| 12 |
-
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
|
| 13 |
-
|
| 14 |
|
| 15 |
def main():
|
| 16 |
# Load the red play clock region (frame 472849, which shows "5")
|
|
|
|
| 2 |
Debug script to visualize each preprocessing step for red play clock digits.
|
| 3 |
"""
|
| 4 |
|
|
|
|
| 5 |
from pathlib import Path
|
| 6 |
|
| 7 |
import cv2
|
| 8 |
import numpy as np
|
| 9 |
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
def main():
|
| 12 |
# Load the red play clock region (frame 472849, which shows "5")
|
scripts/archive/v3/diagnose_red_play_clock.py
CHANGED
|
@@ -8,18 +8,15 @@ Test case: 2:11:20 for 15 seconds in OSU vs Tenn 12.21.24.mkv
|
|
| 8 |
Expected: Play clock should tick down to 0 and stay at 0 for a few seconds.
|
| 9 |
"""
|
| 10 |
|
| 11 |
-
import sys
|
| 12 |
import logging
|
| 13 |
from pathlib import Path
|
| 14 |
|
| 15 |
import cv2
|
| 16 |
import numpy as np
|
| 17 |
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
from detectors.play_clock_reader import PlayClockReader, backfill_missing_readings
|
| 22 |
-
from detectors.scorebug_detector import ScorebugDetector
|
| 23 |
|
| 24 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 25 |
logger = logging.getLogger(__name__)
|
|
@@ -84,8 +81,8 @@ def main():
|
|
| 84 |
logger.info(f"Start time: {start_seconds:.2f}s")
|
| 85 |
|
| 86 |
# Initialize detectors
|
| 87 |
-
scorebug_detector =
|
| 88 |
-
play_clock_reader =
|
| 89 |
|
| 90 |
# Open video
|
| 91 |
cap = cv2.VideoCapture(str(video_path))
|
|
@@ -209,7 +206,7 @@ def main():
|
|
| 209 |
# Collect all readings for backfill test
|
| 210 |
all_readings = []
|
| 211 |
for r in results:
|
| 212 |
-
from
|
| 213 |
|
| 214 |
all_readings.append(
|
| 215 |
PlayClockReading(
|
|
|
|
| 8 |
Expected: Play clock should tick down to 0 and stay at 0 for a few seconds.
|
| 9 |
"""
|
| 10 |
|
|
|
|
| 11 |
import logging
|
| 12 |
from pathlib import Path
|
| 13 |
|
| 14 |
import cv2
|
| 15 |
import numpy as np
|
| 16 |
|
| 17 |
+
from detection import DetectScoreBug
|
| 18 |
+
from readers import backfill_missing_readings
|
| 19 |
+
from setup import PlayClockRegionExtractor
|
|
|
|
|
|
|
| 20 |
|
| 21 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 22 |
logger = logging.getLogger(__name__)
|
|
|
|
| 81 |
logger.info(f"Start time: {start_seconds:.2f}s")
|
| 82 |
|
| 83 |
# Initialize detectors
|
| 84 |
+
scorebug_detector = DetectScoreBug(str(template_path))
|
| 85 |
+
play_clock_reader = PlayClockRegionExtractor(str(play_clock_config_path))
|
| 86 |
|
| 87 |
# Open video
|
| 88 |
cap = cv2.VideoCapture(str(video_path))
|
|
|
|
| 206 |
# Collect all readings for backfill test
|
| 207 |
all_readings = []
|
| 208 |
for r in results:
|
| 209 |
+
from detection import PlayClockReading
|
| 210 |
|
| 211 |
all_readings.append(
|
| 212 |
PlayClockReading(
|
scripts/archive/v3/replay_state_machine.py
CHANGED
|
@@ -30,11 +30,9 @@ from datetime import datetime
|
|
| 30 |
from pathlib import Path
|
| 31 |
from typing import Dict, Any, List, Optional
|
| 32 |
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
# pylint: disable=wrong-import-position
|
| 37 |
-
from detectors import PlayStateMachine, ScorebugDetection, PlayClockReading, PlayEvent
|
| 38 |
|
| 39 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 40 |
logger = logging.getLogger(__name__)
|
|
@@ -479,7 +477,7 @@ def replay_state_machine(
|
|
| 479 |
logger.info(" Special plays: %d", special_count)
|
| 480 |
|
| 481 |
# Initialize state machine for regular play detection
|
| 482 |
-
state_machine =
|
| 483 |
|
| 484 |
t_start = time.perf_counter()
|
| 485 |
|
|
|
|
| 30 |
from pathlib import Path
|
| 31 |
from typing import Dict, Any, List, Optional
|
| 32 |
|
| 33 |
+
from detection import ScorebugDetection
|
| 34 |
+
from readers import PlayClockReading
|
| 35 |
+
from tracking import TrackPlayState, PlayEvent
|
|
|
|
|
|
|
| 36 |
|
| 37 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 38 |
logger = logging.getLogger(__name__)
|
|
|
|
| 477 |
logger.info(" Special plays: %d", special_count)
|
| 478 |
|
| 479 |
# Initialize state machine for regular play detection
|
| 480 |
+
state_machine = TrackPlayState()
|
| 481 |
|
| 482 |
t_start = time.perf_counter()
|
| 483 |
|
scripts/investigate_missed_plays.py
CHANGED
|
@@ -20,16 +20,13 @@ Usage:
|
|
| 20 |
|
| 21 |
import json
|
| 22 |
import logging
|
| 23 |
-
import sys
|
| 24 |
from pathlib import Path
|
| 25 |
from typing import List, Dict, Tuple, Optional
|
| 26 |
|
| 27 |
import cv2
|
| 28 |
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
from detectors.digit_template_reader import DigitTemplateLibrary, TemplatePlayClockReader
|
| 33 |
|
| 34 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 35 |
logger = logging.getLogger(__name__)
|
|
@@ -88,7 +85,7 @@ def find_missed_plays(baseline: List[Dict], detected: List[Dict], tolerance: flo
|
|
| 88 |
|
| 89 |
def collect_clock_readings(
|
| 90 |
video_path: str,
|
| 91 |
-
template_reader:
|
| 92 |
playclock_coords: Tuple[int, int, int, int],
|
| 93 |
start_time: float,
|
| 94 |
end_time: float,
|
|
@@ -138,7 +135,7 @@ def format_readings(readings: List[Tuple[float, Optional[int], float]]) -> str:
|
|
| 138 |
|
| 139 |
def analyze_missed_play(
|
| 140 |
play: Dict,
|
| 141 |
-
template_reader:
|
| 142 |
playclock_coords: Tuple[int, int, int, int],
|
| 143 |
video_path: str,
|
| 144 |
) -> Dict:
|
|
@@ -318,7 +315,7 @@ def main():
|
|
| 318 |
|
| 319 |
playclock_coords = get_absolute_playclock_coords()
|
| 320 |
pc_w, pc_h = playclock_coords[2], playclock_coords[3]
|
| 321 |
-
template_reader =
|
| 322 |
|
| 323 |
# Analyze first 10 missed plays
|
| 324 |
logger.info("\n[Step 3] Analyzing first 10 missed plays...")
|
|
|
|
| 20 |
|
| 21 |
import json
|
| 22 |
import logging
|
|
|
|
| 23 |
from pathlib import Path
|
| 24 |
from typing import List, Dict, Tuple, Optional
|
| 25 |
|
| 26 |
import cv2
|
| 27 |
|
| 28 |
+
from setup import DigitTemplateLibrary
|
| 29 |
+
from readers import ReadPlayClock
|
|
|
|
|
|
|
| 30 |
|
| 31 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 32 |
logger = logging.getLogger(__name__)
|
|
|
|
| 85 |
|
| 86 |
def collect_clock_readings(
|
| 87 |
video_path: str,
|
| 88 |
+
template_reader: ReadPlayClock,
|
| 89 |
playclock_coords: Tuple[int, int, int, int],
|
| 90 |
start_time: float,
|
| 91 |
end_time: float,
|
|
|
|
| 135 |
|
| 136 |
def analyze_missed_play(
|
| 137 |
play: Dict,
|
| 138 |
+
template_reader: ReadPlayClock,
|
| 139 |
playclock_coords: Tuple[int, int, int, int],
|
| 140 |
video_path: str,
|
| 141 |
) -> Dict:
|
|
|
|
| 315 |
|
| 316 |
playclock_coords = get_absolute_playclock_coords()
|
| 317 |
pc_w, pc_h = playclock_coords[2], playclock_coords[3]
|
| 318 |
+
template_reader = ReadPlayClock(template_library, region_width=pc_w, region_height=pc_h)
|
| 319 |
|
| 320 |
# Analyze first 10 missed plays
|
| 321 |
logger.info("\n[Step 3] Analyzing first 10 missed plays...")
|
scripts/test_digit_extraction.py
CHANGED
|
@@ -19,11 +19,9 @@ from pathlib import Path
|
|
| 19 |
|
| 20 |
import cv2
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
from detectors import PlayClockReader, ScorebugDetector
|
| 26 |
-
from detectors.digit_template_reader import DigitTemplateBuilder, normalize_to_grayscale, detect_red_digits
|
| 27 |
|
| 28 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 29 |
logger = logging.getLogger(__name__)
|
|
@@ -46,8 +44,8 @@ def extract_samples_with_ocr(video_path: str, start_time: float, end_time: float
|
|
| 46 |
Returns list of (timestamp, clock_value, region_image, confidence)
|
| 47 |
"""
|
| 48 |
# Initialize components
|
| 49 |
-
scorebug_detector =
|
| 50 |
-
clock_reader =
|
| 51 |
|
| 52 |
# Open video
|
| 53 |
cap = cv2.VideoCapture(video_path)
|
|
|
|
| 19 |
|
| 20 |
import cv2
|
| 21 |
|
| 22 |
+
from detection import DetectScoreBug
|
| 23 |
+
from readers import normalize_to_grayscale, detect_red_digits
|
| 24 |
+
from setup import DigitTemplateBuilder, PlayClockRegionExtractor
|
|
|
|
|
|
|
| 25 |
|
| 26 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 27 |
logger = logging.getLogger(__name__)
|
|
|
|
| 44 |
Returns list of (timestamp, clock_value, region_image, confidence)
|
| 45 |
"""
|
| 46 |
# Initialize components
|
| 47 |
+
scorebug_detector = DetectScoreBug(template_path=TEMPLATE_PATH)
|
| 48 |
+
clock_reader = PlayClockRegionExtractor(region_config_path=PLAYCLOCK_CONFIG_PATH)
|
| 49 |
|
| 50 |
# Open video
|
| 51 |
cap = cv2.VideoCapture(video_path)
|
scripts/test_fast_full_video.py
CHANGED
|
@@ -30,9 +30,6 @@ import time
|
|
| 30 |
from pathlib import Path
|
| 31 |
from typing import List, Dict, Any, Optional
|
| 32 |
|
| 33 |
-
# Add src to path
|
| 34 |
-
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))
|
| 35 |
-
|
| 36 |
from pipeline.play_detector import PlayDetector, DetectionConfig
|
| 37 |
|
| 38 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
|
|
|
| 30 |
from pathlib import Path
|
| 31 |
from typing import List, Dict, Any, Optional
|
| 32 |
|
|
|
|
|
|
|
|
|
|
| 33 |
from pipeline.play_detector import PlayDetector, DetectionConfig
|
| 34 |
|
| 35 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
scripts/test_full_segment.py
CHANGED
|
@@ -17,9 +17,6 @@ import logging
|
|
| 17 |
import sys
|
| 18 |
from pathlib import Path
|
| 19 |
|
| 20 |
-
# Add src to path
|
| 21 |
-
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))
|
| 22 |
-
|
| 23 |
from pipeline.play_detector import PlayDetector, DetectionConfig
|
| 24 |
|
| 25 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
|
|
|
| 17 |
import sys
|
| 18 |
from pathlib import Path
|
| 19 |
|
|
|
|
|
|
|
|
|
|
| 20 |
from pipeline.play_detector import PlayDetector, DetectionConfig
|
| 21 |
|
| 22 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
scripts/test_full_video_evaluation.py
CHANGED
|
@@ -23,12 +23,9 @@ import sys
|
|
| 23 |
import time
|
| 24 |
from pathlib import Path
|
| 25 |
|
| 26 |
-
# Add src to path
|
| 27 |
-
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))
|
| 28 |
-
|
| 29 |
from pipeline.play_detector import DetectionConfig, PlayDetector
|
| 30 |
-
from
|
| 31 |
-
from
|
| 32 |
|
| 33 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 34 |
logger = logging.getLogger(__name__)
|
|
@@ -182,7 +179,7 @@ def run_full_video_evaluation():
|
|
| 182 |
# Initialize timeout tracker if config exists
|
| 183 |
timeout_tracker = None
|
| 184 |
if Path(TIMEOUT_CONFIG_PATH).exists():
|
| 185 |
-
timeout_tracker =
|
| 186 |
logger.info("Timeout tracker initialized")
|
| 187 |
|
| 188 |
# Initialize detector
|
|
|
|
| 23 |
import time
|
| 24 |
from pathlib import Path
|
| 25 |
|
|
|
|
|
|
|
|
|
|
| 26 |
from pipeline.play_detector import DetectionConfig, PlayDetector
|
| 27 |
+
from setup import DigitTemplateLibrary
|
| 28 |
+
from detection import TrackTimeouts
|
| 29 |
|
| 30 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 31 |
logger = logging.getLogger(__name__)
|
|
|
|
| 179 |
# Initialize timeout tracker if config exists
|
| 180 |
timeout_tracker = None
|
| 181 |
if Path(TIMEOUT_CONFIG_PATH).exists():
|
| 182 |
+
timeout_tracker = TrackTimeouts(config_path=TIMEOUT_CONFIG_PATH)
|
| 183 |
logger.info("Timeout tracker initialized")
|
| 184 |
|
| 185 |
# Initialize detector
|
scripts/test_missed_play.py
CHANGED
|
@@ -17,9 +17,6 @@ import logging
|
|
| 17 |
import sys
|
| 18 |
from pathlib import Path
|
| 19 |
|
| 20 |
-
# Add src to path
|
| 21 |
-
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))
|
| 22 |
-
|
| 23 |
from pipeline.play_detector import PlayDetector, DetectionConfig
|
| 24 |
|
| 25 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
|
|
|
| 17 |
import sys
|
| 18 |
from pathlib import Path
|
| 19 |
|
|
|
|
|
|
|
|
|
|
| 20 |
from pipeline.play_detector import PlayDetector, DetectionConfig
|
| 21 |
|
| 22 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
scripts/test_template_accuracy.py
CHANGED
|
@@ -27,11 +27,9 @@ from typing import List
|
|
| 27 |
import cv2
|
| 28 |
import numpy as np
|
| 29 |
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
from detectors import PlayClockReader, ScorebugDetector
|
| 34 |
-
from detectors.digit_template_reader import DigitTemplateBuilder, TemplatePlayClockReader
|
| 35 |
|
| 36 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 37 |
logger = logging.getLogger(__name__)
|
|
@@ -57,8 +55,8 @@ def collect_all_samples(video_path: str, start_time: float, end_time: float, sam
|
|
| 57 |
|
| 58 |
Returns list of (timestamp, clock_value, region_image, confidence)
|
| 59 |
"""
|
| 60 |
-
scorebug_detector =
|
| 61 |
-
clock_reader =
|
| 62 |
|
| 63 |
cap = cv2.VideoCapture(video_path)
|
| 64 |
if not cap.isOpened():
|
|
@@ -234,7 +232,7 @@ def test_template_accuracy():
|
|
| 234 |
|
| 235 |
# Test template matching on test set
|
| 236 |
logger.info("\n[Step 4] Testing template matching accuracy...")
|
| 237 |
-
template_reader =
|
| 238 |
|
| 239 |
correct = 0
|
| 240 |
wrong = 0
|
|
|
|
| 27 |
import cv2
|
| 28 |
import numpy as np
|
| 29 |
|
| 30 |
+
from detection import DetectScoreBug
|
| 31 |
+
from readers import ReadPlayClock
|
| 32 |
+
from setup import DigitTemplateBuilder, PlayClockRegionExtractor
|
|
|
|
|
|
|
| 33 |
|
| 34 |
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
| 35 |
logger = logging.getLogger(__name__)
|
|
|
|
| 55 |
|
| 56 |
Returns list of (timestamp, clock_value, region_image, confidence)
|
| 57 |
"""
|
| 58 |
+
scorebug_detector = DetectScoreBug(template_path=TEMPLATE_PATH)
|
| 59 |
+
clock_reader = PlayClockRegionExtractor(region_config_path=PLAYCLOCK_CONFIG_PATH)
|
| 60 |
|
| 61 |
cap = cv2.VideoCapture(video_path)
|
| 62 |
if not cap.isOpened():
|
|
|
|
| 232 |
|
| 233 |
# Test template matching on test set
|
| 234 |
logger.info("\n[Step 4] Testing template matching accuracy...")
|
| 235 |
+
template_reader = ReadPlayClock(library)
|
| 236 |
|
| 237 |
correct = 0
|
| 238 |
wrong = 0
|
src/config/__init__.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
"""Configuration modules for session management and project settings."""
|
| 2 |
|
|
|
|
| 3 |
from .session import (
|
| 4 |
-
SessionConfig,
|
| 5 |
save_session_config,
|
| 6 |
load_session_config,
|
| 7 |
get_video_basename,
|
|
|
|
| 1 |
"""Configuration modules for session management and project settings."""
|
| 2 |
|
| 3 |
+
from .models import SessionConfig
|
| 4 |
from .session import (
|
|
|
|
| 5 |
save_session_config,
|
| 6 |
load_session_config,
|
| 7 |
get_video_basename,
|
src/config/models.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Pydantic models for CFB40 configuration.
|
| 3 |
+
|
| 4 |
+
This module contains validated configuration models using Pydantic BaseModel.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
from pydantic import BaseModel
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class SessionConfig(BaseModel):
|
| 13 |
+
"""
|
| 14 |
+
Configuration for a detection session, including user-specified regions.
|
| 15 |
+
|
| 16 |
+
This model contains all the information needed to run play detection
|
| 17 |
+
on a video, including the selected scorebug, play clock, and timeout regions.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
# Video settings
|
| 21 |
+
video_path: str
|
| 22 |
+
start_time: float
|
| 23 |
+
end_time: Optional[float]
|
| 24 |
+
|
| 25 |
+
# Scorebug region (absolute coordinates in frame)
|
| 26 |
+
scorebug_x: int
|
| 27 |
+
scorebug_y: int
|
| 28 |
+
scorebug_width: int
|
| 29 |
+
scorebug_height: int
|
| 30 |
+
|
| 31 |
+
# Play clock region (relative to scorebug)
|
| 32 |
+
playclock_x_offset: int
|
| 33 |
+
playclock_y_offset: int
|
| 34 |
+
playclock_width: int
|
| 35 |
+
playclock_height: int
|
| 36 |
+
|
| 37 |
+
# Timeout tracker regions (absolute coordinates in frame)
|
| 38 |
+
# Home team timeout indicators (3 ovals stacked vertically)
|
| 39 |
+
home_timeout_x: int = 0
|
| 40 |
+
home_timeout_y: int = 0
|
| 41 |
+
home_timeout_width: int = 0
|
| 42 |
+
home_timeout_height: int = 0
|
| 43 |
+
|
| 44 |
+
# Away team timeout indicators (3 ovals stacked vertically)
|
| 45 |
+
away_timeout_x: int = 0
|
| 46 |
+
away_timeout_y: int = 0
|
| 47 |
+
away_timeout_width: int = 0
|
| 48 |
+
away_timeout_height: int = 0
|
| 49 |
+
|
| 50 |
+
# Generated paths
|
| 51 |
+
template_path: str = ""
|
| 52 |
+
config_path: str = ""
|
| 53 |
+
|
| 54 |
+
# Video identifier for output naming
|
| 55 |
+
video_basename: str = ""
|
src/config/session.py
CHANGED
|
@@ -1,19 +1,20 @@
|
|
| 1 |
"""
|
| 2 |
Session configuration management for CFB40.
|
| 3 |
|
| 4 |
-
This module provides
|
| 5 |
including region selections, video paths, and project constants.
|
| 6 |
"""
|
| 7 |
|
| 8 |
import json
|
| 9 |
import logging
|
| 10 |
-
from dataclasses import asdict, dataclass
|
| 11 |
from pathlib import Path
|
| 12 |
-
from typing import Optional, Tuple
|
| 13 |
|
| 14 |
import cv2
|
| 15 |
import numpy as np
|
| 16 |
|
|
|
|
|
|
|
| 17 |
logger = logging.getLogger(__name__)
|
| 18 |
|
| 19 |
# =============================================================================
|
|
@@ -34,58 +35,6 @@ EXPECTED_PLAYS_TESTING = 12
|
|
| 34 |
MIN_PLAY_DURATION = 3.0 # seconds
|
| 35 |
|
| 36 |
|
| 37 |
-
# =============================================================================
|
| 38 |
-
# Session Configuration Dataclass
|
| 39 |
-
# =============================================================================
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
@dataclass
|
| 43 |
-
class SessionConfig: # pylint: disable=too-many-instance-attributes
|
| 44 |
-
"""
|
| 45 |
-
Configuration for a detection session, including user-specified regions.
|
| 46 |
-
|
| 47 |
-
This dataclass contains all the information needed to run play detection
|
| 48 |
-
on a video, including the selected scorebug, play clock, and timeout regions.
|
| 49 |
-
"""
|
| 50 |
-
|
| 51 |
-
# Video settings
|
| 52 |
-
video_path: str
|
| 53 |
-
start_time: float
|
| 54 |
-
end_time: Optional[float]
|
| 55 |
-
|
| 56 |
-
# Scorebug region (absolute coordinates in frame)
|
| 57 |
-
scorebug_x: int
|
| 58 |
-
scorebug_y: int
|
| 59 |
-
scorebug_width: int
|
| 60 |
-
scorebug_height: int
|
| 61 |
-
|
| 62 |
-
# Play clock region (relative to scorebug)
|
| 63 |
-
playclock_x_offset: int
|
| 64 |
-
playclock_y_offset: int
|
| 65 |
-
playclock_width: int
|
| 66 |
-
playclock_height: int
|
| 67 |
-
|
| 68 |
-
# Timeout tracker regions (absolute coordinates in frame)
|
| 69 |
-
# Home team timeout indicators (3 ovals stacked vertically)
|
| 70 |
-
home_timeout_x: int = 0
|
| 71 |
-
home_timeout_y: int = 0
|
| 72 |
-
home_timeout_width: int = 0
|
| 73 |
-
home_timeout_height: int = 0
|
| 74 |
-
|
| 75 |
-
# Away team timeout indicators (3 ovals stacked vertically)
|
| 76 |
-
away_timeout_x: int = 0
|
| 77 |
-
away_timeout_y: int = 0
|
| 78 |
-
away_timeout_width: int = 0
|
| 79 |
-
away_timeout_height: int = 0
|
| 80 |
-
|
| 81 |
-
# Generated paths
|
| 82 |
-
template_path: str = ""
|
| 83 |
-
config_path: str = ""
|
| 84 |
-
|
| 85 |
-
# Video identifier for output naming
|
| 86 |
-
video_basename: str = ""
|
| 87 |
-
|
| 88 |
-
|
| 89 |
# =============================================================================
|
| 90 |
# Utility Functions
|
| 91 |
# =============================================================================
|
|
@@ -154,67 +103,88 @@ def format_time(seconds: float) -> str:
|
|
| 154 |
|
| 155 |
|
| 156 |
# =============================================================================
|
| 157 |
-
# Save/Load Functions
|
| 158 |
# =============================================================================
|
| 159 |
|
| 160 |
|
| 161 |
-
|
| 162 |
-
def save_session_config(
|
| 163 |
-
config: SessionConfig,
|
| 164 |
-
output_dir: Path,
|
| 165 |
-
selected_frame: Tuple[float, np.ndarray] = None,
|
| 166 |
-
) -> Tuple[str, str]:
|
| 167 |
"""
|
| 168 |
-
Save session configuration
|
| 169 |
|
| 170 |
Args:
|
| 171 |
config: Session configuration to save.
|
| 172 |
output_dir: Output directory.
|
| 173 |
-
|
| 174 |
-
region selection. Used to generate template image.
|
| 175 |
|
| 176 |
Returns:
|
| 177 |
-
|
| 178 |
"""
|
| 179 |
-
output_dir.mkdir(parents=True, exist_ok=True)
|
| 180 |
-
basename = config.video_basename
|
| 181 |
-
|
| 182 |
-
# Save config JSON with video-specific name
|
| 183 |
config_path = output_dir / f"{basename}_config.json"
|
| 184 |
-
config_dict =
|
| 185 |
with open(config_path, "w", encoding="utf-8") as f:
|
| 186 |
json.dump(config_dict, f, indent=2)
|
| 187 |
logger.info("Config saved to: %s", config_path)
|
|
|
|
| 188 |
|
| 189 |
-
|
| 190 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 191 |
frame = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 192 |
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
# Fallback: extract from video at start_time
|
| 199 |
-
cap = cv2.VideoCapture(config.video_path)
|
| 200 |
-
if cap.isOpened():
|
| 201 |
-
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 202 |
-
frame_number = int(config.start_time * fps)
|
| 203 |
-
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
|
| 204 |
-
ret, frame = cap.read()
|
| 205 |
-
if not ret:
|
| 206 |
-
frame = None
|
| 207 |
-
cap.release()
|
| 208 |
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 218 |
playclock_config_path = output_dir / f"{basename}_playclock_config.json"
|
| 219 |
playclock_config = {
|
| 220 |
"x_offset": config.playclock_x_offset,
|
|
@@ -227,34 +197,101 @@ def save_session_config(
|
|
| 227 |
}
|
| 228 |
with open(playclock_config_path, "w", encoding="utf-8") as f:
|
| 229 |
json.dump(playclock_config, f, indent=2)
|
|
|
|
|
|
|
|
|
|
| 230 |
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 243 |
},
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
},
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
|
| 259 |
return str(config_path), str(template_path)
|
| 260 |
|
|
|
|
| 1 |
"""
|
| 2 |
Session configuration management for CFB40.
|
| 3 |
|
| 4 |
+
This module provides functions for managing session configuration,
|
| 5 |
including region selections, video paths, and project constants.
|
| 6 |
"""
|
| 7 |
|
| 8 |
import json
|
| 9 |
import logging
|
|
|
|
| 10 |
from pathlib import Path
|
| 11 |
+
from typing import Any, Optional, Tuple
|
| 12 |
|
| 13 |
import cv2
|
| 14 |
import numpy as np
|
| 15 |
|
| 16 |
+
from config.models import SessionConfig
|
| 17 |
+
|
| 18 |
logger = logging.getLogger(__name__)
|
| 19 |
|
| 20 |
# =============================================================================
|
|
|
|
| 35 |
MIN_PLAY_DURATION = 3.0 # seconds
|
| 36 |
|
| 37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
# =============================================================================
|
| 39 |
# Utility Functions
|
| 40 |
# =============================================================================
|
|
|
|
| 103 |
|
| 104 |
|
| 105 |
# =============================================================================
|
| 106 |
+
# Save/Load Helper Functions
|
| 107 |
# =============================================================================
|
| 108 |
|
| 109 |
|
| 110 |
+
def _save_config_json(config: SessionConfig, output_dir: Path, basename: str) -> Path:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
"""
|
| 112 |
+
Save the main session configuration as JSON.
|
| 113 |
|
| 114 |
Args:
|
| 115 |
config: Session configuration to save.
|
| 116 |
output_dir: Output directory.
|
| 117 |
+
basename: Base name for the output file.
|
|
|
|
| 118 |
|
| 119 |
Returns:
|
| 120 |
+
Path to the saved config file.
|
| 121 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
config_path = output_dir / f"{basename}_config.json"
|
| 123 |
+
config_dict = config.model_dump()
|
| 124 |
with open(config_path, "w", encoding="utf-8") as f:
|
| 125 |
json.dump(config_dict, f, indent=2)
|
| 126 |
logger.info("Config saved to: %s", config_path)
|
| 127 |
+
return config_path
|
| 128 |
|
| 129 |
+
|
| 130 |
+
def _extract_frame_from_video(video_path: str, timestamp: float) -> np.ndarray[Any, Any] | None:
|
| 131 |
+
"""
|
| 132 |
+
Extract a single frame from a video at the given timestamp.
|
| 133 |
+
|
| 134 |
+
Args:
|
| 135 |
+
video_path: Path to the video file.
|
| 136 |
+
timestamp: Time in seconds to extract the frame.
|
| 137 |
+
|
| 138 |
+
Returns:
|
| 139 |
+
The extracted frame as a numpy array, or None if extraction failed.
|
| 140 |
+
"""
|
| 141 |
+
cap = cv2.VideoCapture(video_path)
|
| 142 |
frame = None
|
| 143 |
+
if cap.isOpened():
|
| 144 |
+
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 145 |
+
frame_number = int(timestamp * fps)
|
| 146 |
+
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
|
| 147 |
+
ret, frame = cap.read()
|
| 148 |
+
if not ret:
|
| 149 |
+
frame = None
|
| 150 |
+
cap.release()
|
| 151 |
+
return frame
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def _save_template_image(config: SessionConfig, output_dir: Path, basename: str, frame: np.ndarray[Any, Any]) -> Path:
|
| 155 |
+
"""
|
| 156 |
+
Extract the scorebug region from a frame and save it as the template image.
|
| 157 |
|
| 158 |
+
Args:
|
| 159 |
+
config: Session configuration with scorebug coordinates.
|
| 160 |
+
output_dir: Output directory.
|
| 161 |
+
basename: Base name for the output file.
|
| 162 |
+
frame: The frame to extract the template from.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 163 |
|
| 164 |
+
Returns:
|
| 165 |
+
Path to the saved template image.
|
| 166 |
+
"""
|
| 167 |
+
template_path = output_dir / f"{basename}_template.png"
|
| 168 |
+
sb_x, sb_y = config.scorebug_x, config.scorebug_y
|
| 169 |
+
sb_w, sb_h = config.scorebug_width, config.scorebug_height
|
| 170 |
+
template = frame[sb_y : sb_y + sb_h, sb_x : sb_x + sb_w]
|
| 171 |
+
cv2.imwrite(str(template_path), template)
|
| 172 |
+
logger.info("Template saved to: %s", template_path)
|
| 173 |
+
return template_path
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _save_playclock_config(config: SessionConfig, output_dir: Path, basename: str) -> Path:
|
| 177 |
+
"""
|
| 178 |
+
Save the play clock region configuration in the format expected by ReadPlayClock.
|
| 179 |
+
|
| 180 |
+
Args:
|
| 181 |
+
config: Session configuration with play clock coordinates.
|
| 182 |
+
output_dir: Output directory.
|
| 183 |
+
basename: Base name for the output file.
|
| 184 |
+
|
| 185 |
+
Returns:
|
| 186 |
+
Path to the saved playclock config file.
|
| 187 |
+
"""
|
| 188 |
playclock_config_path = output_dir / f"{basename}_playclock_config.json"
|
| 189 |
playclock_config = {
|
| 190 |
"x_offset": config.playclock_x_offset,
|
|
|
|
| 197 |
}
|
| 198 |
with open(playclock_config_path, "w", encoding="utf-8") as f:
|
| 199 |
json.dump(playclock_config, f, indent=2)
|
| 200 |
+
logger.info("Playclock config saved to: %s", playclock_config_path)
|
| 201 |
+
return playclock_config_path
|
| 202 |
+
|
| 203 |
|
| 204 |
+
def _save_timeout_config(config: SessionConfig, output_dir: Path, basename: str) -> Path | None:
|
| 205 |
+
"""
|
| 206 |
+
Save the timeout tracker configuration in the format expected by DetectTimeouts.
|
| 207 |
+
|
| 208 |
+
Only saves if both home and away timeout regions are configured.
|
| 209 |
+
|
| 210 |
+
Args:
|
| 211 |
+
config: Session configuration with timeout region coordinates.
|
| 212 |
+
output_dir: Output directory.
|
| 213 |
+
basename: Base name for the output file.
|
| 214 |
+
|
| 215 |
+
Returns:
|
| 216 |
+
Path to the saved timeout config file, or None if not configured.
|
| 217 |
+
"""
|
| 218 |
+
# Only save if timeout regions are configured
|
| 219 |
+
if config.home_timeout_width <= 0 or config.away_timeout_width <= 0:
|
| 220 |
+
return None
|
| 221 |
+
|
| 222 |
+
timeout_config_path = output_dir / f"{basename}_timeout_config.json"
|
| 223 |
+
timeout_config = {
|
| 224 |
+
"home_timeout_region": {
|
| 225 |
+
"team_name": "home",
|
| 226 |
+
"bbox": {
|
| 227 |
+
"x": config.home_timeout_x,
|
| 228 |
+
"y": config.home_timeout_y,
|
| 229 |
+
"width": config.home_timeout_width,
|
| 230 |
+
"height": config.home_timeout_height,
|
| 231 |
},
|
| 232 |
+
},
|
| 233 |
+
"away_timeout_region": {
|
| 234 |
+
"team_name": "away",
|
| 235 |
+
"bbox": {
|
| 236 |
+
"x": config.away_timeout_x,
|
| 237 |
+
"y": config.away_timeout_y,
|
| 238 |
+
"width": config.away_timeout_width,
|
| 239 |
+
"height": config.away_timeout_height,
|
| 240 |
},
|
| 241 |
+
},
|
| 242 |
+
"source_video": Path(config.video_path).name,
|
| 243 |
+
}
|
| 244 |
+
with open(timeout_config_path, "w", encoding="utf-8") as f:
|
| 245 |
+
json.dump(timeout_config, f, indent=2)
|
| 246 |
+
logger.info("Timeout tracker config saved to: %s", timeout_config_path)
|
| 247 |
+
return timeout_config_path
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
# =============================================================================
|
| 251 |
+
# Save/Load Functions
|
| 252 |
+
# =============================================================================
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def save_session_config(
|
| 256 |
+
config: SessionConfig,
|
| 257 |
+
output_dir: Path,
|
| 258 |
+
selected_frame: Tuple[float, np.ndarray[Any, Any]] | None = None,
|
| 259 |
+
) -> Tuple[str, str]:
|
| 260 |
+
"""
|
| 261 |
+
Save session configuration and generate template image.
|
| 262 |
+
|
| 263 |
+
Args:
|
| 264 |
+
config: Session configuration to save.
|
| 265 |
+
output_dir: Output directory.
|
| 266 |
+
selected_frame: The specific (timestamp, frame) tuple selected during
|
| 267 |
+
region selection. Used to generate template image.
|
| 268 |
+
|
| 269 |
+
Returns:
|
| 270 |
+
Tuple of (config_path, template_path).
|
| 271 |
+
"""
|
| 272 |
+
output_dir.mkdir(parents=True, exist_ok=True)
|
| 273 |
+
basename = config.video_basename
|
| 274 |
+
|
| 275 |
+
# Save the main session config JSON
|
| 276 |
+
config_path = _save_config_json(config, output_dir, basename)
|
| 277 |
+
|
| 278 |
+
# Get frame for template generation
|
| 279 |
+
frame: Optional[np.ndarray[Any, Any]] = None
|
| 280 |
+
if selected_frame is not None:
|
| 281 |
+
timestamp, frame = selected_frame
|
| 282 |
+
logger.info("Using selected frame @ %.1fs for template generation", timestamp)
|
| 283 |
+
else:
|
| 284 |
+
# Fallback: extract from video at start_time
|
| 285 |
+
frame = _extract_frame_from_video(config.video_path, config.start_time)
|
| 286 |
+
|
| 287 |
+
# Save template image from the frame
|
| 288 |
+
template_path = output_dir / f"{basename}_template.png"
|
| 289 |
+
if frame is not None:
|
| 290 |
+
template_path = _save_template_image(config, output_dir, basename, frame)
|
| 291 |
+
|
| 292 |
+
# Save auxiliary config files
|
| 293 |
+
_save_playclock_config(config, output_dir, basename)
|
| 294 |
+
_save_timeout_config(config, output_dir, basename)
|
| 295 |
|
| 296 |
return str(config_path), str(template_path)
|
| 297 |
|
src/detection/__init__.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Detection modules for per-frame analysis during video scanning.
|
| 2 |
+
|
| 3 |
+
This package contains components that process individual video frames to detect:
|
| 4 |
+
- Scorebug presence and location
|
| 5 |
+
- Timeout indicator states
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from .models import (
|
| 9 |
+
ScorebugDetection,
|
| 10 |
+
TimeoutRegionConfig,
|
| 11 |
+
TimeoutReading,
|
| 12 |
+
)
|
| 13 |
+
from .scorebug import DetectScoreBug, create_template_from_frame
|
| 14 |
+
from .timeouts import DetectTimeouts
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
# Models
|
| 18 |
+
"ScorebugDetection",
|
| 19 |
+
"TimeoutRegionConfig",
|
| 20 |
+
"TimeoutReading",
|
| 21 |
+
# Scorebug detection
|
| 22 |
+
"DetectScoreBug",
|
| 23 |
+
"create_template_from_frame",
|
| 24 |
+
# Timeout tracking
|
| 25 |
+
"DetectTimeouts",
|
| 26 |
+
]
|
src/detection/models.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Pydantic models for per-frame detection.
|
| 3 |
+
|
| 4 |
+
These models represent the results of detecting various elements in video frames:
|
| 5 |
+
scorebug presence and timeout indicators.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import Any, Optional, Tuple, List
|
| 9 |
+
|
| 10 |
+
from pydantic import BaseModel, Field
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ScorebugDetection(BaseModel):
|
| 14 |
+
"""Results from scorebug detection."""
|
| 15 |
+
|
| 16 |
+
detected: bool = Field(..., description="Whether scorebug was detected")
|
| 17 |
+
confidence: float = Field(..., description="Confidence score (0.0 to 1.0)")
|
| 18 |
+
bbox: Optional[Tuple[int, int, int, int]] = Field(None, description="Bounding box (x, y, width, height)")
|
| 19 |
+
method: str = Field("unknown", description="Detection method used")
|
| 20 |
+
left_confidence: Optional[float] = Field(None, description="Left half confidence (when split detection enabled)")
|
| 21 |
+
right_confidence: Optional[float] = Field(None, description="Right half confidence (when split detection enabled)")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class TimeoutRegionConfig(BaseModel):
|
| 25 |
+
"""Configuration for a team's timeout indicator region."""
|
| 26 |
+
|
| 27 |
+
team_name: str = Field(..., description="'home' or 'away'")
|
| 28 |
+
bbox: Tuple[int, int, int, int] = Field(..., description="x, y, width, height for the 3-oval group")
|
| 29 |
+
|
| 30 |
+
def to_dict(self) -> dict[str, object]:
|
| 31 |
+
"""Convert to dictionary for JSON serialization."""
|
| 32 |
+
return {
|
| 33 |
+
"team_name": self.team_name,
|
| 34 |
+
"bbox": {"x": self.bbox[0], "y": self.bbox[1], "width": self.bbox[2], "height": self.bbox[3]},
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
@classmethod
|
| 38 |
+
def from_dict(cls, data: dict[str, Any]) -> "TimeoutRegionConfig":
|
| 39 |
+
"""Create from dictionary."""
|
| 40 |
+
bbox = (data["bbox"]["x"], data["bbox"]["y"], data["bbox"]["width"], data["bbox"]["height"])
|
| 41 |
+
return cls(team_name=data["team_name"], bbox=bbox)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class TimeoutReading(BaseModel):
|
| 45 |
+
"""Results from timeout indicator reading."""
|
| 46 |
+
|
| 47 |
+
home_timeouts: int = Field(..., description="0-3 timeouts remaining")
|
| 48 |
+
away_timeouts: int = Field(..., description="0-3 timeouts remaining")
|
| 49 |
+
confidence: float = Field(..., description="Overall confidence in reading")
|
| 50 |
+
home_oval_states: Optional[List[bool]] = Field(None, description="True = white (available), False = dark (used)")
|
| 51 |
+
away_oval_states: Optional[List[bool]] = Field(None, description="True = white (available), False = dark (used)")
|
src/{detectors/scorebug_detector.py → detection/scorebug.py}
RENAMED
|
@@ -8,7 +8,7 @@ This module provides functions to detect the presence and location of the scoreb
|
|
| 8 |
import json
|
| 9 |
import logging
|
| 10 |
from pathlib import Path
|
| 11 |
-
from typing import Optional, Tuple
|
| 12 |
|
| 13 |
import cv2
|
| 14 |
import numpy as np
|
|
@@ -18,7 +18,7 @@ from .models import ScorebugDetection
|
|
| 18 |
logger = logging.getLogger(__name__)
|
| 19 |
|
| 20 |
|
| 21 |
-
class
|
| 22 |
"""
|
| 23 |
Detects the scorebug in video frames.
|
| 24 |
|
|
@@ -55,15 +55,15 @@ class ScorebugDetector:
|
|
| 55 |
fixed_region_config_path: Path to JSON config with fixed region (alternative to fixed_region)
|
| 56 |
use_split_detection: Enable split-half detection for robustness to partial overlays (default: True)
|
| 57 |
"""
|
| 58 |
-
self.template = None
|
| 59 |
self.template_path = template_path
|
| 60 |
self.fixed_region = fixed_region
|
| 61 |
self._use_fixed_region = fixed_region is not None
|
| 62 |
self.use_split_detection = use_split_detection
|
| 63 |
|
| 64 |
# Pre-computed template halves for split detection (populated when template is loaded)
|
| 65 |
-
self._template_left = None
|
| 66 |
-
self._template_right = None
|
| 67 |
|
| 68 |
if template_path:
|
| 69 |
self.load_template(template_path)
|
|
@@ -74,10 +74,15 @@ class ScorebugDetector:
|
|
| 74 |
|
| 75 |
mode = "fixed_region" if self._use_fixed_region else "full_search"
|
| 76 |
split_mode = "split_detection" if use_split_detection else "full_only"
|
| 77 |
-
logger.info("
|
| 78 |
if self._use_fixed_region:
|
| 79 |
logger.info(" Fixed region: %s", self.fixed_region)
|
| 80 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
def _load_fixed_region_config(self, config_path: str) -> None:
|
| 82 |
"""Load fixed region from a JSON config file."""
|
| 83 |
path = Path(config_path)
|
|
@@ -121,7 +126,7 @@ class ScorebugDetector:
|
|
| 121 |
self._template_right.shape[0],
|
| 122 |
)
|
| 123 |
|
| 124 |
-
def detect(self, frame: np.ndarray) -> ScorebugDetection:
|
| 125 |
"""
|
| 126 |
Detect scorebug in a frame.
|
| 127 |
|
|
@@ -162,7 +167,7 @@ class ScorebugDetector:
|
|
| 162 |
return detection
|
| 163 |
|
| 164 |
# pylint: disable=too-many-locals
|
| 165 |
-
def _detect_in_fixed_region(self, frame: np.ndarray) -> ScorebugDetection:
|
| 166 |
"""
|
| 167 |
Detect scorebug by checking only the fixed known location.
|
| 168 |
|
|
@@ -180,6 +185,10 @@ class ScorebugDetector:
|
|
| 180 |
Returns:
|
| 181 |
Detection result
|
| 182 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 183 |
x, y, _, _ = self.fixed_region
|
| 184 |
th, tw = self.template.shape[:2]
|
| 185 |
|
|
@@ -239,7 +248,7 @@ class ScorebugDetector:
|
|
| 239 |
return ScorebugDetection(detected=False, confidence=full_confidence, bbox=(x, y, tw, th), method="fixed_region")
|
| 240 |
|
| 241 |
# pylint: disable=too-many-locals
|
| 242 |
-
def _detect_by_template_fullsearch(self, frame: np.ndarray) -> ScorebugDetection:
|
| 243 |
"""
|
| 244 |
Detect scorebug using full-frame template matching.
|
| 245 |
|
|
@@ -346,7 +355,7 @@ class ScorebugDetector:
|
|
| 346 |
|
| 347 |
logger.info("Saved fixed region config to: %s", config_path)
|
| 348 |
|
| 349 |
-
def discover_and_lock_region(self, frame: np.ndarray) -> bool:
|
| 350 |
"""
|
| 351 |
Discover scorebug location using full search, then lock to fixed region mode.
|
| 352 |
|
|
@@ -371,7 +380,7 @@ class ScorebugDetector:
|
|
| 371 |
self._use_fixed_region = old_use_fixed
|
| 372 |
return False
|
| 373 |
|
| 374 |
-
def visualize_detection(self, frame: np.ndarray, detection: ScorebugDetection) -> np.ndarray:
|
| 375 |
"""
|
| 376 |
Draw detection results on frame for visualization.
|
| 377 |
|
|
@@ -398,7 +407,7 @@ class ScorebugDetector:
|
|
| 398 |
return vis_frame
|
| 399 |
|
| 400 |
|
| 401 |
-
def create_template_from_frame(frame: np.ndarray, bbox: Tuple[int, int, int, int], output_path: str) -> None:
|
| 402 |
"""
|
| 403 |
Extract a region from a frame to use as a template.
|
| 404 |
|
|
|
|
| 8 |
import json
|
| 9 |
import logging
|
| 10 |
from pathlib import Path
|
| 11 |
+
from typing import Any, Optional, Tuple
|
| 12 |
|
| 13 |
import cv2
|
| 14 |
import numpy as np
|
|
|
|
| 18 |
logger = logging.getLogger(__name__)
|
| 19 |
|
| 20 |
|
| 21 |
+
class DetectScoreBug:
|
| 22 |
"""
|
| 23 |
Detects the scorebug in video frames.
|
| 24 |
|
|
|
|
| 55 |
fixed_region_config_path: Path to JSON config with fixed region (alternative to fixed_region)
|
| 56 |
use_split_detection: Enable split-half detection for robustness to partial overlays (default: True)
|
| 57 |
"""
|
| 58 |
+
self.template: Optional[np.ndarray[Any, Any]] = None
|
| 59 |
self.template_path = template_path
|
| 60 |
self.fixed_region = fixed_region
|
| 61 |
self._use_fixed_region = fixed_region is not None
|
| 62 |
self.use_split_detection = use_split_detection
|
| 63 |
|
| 64 |
# Pre-computed template halves for split detection (populated when template is loaded)
|
| 65 |
+
self._template_left: Optional[np.ndarray[Any, Any]] = None
|
| 66 |
+
self._template_right: Optional[np.ndarray[Any, Any]] = None
|
| 67 |
|
| 68 |
if template_path:
|
| 69 |
self.load_template(template_path)
|
|
|
|
| 74 |
|
| 75 |
mode = "fixed_region" if self._use_fixed_region else "full_search"
|
| 76 |
split_mode = "split_detection" if use_split_detection else "full_only"
|
| 77 |
+
logger.info("DetectScoreBug initialized (template: %s, mode: %s, split: %s)", template_path is not None, mode, split_mode)
|
| 78 |
if self._use_fixed_region:
|
| 79 |
logger.info(" Fixed region: %s", self.fixed_region)
|
| 80 |
|
| 81 |
+
@property
|
| 82 |
+
def is_fixed_region_mode(self) -> bool:
|
| 83 |
+
"""Check if detector is using fixed region mode for faster detection."""
|
| 84 |
+
return self._use_fixed_region
|
| 85 |
+
|
| 86 |
def _load_fixed_region_config(self, config_path: str) -> None:
|
| 87 |
"""Load fixed region from a JSON config file."""
|
| 88 |
path = Path(config_path)
|
|
|
|
| 126 |
self._template_right.shape[0],
|
| 127 |
)
|
| 128 |
|
| 129 |
+
def detect(self, frame: np.ndarray[Any, Any]) -> ScorebugDetection:
|
| 130 |
"""
|
| 131 |
Detect scorebug in a frame.
|
| 132 |
|
|
|
|
| 167 |
return detection
|
| 168 |
|
| 169 |
# pylint: disable=too-many-locals
|
| 170 |
+
def _detect_in_fixed_region(self, frame: np.ndarray[Any, Any]) -> ScorebugDetection:
|
| 171 |
"""
|
| 172 |
Detect scorebug by checking only the fixed known location.
|
| 173 |
|
|
|
|
| 185 |
Returns:
|
| 186 |
Detection result
|
| 187 |
"""
|
| 188 |
+
# Asserts: this method should only be called when fixed_region and template are set
|
| 189 |
+
assert self.fixed_region is not None
|
| 190 |
+
assert self.template is not None
|
| 191 |
+
|
| 192 |
x, y, _, _ = self.fixed_region
|
| 193 |
th, tw = self.template.shape[:2]
|
| 194 |
|
|
|
|
| 248 |
return ScorebugDetection(detected=False, confidence=full_confidence, bbox=(x, y, tw, th), method="fixed_region")
|
| 249 |
|
| 250 |
# pylint: disable=too-many-locals
|
| 251 |
+
def _detect_by_template_fullsearch(self, frame: np.ndarray[Any, Any]) -> ScorebugDetection:
|
| 252 |
"""
|
| 253 |
Detect scorebug using full-frame template matching.
|
| 254 |
|
|
|
|
| 355 |
|
| 356 |
logger.info("Saved fixed region config to: %s", config_path)
|
| 357 |
|
| 358 |
+
def discover_and_lock_region(self, frame: np.ndarray[Any, Any]) -> bool:
|
| 359 |
"""
|
| 360 |
Discover scorebug location using full search, then lock to fixed region mode.
|
| 361 |
|
|
|
|
| 380 |
self._use_fixed_region = old_use_fixed
|
| 381 |
return False
|
| 382 |
|
| 383 |
+
def visualize_detection(self, frame: np.ndarray[Any, Any], detection: ScorebugDetection) -> np.ndarray[Any, Any]:
|
| 384 |
"""
|
| 385 |
Draw detection results on frame for visualization.
|
| 386 |
|
|
|
|
| 407 |
return vis_frame
|
| 408 |
|
| 409 |
|
| 410 |
+
def create_template_from_frame(frame: np.ndarray[Any, Any], bbox: Tuple[int, int, int, int], output_path: str) -> None:
|
| 411 |
"""
|
| 412 |
Extract a region from a frame to use as a template.
|
| 413 |
|
src/{detectors/timeout_tracker.py → detection/timeouts.py}
RENAMED
|
@@ -9,7 +9,7 @@ Detecting when an oval changes from white to dark indicates a timeout was called
|
|
| 9 |
import json
|
| 10 |
import logging
|
| 11 |
from pathlib import Path
|
| 12 |
-
from typing import Optional, Tuple, List
|
| 13 |
|
| 14 |
import cv2
|
| 15 |
import numpy as np
|
|
@@ -19,7 +19,7 @@ from .models import TimeoutRegionConfig, TimeoutReading
|
|
| 19 |
logger = logging.getLogger(__name__)
|
| 20 |
|
| 21 |
|
| 22 |
-
class
|
| 23 |
"""
|
| 24 |
Tracks timeout indicators on the scorebug.
|
| 25 |
|
|
@@ -67,11 +67,11 @@ class TimeoutTracker:
|
|
| 67 |
self._load_config(config_path)
|
| 68 |
|
| 69 |
if self._configured:
|
| 70 |
-
logger.info("
|
| 71 |
logger.info(" Home region: %s", self.home_region.bbox if self.home_region else None)
|
| 72 |
logger.info(" Away region: %s", self.away_region.bbox if self.away_region else None)
|
| 73 |
else:
|
| 74 |
-
logger.info("
|
| 75 |
|
| 76 |
def _load_config(self, config_path: str) -> None:
|
| 77 |
"""Load timeout regions from a JSON config file."""
|
|
@@ -128,7 +128,7 @@ class TimeoutTracker:
|
|
| 128 |
self._configured = True
|
| 129 |
logger.info("Timeout regions set: home=%s, away=%s", home_region.bbox, away_region.bbox)
|
| 130 |
|
| 131 |
-
def _extract_oval_bright_ratios(self, frame: np.ndarray, region: TimeoutRegionConfig) -> List[float]:
|
| 132 |
"""
|
| 133 |
Extract the ratio of bright pixels for each oval in a region.
|
| 134 |
|
|
@@ -194,7 +194,7 @@ class TimeoutTracker:
|
|
| 194 |
"""Count how many timeouts are available (white ovals)."""
|
| 195 |
return sum(1 for state in oval_states if state)
|
| 196 |
|
| 197 |
-
def read_timeouts(self, frame: np.ndarray) -> TimeoutReading:
|
| 198 |
"""
|
| 199 |
Read the current timeout count for each team.
|
| 200 |
|
|
@@ -208,6 +208,10 @@ class TimeoutTracker:
|
|
| 208 |
logger.warning("Timeout tracker not configured")
|
| 209 |
return TimeoutReading(home_timeouts=3, away_timeouts=3, confidence=0.0)
|
| 210 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 211 |
# Read home team timeouts using bright pixel ratio
|
| 212 |
home_bright_ratios = self._extract_oval_bright_ratios(frame, self.home_region)
|
| 213 |
home_states = self._classify_ovals(home_bright_ratios)
|
|
@@ -291,7 +295,7 @@ class TimeoutTracker:
|
|
| 291 |
self._prev_reading = curr_reading
|
| 292 |
return result
|
| 293 |
|
| 294 |
-
def update(self, frame: np.ndarray) -> Tuple[TimeoutReading, Optional[str]]:
|
| 295 |
"""
|
| 296 |
Read timeouts and detect any change in one call.
|
| 297 |
|
|
@@ -310,7 +314,7 @@ class TimeoutTracker:
|
|
| 310 |
self._prev_reading = None
|
| 311 |
logger.debug("Timeout tracking reset")
|
| 312 |
|
| 313 |
-
def visualize(self, frame: np.ndarray, reading: Optional[TimeoutReading] = None) -> np.ndarray:
|
| 314 |
"""
|
| 315 |
Draw timeout regions and states on frame for visualization.
|
| 316 |
|
|
|
|
| 9 |
import json
|
| 10 |
import logging
|
| 11 |
from pathlib import Path
|
| 12 |
+
from typing import Any, Optional, Tuple, List
|
| 13 |
|
| 14 |
import cv2
|
| 15 |
import numpy as np
|
|
|
|
| 19 |
logger = logging.getLogger(__name__)
|
| 20 |
|
| 21 |
|
| 22 |
+
class DetectTimeouts:
|
| 23 |
"""
|
| 24 |
Tracks timeout indicators on the scorebug.
|
| 25 |
|
|
|
|
| 67 |
self._load_config(config_path)
|
| 68 |
|
| 69 |
if self._configured:
|
| 70 |
+
logger.info("DetectTimeouts initialized with regions")
|
| 71 |
logger.info(" Home region: %s", self.home_region.bbox if self.home_region else None)
|
| 72 |
logger.info(" Away region: %s", self.away_region.bbox if self.away_region else None)
|
| 73 |
else:
|
| 74 |
+
logger.info("DetectTimeouts initialized (not configured - call configure_regions first)")
|
| 75 |
|
| 76 |
def _load_config(self, config_path: str) -> None:
|
| 77 |
"""Load timeout regions from a JSON config file."""
|
|
|
|
| 128 |
self._configured = True
|
| 129 |
logger.info("Timeout regions set: home=%s, away=%s", home_region.bbox, away_region.bbox)
|
| 130 |
|
| 131 |
+
def _extract_oval_bright_ratios(self, frame: np.ndarray[Any, Any], region: TimeoutRegionConfig) -> List[float]:
|
| 132 |
"""
|
| 133 |
Extract the ratio of bright pixels for each oval in a region.
|
| 134 |
|
|
|
|
| 194 |
"""Count how many timeouts are available (white ovals)."""
|
| 195 |
return sum(1 for state in oval_states if state)
|
| 196 |
|
| 197 |
+
def read_timeouts(self, frame: np.ndarray[Any, Any]) -> TimeoutReading:
|
| 198 |
"""
|
| 199 |
Read the current timeout count for each team.
|
| 200 |
|
|
|
|
| 208 |
logger.warning("Timeout tracker not configured")
|
| 209 |
return TimeoutReading(home_timeouts=3, away_timeouts=3, confidence=0.0)
|
| 210 |
|
| 211 |
+
# Asserts: _configured guarantees regions are set
|
| 212 |
+
assert self.home_region is not None
|
| 213 |
+
assert self.away_region is not None
|
| 214 |
+
|
| 215 |
# Read home team timeouts using bright pixel ratio
|
| 216 |
home_bright_ratios = self._extract_oval_bright_ratios(frame, self.home_region)
|
| 217 |
home_states = self._classify_ovals(home_bright_ratios)
|
|
|
|
| 295 |
self._prev_reading = curr_reading
|
| 296 |
return result
|
| 297 |
|
| 298 |
+
def update(self, frame: np.ndarray[Any, Any]) -> Tuple[TimeoutReading, Optional[str]]:
|
| 299 |
"""
|
| 300 |
Read timeouts and detect any change in one call.
|
| 301 |
|
|
|
|
| 314 |
self._prev_reading = None
|
| 315 |
logger.debug("Timeout tracking reset")
|
| 316 |
|
| 317 |
+
def visualize(self, frame: np.ndarray[Any, Any], reading: Optional[TimeoutReading] = None) -> np.ndarray[Any, Any]:
|
| 318 |
"""
|
| 319 |
Draw timeout regions and states on frame for visualization.
|
| 320 |
|
src/detectors/__init__.py
DELETED
|
@@ -1,55 +0,0 @@
|
|
| 1 |
-
"""Detector modules for identifying game elements."""
|
| 2 |
-
|
| 3 |
-
# Models (dataclasses)
|
| 4 |
-
from .models import (
|
| 5 |
-
ScorebugDetection,
|
| 6 |
-
PlayClockReading,
|
| 7 |
-
PlayClockRegionConfig,
|
| 8 |
-
TimeoutRegionConfig,
|
| 9 |
-
TimeoutReading,
|
| 10 |
-
PlayEvent,
|
| 11 |
-
DigitSample,
|
| 12 |
-
DigitTemplate,
|
| 13 |
-
TemplateMatchResult,
|
| 14 |
-
TemplatePlayClockReading,
|
| 15 |
-
)
|
| 16 |
-
|
| 17 |
-
# Detector classes
|
| 18 |
-
from .scorebug_detector import ScorebugDetector, create_template_from_frame
|
| 19 |
-
from .play_clock_reader import PlayClockReader
|
| 20 |
-
from .play_state_machine import PlayStateMachine, PlayState
|
| 21 |
-
from .timeout_tracker import TimeoutTracker
|
| 22 |
-
from .digit_template_reader import (
|
| 23 |
-
DigitTemplateLibrary,
|
| 24 |
-
DigitTemplateBuilder,
|
| 25 |
-
TemplatePlayClockReader,
|
| 26 |
-
normalize_to_grayscale,
|
| 27 |
-
detect_red_digits,
|
| 28 |
-
)
|
| 29 |
-
|
| 30 |
-
__all__ = [
|
| 31 |
-
# Models
|
| 32 |
-
"ScorebugDetection",
|
| 33 |
-
"PlayClockReading",
|
| 34 |
-
"PlayClockRegionConfig",
|
| 35 |
-
"TimeoutRegionConfig",
|
| 36 |
-
"TimeoutReading",
|
| 37 |
-
"PlayEvent",
|
| 38 |
-
"DigitSample",
|
| 39 |
-
"DigitTemplate",
|
| 40 |
-
"TemplateMatchResult",
|
| 41 |
-
"TemplatePlayClockReading",
|
| 42 |
-
# Detectors
|
| 43 |
-
"ScorebugDetector",
|
| 44 |
-
"create_template_from_frame",
|
| 45 |
-
"PlayClockReader",
|
| 46 |
-
"PlayStateMachine",
|
| 47 |
-
"PlayState",
|
| 48 |
-
"TimeoutTracker",
|
| 49 |
-
# Template-based play clock reader
|
| 50 |
-
"DigitTemplateLibrary",
|
| 51 |
-
"DigitTemplateBuilder",
|
| 52 |
-
"TemplatePlayClockReader",
|
| 53 |
-
"normalize_to_grayscale",
|
| 54 |
-
"detect_red_digits",
|
| 55 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/detectors/digit_template_reader.py
DELETED
|
@@ -1,1113 +0,0 @@
|
|
| 1 |
-
# pylint: disable=too-many-lines
|
| 2 |
-
"""
|
| 3 |
-
Template-based play clock reader using digit template matching.
|
| 4 |
-
|
| 5 |
-
This module provides fast play clock reading using template matching instead of OCR.
|
| 6 |
-
Templates are built from OCR-labeled samples during an initial collection phase,
|
| 7 |
-
then used for lightning-fast digit recognition on all subsequent frames.
|
| 8 |
-
|
| 9 |
-
Performance comparison (from ocr_benchmark.md):
|
| 10 |
-
- EasyOCR: 48.9ms/frame
|
| 11 |
-
- Template Matching: 0.3ms/frame (163x faster)
|
| 12 |
-
|
| 13 |
-
The play clock displays values 0-40 and turns RED when <= 5 seconds remain.
|
| 14 |
-
A color normalization step converts red digits to white-like appearance.
|
| 15 |
-
|
| 16 |
-
Implements dual-mode matching to handle both display layouts:
|
| 17 |
-
- Single-digit (0-9): Digit is CENTER-aligned (10 templates: ones_center)
|
| 18 |
-
- Double-digit (10-40): Tens on LEFT, ones on RIGHT (14 templates: ones_right + tens)
|
| 19 |
-
- Plus 1 blank template for detecting empty tens position
|
| 20 |
-
Total: 25 templates needed for full coverage.
|
| 21 |
-
"""
|
| 22 |
-
|
| 23 |
-
import json
|
| 24 |
-
import logging
|
| 25 |
-
from pathlib import Path
|
| 26 |
-
from typing import Dict, List, Optional, Tuple
|
| 27 |
-
|
| 28 |
-
import cv2
|
| 29 |
-
import numpy as np
|
| 30 |
-
|
| 31 |
-
from .models import DigitSample, DigitTemplate, TemplateMatchResult, TemplatePlayClockReading
|
| 32 |
-
|
| 33 |
-
logger = logging.getLogger(__name__)
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
def detect_red_digits(region: np.ndarray) -> bool:
|
| 37 |
-
"""
|
| 38 |
-
Detect if the play clock digits are red.
|
| 39 |
-
|
| 40 |
-
Red digits appear when the play clock has 5 seconds or less remaining.
|
| 41 |
-
Red digits have high red channel values with very low green and blue.
|
| 42 |
-
|
| 43 |
-
Args:
|
| 44 |
-
region: Play clock region (BGR format)
|
| 45 |
-
|
| 46 |
-
Returns:
|
| 47 |
-
True if red digits detected, False otherwise
|
| 48 |
-
"""
|
| 49 |
-
# Split into BGR channels
|
| 50 |
-
b, g, r = cv2.split(region)
|
| 51 |
-
|
| 52 |
-
# Calculate mean values for each channel
|
| 53 |
-
r_mean = np.mean(r)
|
| 54 |
-
g_mean = np.mean(g)
|
| 55 |
-
b_mean = np.mean(b)
|
| 56 |
-
|
| 57 |
-
# Red digits: high red channel, very low green/blue, red > 2x green
|
| 58 |
-
# pylint: disable=chained-comparison
|
| 59 |
-
is_red = r_mean > 15 and max(g_mean, b_mean) < 15 and r_mean > (g_mean * 2)
|
| 60 |
-
|
| 61 |
-
if is_red:
|
| 62 |
-
logger.debug("Red digits detected: R=%.1f, G=%.1f, B=%.1f", r_mean, g_mean, b_mean)
|
| 63 |
-
|
| 64 |
-
return is_red
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
def normalize_to_grayscale(region: np.ndarray) -> np.ndarray:
|
| 68 |
-
"""
|
| 69 |
-
Normalize a play clock region to grayscale, handling both red and white digits.
|
| 70 |
-
|
| 71 |
-
Red digits (displayed when clock <= 5) are converted to white-like grayscale
|
| 72 |
-
by extracting the red channel. White digits use standard grayscale conversion.
|
| 73 |
-
This allows a single set of templates to match both color variants.
|
| 74 |
-
|
| 75 |
-
Args:
|
| 76 |
-
region: Play clock region (BGR format)
|
| 77 |
-
|
| 78 |
-
Returns:
|
| 79 |
-
Grayscale image where digits appear as bright pixels on dark background
|
| 80 |
-
"""
|
| 81 |
-
is_red = detect_red_digits(region)
|
| 82 |
-
|
| 83 |
-
if is_red:
|
| 84 |
-
# For red digits, use the red channel directly as grayscale
|
| 85 |
-
# This converts red digits to white-like appearance
|
| 86 |
-
_, _, r = cv2.split(region)
|
| 87 |
-
return r
|
| 88 |
-
|
| 89 |
-
# Standard grayscale conversion for white digits
|
| 90 |
-
return cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
class DigitTemplateLibrary:
|
| 94 |
-
"""
|
| 95 |
-
Stores and manages digit templates for play clock reading.
|
| 96 |
-
|
| 97 |
-
Uses color normalization to handle both red and white digits with a single
|
| 98 |
-
template set. Now supports position-aware templates to handle both single-digit
|
| 99 |
-
(centered) and double-digit (left/right split) layouts:
|
| 100 |
-
- Ones digits (center): 0-9 from single-digit displays (10 templates)
|
| 101 |
-
- Ones digits (right): 0-9 from double-digit displays (10 templates)
|
| 102 |
-
- Tens digits (left): 1, 2, 3, 4 from double-digit displays (4 templates)
|
| 103 |
-
- Blank (left): Empty tens position from single-digit displays (1 template)
|
| 104 |
-
Total: 25 templates needed for full coverage
|
| 105 |
-
"""
|
| 106 |
-
|
| 107 |
-
# Template coverage requirements
|
| 108 |
-
ONES_DIGITS = list(range(10)) # 0-9
|
| 109 |
-
TENS_DIGITS = [-1, 1, 2, 3, 4] # -1 = blank, 1-4 for 10-40
|
| 110 |
-
POSITIONS = ["left", "center", "right"]
|
| 111 |
-
|
| 112 |
-
def __init__(self):
|
| 113 |
-
"""Initialize empty template library."""
|
| 114 |
-
# Templates: {(is_tens, digit_value, position): DigitTemplate}
|
| 115 |
-
self.templates: Dict[Tuple[bool, int, str], DigitTemplate] = {}
|
| 116 |
-
logger.info("DigitTemplateLibrary initialized (empty)")
|
| 117 |
-
|
| 118 |
-
def add_template(self, template: DigitTemplate) -> None:
|
| 119 |
-
"""
|
| 120 |
-
Add a template to the library.
|
| 121 |
-
|
| 122 |
-
Args:
|
| 123 |
-
template: DigitTemplate to add
|
| 124 |
-
"""
|
| 125 |
-
key = (template.is_tens_digit, template.digit_value, template.position)
|
| 126 |
-
self.templates[key] = template
|
| 127 |
-
digit_display = "blank" if template.digit_value == -1 else str(template.digit_value)
|
| 128 |
-
logger.debug(
|
| 129 |
-
"Added template: tens=%s, digit=%s, position=%s",
|
| 130 |
-
template.is_tens_digit,
|
| 131 |
-
digit_display,
|
| 132 |
-
template.position,
|
| 133 |
-
)
|
| 134 |
-
|
| 135 |
-
def get_template(self, is_tens: bool, digit_value: int, position: str) -> Optional[DigitTemplate]:
|
| 136 |
-
"""
|
| 137 |
-
Get a template from the library.
|
| 138 |
-
|
| 139 |
-
Args:
|
| 140 |
-
is_tens: Whether this is a tens digit
|
| 141 |
-
digit_value: The digit value (-1 for blank, 0-9 for digits)
|
| 142 |
-
position: Template position ("left", "center", or "right")
|
| 143 |
-
|
| 144 |
-
Returns:
|
| 145 |
-
DigitTemplate if found, None otherwise
|
| 146 |
-
"""
|
| 147 |
-
key = (is_tens, digit_value, position)
|
| 148 |
-
return self.templates.get(key)
|
| 149 |
-
|
| 150 |
-
def get_all_templates(self, is_tens: bool, position: Optional[str] = None) -> List[DigitTemplate]:
|
| 151 |
-
"""
|
| 152 |
-
Get all templates for a specific digit position.
|
| 153 |
-
|
| 154 |
-
Args:
|
| 155 |
-
is_tens: Whether to get tens digit templates
|
| 156 |
-
position: Optional position filter ("left", "center", or "right")
|
| 157 |
-
|
| 158 |
-
Returns:
|
| 159 |
-
List of matching DigitTemplate objects
|
| 160 |
-
"""
|
| 161 |
-
templates = []
|
| 162 |
-
for (tens, _, pos), template in self.templates.items():
|
| 163 |
-
if tens == is_tens:
|
| 164 |
-
if position is None or pos == position:
|
| 165 |
-
templates.append(template)
|
| 166 |
-
return templates
|
| 167 |
-
|
| 168 |
-
def get_coverage_status(self) -> Dict[str, any]:
|
| 169 |
-
"""
|
| 170 |
-
Get the current template coverage status.
|
| 171 |
-
|
| 172 |
-
Returns:
|
| 173 |
-
Dictionary with coverage information
|
| 174 |
-
"""
|
| 175 |
-
ones_center_have = set()
|
| 176 |
-
ones_right_have = set()
|
| 177 |
-
tens_have = set()
|
| 178 |
-
has_blank = False
|
| 179 |
-
|
| 180 |
-
for (is_tens, digit, position), _ in self.templates.items():
|
| 181 |
-
if is_tens:
|
| 182 |
-
if digit == -1:
|
| 183 |
-
has_blank = True
|
| 184 |
-
else:
|
| 185 |
-
tens_have.add(digit)
|
| 186 |
-
else:
|
| 187 |
-
if position == "center":
|
| 188 |
-
ones_center_have.add(digit)
|
| 189 |
-
elif position == "right":
|
| 190 |
-
ones_right_have.add(digit)
|
| 191 |
-
|
| 192 |
-
ones_center_missing = set(self.ONES_DIGITS) - ones_center_have
|
| 193 |
-
ones_right_missing = set(self.ONES_DIGITS) - ones_right_have
|
| 194 |
-
tens_missing = set([1, 2, 3, 4]) - tens_have
|
| 195 |
-
|
| 196 |
-
# Calculate totals
|
| 197 |
-
# ones_center (10) + ones_right (10) + tens (4) + blank (1) = 25
|
| 198 |
-
total_needed = 25
|
| 199 |
-
total_have = len(self.templates)
|
| 200 |
-
|
| 201 |
-
# Convert -1 to "blank" for display
|
| 202 |
-
def format_tens(digits):
|
| 203 |
-
return sorted(["blank" if d == -1 else d for d in digits], key=lambda x: (isinstance(x, str), x))
|
| 204 |
-
|
| 205 |
-
return {
|
| 206 |
-
"total_needed": total_needed,
|
| 207 |
-
"total_have": total_have,
|
| 208 |
-
"is_complete": total_have >= total_needed,
|
| 209 |
-
"ones_center_have": sorted(ones_center_have),
|
| 210 |
-
"ones_center_missing": sorted(ones_center_missing),
|
| 211 |
-
"ones_right_have": sorted(ones_right_have),
|
| 212 |
-
"ones_right_missing": sorted(ones_right_missing),
|
| 213 |
-
"tens_have": sorted(tens_have),
|
| 214 |
-
"tens_missing": sorted(tens_missing),
|
| 215 |
-
"has_blank": has_blank,
|
| 216 |
-
# Legacy fields for backward compatibility
|
| 217 |
-
"ones_have": sorted(ones_center_have | ones_right_have),
|
| 218 |
-
"ones_missing": sorted(ones_center_missing & ones_right_missing),
|
| 219 |
-
"tens_have_formatted": format_tens(tens_have | ({-1} if has_blank else set())),
|
| 220 |
-
"tens_missing_formatted": format_tens(tens_missing | (set() if has_blank else {-1})),
|
| 221 |
-
}
|
| 222 |
-
|
| 223 |
-
def is_complete(self) -> bool:
|
| 224 |
-
"""Check if all required templates are present."""
|
| 225 |
-
return self.get_coverage_status()["is_complete"]
|
| 226 |
-
|
| 227 |
-
def save(self, output_path: str) -> None:
|
| 228 |
-
"""
|
| 229 |
-
Save templates to disk.
|
| 230 |
-
|
| 231 |
-
Args:
|
| 232 |
-
output_path: Path to save directory
|
| 233 |
-
"""
|
| 234 |
-
output_dir = Path(output_path)
|
| 235 |
-
output_dir.mkdir(parents=True, exist_ok=True)
|
| 236 |
-
|
| 237 |
-
metadata = {"templates": [], "version": 2} # Version 2 includes position
|
| 238 |
-
|
| 239 |
-
for (is_tens, digit, position), template in self.templates.items():
|
| 240 |
-
# Use "blank" instead of -1 for the empty tens digit in filenames
|
| 241 |
-
digit_str = "blank" if digit == -1 else str(digit)
|
| 242 |
-
position_suffix = f"_{position}" if position != "left" or not is_tens else ""
|
| 243 |
-
filename = f"{'tens' if is_tens else 'ones'}_{digit_str}{position_suffix}.png"
|
| 244 |
-
cv2.imwrite(str(output_dir / filename), template.template)
|
| 245 |
-
metadata["templates"].append(
|
| 246 |
-
{
|
| 247 |
-
"filename": filename,
|
| 248 |
-
"digit_value": digit_str, # Use "blank" for display
|
| 249 |
-
"is_tens_digit": template.is_tens_digit,
|
| 250 |
-
"position": position,
|
| 251 |
-
"sample_count": template.sample_count,
|
| 252 |
-
"avg_confidence": template.avg_confidence,
|
| 253 |
-
}
|
| 254 |
-
)
|
| 255 |
-
|
| 256 |
-
with open(output_dir / "templates_metadata.json", "w", encoding="utf-8") as f:
|
| 257 |
-
json.dump(metadata, f, indent=2)
|
| 258 |
-
|
| 259 |
-
logger.info("Saved %d templates to %s", len(self.templates), output_path)
|
| 260 |
-
|
| 261 |
-
def load(self, input_path: str) -> bool:
|
| 262 |
-
"""
|
| 263 |
-
Load templates from disk.
|
| 264 |
-
|
| 265 |
-
Args:
|
| 266 |
-
input_path: Path to templates directory
|
| 267 |
-
|
| 268 |
-
Returns:
|
| 269 |
-
True if loaded successfully, False otherwise
|
| 270 |
-
"""
|
| 271 |
-
input_dir = Path(input_path)
|
| 272 |
-
metadata_path = input_dir / "templates_metadata.json"
|
| 273 |
-
|
| 274 |
-
if not metadata_path.exists():
|
| 275 |
-
logger.warning("No templates metadata found at %s", metadata_path)
|
| 276 |
-
return False
|
| 277 |
-
|
| 278 |
-
with open(metadata_path, "r", encoding="utf-8") as f:
|
| 279 |
-
metadata = json.load(f)
|
| 280 |
-
|
| 281 |
-
version = metadata.get("version", 1)
|
| 282 |
-
|
| 283 |
-
for entry in metadata.get("templates", []):
|
| 284 |
-
img_path = input_dir / entry["filename"]
|
| 285 |
-
if img_path.exists():
|
| 286 |
-
template_img = cv2.imread(str(img_path), cv2.IMREAD_GRAYSCALE)
|
| 287 |
-
if template_img is not None:
|
| 288 |
-
# Convert "blank" back to -1 for internal use
|
| 289 |
-
digit_value = entry["digit_value"]
|
| 290 |
-
if digit_value == "blank":
|
| 291 |
-
digit_value = -1
|
| 292 |
-
elif isinstance(digit_value, str):
|
| 293 |
-
digit_value = int(digit_value)
|
| 294 |
-
|
| 295 |
-
# Handle position (v2) or infer from old format (v1)
|
| 296 |
-
is_tens = entry["is_tens_digit"]
|
| 297 |
-
if version >= 2:
|
| 298 |
-
position = entry.get("position", "left" if is_tens else "right")
|
| 299 |
-
else:
|
| 300 |
-
# V1 format: tens → left, ones → right (old behavior)
|
| 301 |
-
position = "left" if is_tens else "right"
|
| 302 |
-
|
| 303 |
-
template = DigitTemplate(
|
| 304 |
-
digit_value=digit_value,
|
| 305 |
-
is_tens_digit=is_tens,
|
| 306 |
-
position=position,
|
| 307 |
-
template=template_img,
|
| 308 |
-
sample_count=entry.get("sample_count", 1),
|
| 309 |
-
avg_confidence=entry.get("avg_confidence", 1.0),
|
| 310 |
-
)
|
| 311 |
-
self.add_template(template)
|
| 312 |
-
|
| 313 |
-
logger.info("Loaded %d templates from %s (v%d format)", len(self.templates), input_path, version)
|
| 314 |
-
return True
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
class DigitTemplateBuilder:
|
| 318 |
-
"""
|
| 319 |
-
Builds digit templates from OCR-labeled play clock samples.
|
| 320 |
-
|
| 321 |
-
Collects samples from the play clock region, extracts individual digits,
|
| 322 |
-
and builds averaged templates for each unique digit value.
|
| 323 |
-
|
| 324 |
-
Uses color normalization so red and white digits produce the same template.
|
| 325 |
-
"""
|
| 326 |
-
|
| 327 |
-
# Play clock region dimensions (from config)
|
| 328 |
-
DEFAULT_REGION_WIDTH = 50
|
| 329 |
-
DEFAULT_REGION_HEIGHT = 28
|
| 330 |
-
|
| 331 |
-
def __init__(self, region_width: int = DEFAULT_REGION_WIDTH, region_height: int = DEFAULT_REGION_HEIGHT):
|
| 332 |
-
"""
|
| 333 |
-
Initialize the template builder.
|
| 334 |
-
|
| 335 |
-
Args:
|
| 336 |
-
region_width: Width of play clock region in pixels
|
| 337 |
-
region_height: Height of play clock region in pixels
|
| 338 |
-
"""
|
| 339 |
-
self.region_width = region_width
|
| 340 |
-
self.region_height = region_height
|
| 341 |
-
|
| 342 |
-
# Collected samples: {(is_tens, digit_value): [DigitSample, ...]}
|
| 343 |
-
self.samples: Dict[Tuple[bool, int], List[DigitSample]] = {}
|
| 344 |
-
|
| 345 |
-
# Track raw clock region images for potential reprocessing
|
| 346 |
-
self.raw_regions: List[Tuple[float, int, np.ndarray]] = [] # (timestamp, clock_value, region)
|
| 347 |
-
|
| 348 |
-
logger.info("DigitTemplateBuilder initialized (region: %dx%d)", region_width, region_height)
|
| 349 |
-
|
| 350 |
-
def preprocess_region(self, region: np.ndarray) -> np.ndarray:
|
| 351 |
-
"""
|
| 352 |
-
Preprocess play clock region for template extraction.
|
| 353 |
-
|
| 354 |
-
Uses color normalization to handle both red and white digits uniformly.
|
| 355 |
-
|
| 356 |
-
Args:
|
| 357 |
-
region: Play clock region (BGR format)
|
| 358 |
-
|
| 359 |
-
Returns:
|
| 360 |
-
Preprocessed binary image (white digits on black background)
|
| 361 |
-
"""
|
| 362 |
-
# Normalize color (red → white conversion happens here)
|
| 363 |
-
gray = normalize_to_grayscale(region)
|
| 364 |
-
|
| 365 |
-
# Scale up for better template quality
|
| 366 |
-
scale_factor = 4
|
| 367 |
-
scaled = cv2.resize(gray, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_LINEAR)
|
| 368 |
-
|
| 369 |
-
# Use Otsu's thresholding
|
| 370 |
-
_, binary = cv2.threshold(scaled, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
| 371 |
-
|
| 372 |
-
# Ensure white digits on black background (digits should be bright)
|
| 373 |
-
mean_intensity = np.mean(binary)
|
| 374 |
-
if mean_intensity > 128:
|
| 375 |
-
binary = cv2.bitwise_not(binary)
|
| 376 |
-
|
| 377 |
-
return binary
|
| 378 |
-
|
| 379 |
-
def extract_left_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 380 |
-
"""
|
| 381 |
-
Extract the left (tens digit) region from preprocessed play clock.
|
| 382 |
-
|
| 383 |
-
Used for tens digits in double-digit displays (10-40).
|
| 384 |
-
|
| 385 |
-
Args:
|
| 386 |
-
preprocessed: Preprocessed play clock image (scaled 4x)
|
| 387 |
-
|
| 388 |
-
Returns:
|
| 389 |
-
Left region image for tens digit
|
| 390 |
-
"""
|
| 391 |
-
_, w = preprocessed.shape[:2]
|
| 392 |
-
mid_x = w // 2
|
| 393 |
-
return preprocessed[:, : mid_x - 2] # Small gap in middle
|
| 394 |
-
|
| 395 |
-
def extract_far_left_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 396 |
-
"""
|
| 397 |
-
Extract the far left region from preprocessed play clock.
|
| 398 |
-
|
| 399 |
-
Used for blank detection in single-digit displays (0-9). This narrow
|
| 400 |
-
region (0%-20% of width) doesn't overlap with a centered digit,
|
| 401 |
-
so it should be truly empty when the clock shows a single digit.
|
| 402 |
-
|
| 403 |
-
Args:
|
| 404 |
-
preprocessed: Preprocessed play clock image (scaled 4x)
|
| 405 |
-
|
| 406 |
-
Returns:
|
| 407 |
-
Far left region image (should be mostly black for single digits)
|
| 408 |
-
"""
|
| 409 |
-
_, w = preprocessed.shape[:2]
|
| 410 |
-
# Far left: 0% to 20% of width - avoids overlap with centered digit
|
| 411 |
-
far_left_end = int(w * 0.20)
|
| 412 |
-
return preprocessed[:, :far_left_end]
|
| 413 |
-
|
| 414 |
-
def extract_right_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 415 |
-
"""
|
| 416 |
-
Extract the right (ones digit) region from preprocessed play clock.
|
| 417 |
-
|
| 418 |
-
Used for ones digits in double-digit displays (10-40).
|
| 419 |
-
|
| 420 |
-
Args:
|
| 421 |
-
preprocessed: Preprocessed play clock image (scaled 4x)
|
| 422 |
-
|
| 423 |
-
Returns:
|
| 424 |
-
Right region image for ones digit
|
| 425 |
-
"""
|
| 426 |
-
_, w = preprocessed.shape[:2]
|
| 427 |
-
mid_x = w // 2
|
| 428 |
-
return preprocessed[:, mid_x + 2 :]
|
| 429 |
-
|
| 430 |
-
def extract_center_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 431 |
-
"""
|
| 432 |
-
Extract the center region from preprocessed play clock.
|
| 433 |
-
|
| 434 |
-
Used for ones digits in single-digit displays (0-9) where the
|
| 435 |
-
digit is center-aligned rather than right-aligned.
|
| 436 |
-
|
| 437 |
-
The center region spans approximately 60% of the width, centered.
|
| 438 |
-
|
| 439 |
-
Args:
|
| 440 |
-
preprocessed: Preprocessed play clock image (scaled 4x)
|
| 441 |
-
|
| 442 |
-
Returns:
|
| 443 |
-
Center region image for centered single digit
|
| 444 |
-
"""
|
| 445 |
-
_, w = preprocessed.shape[:2]
|
| 446 |
-
|
| 447 |
-
# Center region: ~20% padding on each side, capturing middle 60%
|
| 448 |
-
# This encompasses where a centered single digit would appear
|
| 449 |
-
center_start = int(w * 0.20)
|
| 450 |
-
center_end = int(w * 0.80)
|
| 451 |
-
|
| 452 |
-
return preprocessed[:, center_start:center_end]
|
| 453 |
-
|
| 454 |
-
def extract_digits(self, preprocessed: np.ndarray, clock_value: int) -> Tuple[np.ndarray, np.ndarray, Optional[np.ndarray], Optional[np.ndarray]]:
|
| 455 |
-
"""
|
| 456 |
-
Extract individual digit images from preprocessed play clock region.
|
| 457 |
-
|
| 458 |
-
For double-digit values (10-40): extracts left (tens) and right (ones)
|
| 459 |
-
For single-digit values (0-9): extracts far-left (blank) and center (ones)
|
| 460 |
-
|
| 461 |
-
Args:
|
| 462 |
-
preprocessed: Preprocessed play clock image (scaled 4x)
|
| 463 |
-
clock_value: The known clock value (0-40)
|
| 464 |
-
|
| 465 |
-
Returns:
|
| 466 |
-
Tuple of (tens_digit_image, ones_right_image, ones_center_image, blank_image)
|
| 467 |
-
- For double-digit: tens=left, ones_right=right, ones_center=None, blank=None
|
| 468 |
-
- For single-digit: tens=None, ones_right=None, ones_center=center, blank=far_left
|
| 469 |
-
"""
|
| 470 |
-
if clock_value >= 10:
|
| 471 |
-
# Double-digit: standard left/right split
|
| 472 |
-
return self.extract_left_region(preprocessed), self.extract_right_region(preprocessed), None, None
|
| 473 |
-
# Single-digit: far-left is blank (truly empty), ones is centered
|
| 474 |
-
return None, None, self.extract_center_region(preprocessed), self.extract_far_left_region(preprocessed)
|
| 475 |
-
|
| 476 |
-
def add_sample(self, region: np.ndarray, clock_value: int, timestamp: float, confidence: float = 1.0) -> None:
|
| 477 |
-
"""
|
| 478 |
-
Add a play clock sample for template building.
|
| 479 |
-
|
| 480 |
-
Routes samples based on display layout:
|
| 481 |
-
- Single-digit (0-9): Digit is CENTER-aligned, tens position is blank
|
| 482 |
-
- Double-digit (10-40): Tens on LEFT, ones on RIGHT
|
| 483 |
-
|
| 484 |
-
Args:
|
| 485 |
-
region: Play clock region (BGR format, original size)
|
| 486 |
-
clock_value: OCR-determined clock value (0-40)
|
| 487 |
-
timestamp: Video timestamp
|
| 488 |
-
confidence: OCR confidence score
|
| 489 |
-
"""
|
| 490 |
-
if clock_value < 0 or clock_value > 40:
|
| 491 |
-
logger.warning("Invalid clock value %d, skipping sample", clock_value)
|
| 492 |
-
return
|
| 493 |
-
|
| 494 |
-
# Store raw region for potential reprocessing
|
| 495 |
-
self.raw_regions.append((timestamp, clock_value, region.copy()))
|
| 496 |
-
|
| 497 |
-
# Preprocess (handles red-to-white conversion automatically)
|
| 498 |
-
preprocessed = self.preprocess_region(region)
|
| 499 |
-
|
| 500 |
-
# Extract digits based on single vs double digit display
|
| 501 |
-
tens_img, ones_right_img, ones_center_img, blank_img = self.extract_digits(preprocessed, clock_value)
|
| 502 |
-
|
| 503 |
-
# Determine digit values
|
| 504 |
-
ones_digit = clock_value % 10
|
| 505 |
-
tens_digit = clock_value // 10 if clock_value >= 10 else -1 # -1 = blank
|
| 506 |
-
|
| 507 |
-
if clock_value >= 10:
|
| 508 |
-
# Double-digit display (10-40): tens on left, ones on right
|
| 509 |
-
# Store tens sample (left position)
|
| 510 |
-
tens_sample = DigitSample(
|
| 511 |
-
digit_value=tens_digit,
|
| 512 |
-
is_tens_digit=True,
|
| 513 |
-
position="left",
|
| 514 |
-
image=tens_img,
|
| 515 |
-
source_clock_value=clock_value,
|
| 516 |
-
timestamp=timestamp,
|
| 517 |
-
confidence=confidence,
|
| 518 |
-
)
|
| 519 |
-
tens_key = (True, tens_digit, "left")
|
| 520 |
-
if tens_key not in self.samples:
|
| 521 |
-
self.samples[tens_key] = []
|
| 522 |
-
self.samples[tens_key].append(tens_sample)
|
| 523 |
-
|
| 524 |
-
# Store ones sample (right position)
|
| 525 |
-
ones_sample = DigitSample(
|
| 526 |
-
digit_value=ones_digit,
|
| 527 |
-
is_tens_digit=False,
|
| 528 |
-
position="right",
|
| 529 |
-
image=ones_right_img,
|
| 530 |
-
source_clock_value=clock_value,
|
| 531 |
-
timestamp=timestamp,
|
| 532 |
-
confidence=confidence,
|
| 533 |
-
)
|
| 534 |
-
ones_key = (False, ones_digit, "right")
|
| 535 |
-
if ones_key not in self.samples:
|
| 536 |
-
self.samples[ones_key] = []
|
| 537 |
-
self.samples[ones_key].append(ones_sample)
|
| 538 |
-
|
| 539 |
-
logger.debug(
|
| 540 |
-
"Added double-digit sample: clock=%d, tens=%d (left), ones=%d (right), t=%.1f",
|
| 541 |
-
clock_value,
|
| 542 |
-
tens_digit,
|
| 543 |
-
ones_digit,
|
| 544 |
-
timestamp,
|
| 545 |
-
)
|
| 546 |
-
else:
|
| 547 |
-
# Single-digit display (0-9): digit is centered, tens position is blank
|
| 548 |
-
# Store blank sample (far-left position - should be truly empty)
|
| 549 |
-
blank_sample = DigitSample(
|
| 550 |
-
digit_value=-1, # blank
|
| 551 |
-
is_tens_digit=True,
|
| 552 |
-
position="left", # Still use "left" as the position key for compatibility
|
| 553 |
-
image=blank_img, # Now using far-left region that's truly empty
|
| 554 |
-
source_clock_value=clock_value,
|
| 555 |
-
timestamp=timestamp,
|
| 556 |
-
confidence=confidence,
|
| 557 |
-
)
|
| 558 |
-
blank_key = (True, -1, "left")
|
| 559 |
-
if blank_key not in self.samples:
|
| 560 |
-
self.samples[blank_key] = []
|
| 561 |
-
self.samples[blank_key].append(blank_sample)
|
| 562 |
-
|
| 563 |
-
# Store ones sample (center position)
|
| 564 |
-
ones_sample = DigitSample(
|
| 565 |
-
digit_value=ones_digit,
|
| 566 |
-
is_tens_digit=False,
|
| 567 |
-
position="center",
|
| 568 |
-
image=ones_center_img,
|
| 569 |
-
source_clock_value=clock_value,
|
| 570 |
-
timestamp=timestamp,
|
| 571 |
-
confidence=confidence,
|
| 572 |
-
)
|
| 573 |
-
ones_key = (False, ones_digit, "center")
|
| 574 |
-
if ones_key not in self.samples:
|
| 575 |
-
self.samples[ones_key] = []
|
| 576 |
-
self.samples[ones_key].append(ones_sample)
|
| 577 |
-
|
| 578 |
-
logger.debug(
|
| 579 |
-
"Added single-digit sample: clock=%d, ones=%d (center), blank (far-left), t=%.1f",
|
| 580 |
-
clock_value,
|
| 581 |
-
ones_digit,
|
| 582 |
-
timestamp,
|
| 583 |
-
)
|
| 584 |
-
|
| 585 |
-
def get_sample_count(self) -> Dict[str, int]:
|
| 586 |
-
"""Get count of samples collected for each digit and position."""
|
| 587 |
-
counts = {}
|
| 588 |
-
for (is_tens, digit, position), samples in self.samples.items():
|
| 589 |
-
type_str = "tens" if is_tens else "ones"
|
| 590 |
-
digit_str = "blank" if digit == -1 else str(digit)
|
| 591 |
-
key = f"{type_str}_{digit_str}_{position}"
|
| 592 |
-
counts[key] = len(samples)
|
| 593 |
-
return counts
|
| 594 |
-
|
| 595 |
-
def build_templates(self, min_samples: int = 3) -> DigitTemplateLibrary:
|
| 596 |
-
"""
|
| 597 |
-
Build templates from collected samples.
|
| 598 |
-
|
| 599 |
-
For each digit/position combination, averages multiple samples
|
| 600 |
-
to create a robust template.
|
| 601 |
-
|
| 602 |
-
Args:
|
| 603 |
-
min_samples: Minimum samples required to build a template (default: 3)
|
| 604 |
-
|
| 605 |
-
Returns:
|
| 606 |
-
DigitTemplateLibrary with built templates
|
| 607 |
-
"""
|
| 608 |
-
library = DigitTemplateLibrary()
|
| 609 |
-
|
| 610 |
-
for (is_tens, digit, position), samples in self.samples.items():
|
| 611 |
-
if len(samples) < min_samples:
|
| 612 |
-
digit_display = "blank" if digit == -1 else str(digit)
|
| 613 |
-
logger.warning(
|
| 614 |
-
"Insufficient samples for %s digit %s (%s): %d < %d",
|
| 615 |
-
"tens" if is_tens else "ones",
|
| 616 |
-
digit_display,
|
| 617 |
-
position,
|
| 618 |
-
len(samples),
|
| 619 |
-
min_samples,
|
| 620 |
-
)
|
| 621 |
-
continue
|
| 622 |
-
|
| 623 |
-
# Resize all samples to match dimensions of first sample
|
| 624 |
-
target_shape = samples[0].image.shape
|
| 625 |
-
|
| 626 |
-
# Average the samples (with resizing if needed)
|
| 627 |
-
sum_image = np.zeros(target_shape, dtype=np.float32)
|
| 628 |
-
valid_count = 0
|
| 629 |
-
total_confidence = 0.0
|
| 630 |
-
|
| 631 |
-
for sample in samples:
|
| 632 |
-
img = sample.image
|
| 633 |
-
if img.shape != target_shape:
|
| 634 |
-
img = cv2.resize(img, (target_shape[1], target_shape[0]))
|
| 635 |
-
sum_image += img.astype(np.float32)
|
| 636 |
-
valid_count += 1
|
| 637 |
-
total_confidence += sample.confidence
|
| 638 |
-
|
| 639 |
-
if valid_count > 0:
|
| 640 |
-
avg_image = (sum_image / valid_count).astype(np.uint8)
|
| 641 |
-
|
| 642 |
-
# Threshold the averaged image to clean it up
|
| 643 |
-
_, template_img = cv2.threshold(avg_image, 127, 255, cv2.THRESH_BINARY)
|
| 644 |
-
|
| 645 |
-
template = DigitTemplate(
|
| 646 |
-
digit_value=digit,
|
| 647 |
-
is_tens_digit=is_tens,
|
| 648 |
-
position=position,
|
| 649 |
-
template=template_img,
|
| 650 |
-
sample_count=valid_count,
|
| 651 |
-
avg_confidence=total_confidence / valid_count,
|
| 652 |
-
)
|
| 653 |
-
|
| 654 |
-
library.add_template(template)
|
| 655 |
-
digit_display = "blank" if digit == -1 else str(digit)
|
| 656 |
-
logger.info(
|
| 657 |
-
"Built template: %s digit %s (%s) from %d samples",
|
| 658 |
-
"tens" if is_tens else "ones",
|
| 659 |
-
digit_display,
|
| 660 |
-
position,
|
| 661 |
-
valid_count,
|
| 662 |
-
)
|
| 663 |
-
|
| 664 |
-
# Log coverage status
|
| 665 |
-
coverage = library.get_coverage_status()
|
| 666 |
-
logger.info(
|
| 667 |
-
"Template coverage: %d/%d (%.1f%%)",
|
| 668 |
-
coverage["total_have"],
|
| 669 |
-
coverage["total_needed"],
|
| 670 |
-
100 * coverage["total_have"] / coverage["total_needed"],
|
| 671 |
-
)
|
| 672 |
-
|
| 673 |
-
return library
|
| 674 |
-
|
| 675 |
-
def get_coverage_status(self) -> Dict[str, any]:
|
| 676 |
-
"""Get current sample coverage status."""
|
| 677 |
-
ones_center_have = set()
|
| 678 |
-
ones_right_have = set()
|
| 679 |
-
tens_have = set()
|
| 680 |
-
has_blank = False
|
| 681 |
-
|
| 682 |
-
for (is_tens, digit, position), samples in self.samples.items():
|
| 683 |
-
if len(samples) >= 1: # At least one sample
|
| 684 |
-
if is_tens:
|
| 685 |
-
if digit == -1:
|
| 686 |
-
has_blank = True
|
| 687 |
-
else:
|
| 688 |
-
tens_have.add(digit)
|
| 689 |
-
else:
|
| 690 |
-
if position == "center":
|
| 691 |
-
ones_center_have.add(digit)
|
| 692 |
-
elif position == "right":
|
| 693 |
-
ones_right_have.add(digit)
|
| 694 |
-
|
| 695 |
-
return {
|
| 696 |
-
"ones_center": sorted(ones_center_have),
|
| 697 |
-
"ones_right": sorted(ones_right_have),
|
| 698 |
-
"tens": sorted(tens_have),
|
| 699 |
-
"has_blank": has_blank,
|
| 700 |
-
"ones_center_missing": sorted(set(DigitTemplateLibrary.ONES_DIGITS) - ones_center_have),
|
| 701 |
-
"ones_right_missing": sorted(set(DigitTemplateLibrary.ONES_DIGITS) - ones_right_have),
|
| 702 |
-
"tens_missing": sorted(set([1, 2, 3, 4]) - tens_have),
|
| 703 |
-
}
|
| 704 |
-
|
| 705 |
-
def get_coverage_estimate(self) -> float:
|
| 706 |
-
"""
|
| 707 |
-
Get a simple coverage estimate as a float (0.0-1.0).
|
| 708 |
-
|
| 709 |
-
Returns:
|
| 710 |
-
Coverage estimate where 1.0 = all templates have samples
|
| 711 |
-
"""
|
| 712 |
-
status = self.get_coverage_status()
|
| 713 |
-
|
| 714 |
-
# Count what we have (with at least 1 sample each)
|
| 715 |
-
total_have = len(status["ones_center"]) + len(status["ones_right"]) + len(status["tens"])
|
| 716 |
-
if status["has_blank"]:
|
| 717 |
-
total_have += 1
|
| 718 |
-
|
| 719 |
-
# Total needed: 10 ones_center + 10 ones_right + 4 tens + 1 blank = 25
|
| 720 |
-
total_needed = 25
|
| 721 |
-
|
| 722 |
-
return total_have / total_needed
|
| 723 |
-
|
| 724 |
-
|
| 725 |
-
class TemplatePlayClockReader:
|
| 726 |
-
"""
|
| 727 |
-
Reads play clock values using template matching with dual-mode detection.
|
| 728 |
-
|
| 729 |
-
Uses digit templates built from OCR-labeled samples to achieve
|
| 730 |
-
lightning-fast digit recognition (~0.3ms/frame vs ~49ms for OCR).
|
| 731 |
-
|
| 732 |
-
Implements dual-mode matching to handle both display layouts:
|
| 733 |
-
- Single-digit (0-9): Digit is CENTER-aligned
|
| 734 |
-
- Double-digit (10-40): Tens on LEFT, ones on RIGHT
|
| 735 |
-
|
| 736 |
-
The reader tries both interpretations and picks the best match.
|
| 737 |
-
|
| 738 |
-
Handles slight translational shifts via template matching search window.
|
| 739 |
-
"""
|
| 740 |
-
|
| 741 |
-
# Confidence thresholds
|
| 742 |
-
MIN_DIGIT_CONFIDENCE = 0.6 # Minimum confidence to accept a digit match
|
| 743 |
-
MIN_CLOCK_CONFIDENCE = 0.5 # Minimum overall confidence to return a reading
|
| 744 |
-
|
| 745 |
-
def __init__(self, template_library: DigitTemplateLibrary, region_width: int = 50, region_height: int = 28):
|
| 746 |
-
"""
|
| 747 |
-
Initialize the template-based clock reader.
|
| 748 |
-
|
| 749 |
-
Args:
|
| 750 |
-
template_library: Pre-built digit template library
|
| 751 |
-
region_width: Play clock region width
|
| 752 |
-
region_height: Play clock region height
|
| 753 |
-
"""
|
| 754 |
-
self.library = template_library
|
| 755 |
-
self.region_width = region_width
|
| 756 |
-
self.region_height = region_height
|
| 757 |
-
self.scale_factor = 4 # Must match the scale used in template building
|
| 758 |
-
|
| 759 |
-
logger.info("TemplatePlayClockReader initialized")
|
| 760 |
-
|
| 761 |
-
def preprocess_region(self, region: np.ndarray) -> np.ndarray:
|
| 762 |
-
"""
|
| 763 |
-
Preprocess play clock region for template matching.
|
| 764 |
-
|
| 765 |
-
Uses color normalization to handle both red and white digits uniformly.
|
| 766 |
-
|
| 767 |
-
Args:
|
| 768 |
-
region: Play clock region (BGR format)
|
| 769 |
-
|
| 770 |
-
Returns:
|
| 771 |
-
Preprocessed binary image (scaled up)
|
| 772 |
-
"""
|
| 773 |
-
# Normalize color (red → white conversion)
|
| 774 |
-
gray = normalize_to_grayscale(region)
|
| 775 |
-
|
| 776 |
-
# Scale up to match template resolution
|
| 777 |
-
scaled = cv2.resize(gray, None, fx=self.scale_factor, fy=self.scale_factor, interpolation=cv2.INTER_LINEAR)
|
| 778 |
-
|
| 779 |
-
# Use Otsu's thresholding
|
| 780 |
-
_, binary = cv2.threshold(scaled, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
| 781 |
-
|
| 782 |
-
# Ensure white digits on black background
|
| 783 |
-
mean_intensity = np.mean(binary)
|
| 784 |
-
if mean_intensity > 128:
|
| 785 |
-
binary = cv2.bitwise_not(binary)
|
| 786 |
-
|
| 787 |
-
return binary
|
| 788 |
-
|
| 789 |
-
def _extract_left_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 790 |
-
"""Extract left region for tens digit."""
|
| 791 |
-
_, w = preprocessed.shape[:2]
|
| 792 |
-
mid_x = w // 2
|
| 793 |
-
return preprocessed[:, : mid_x - 2]
|
| 794 |
-
|
| 795 |
-
def _extract_right_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 796 |
-
"""Extract right region for ones digit in double-digit displays."""
|
| 797 |
-
_, w = preprocessed.shape[:2]
|
| 798 |
-
mid_x = w // 2
|
| 799 |
-
return preprocessed[:, mid_x + 2 :]
|
| 800 |
-
|
| 801 |
-
def _extract_center_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 802 |
-
"""Extract center region for ones digit in single-digit displays."""
|
| 803 |
-
_, w = preprocessed.shape[:2]
|
| 804 |
-
center_start = int(w * 0.20)
|
| 805 |
-
center_end = int(w * 0.80)
|
| 806 |
-
return preprocessed[:, center_start:center_end]
|
| 807 |
-
|
| 808 |
-
def _extract_far_left_region(self, preprocessed: np.ndarray) -> np.ndarray:
|
| 809 |
-
"""Extract far left region for blank detection in single-digit displays."""
|
| 810 |
-
_, w = preprocessed.shape[:2]
|
| 811 |
-
far_left_end = int(w * 0.20)
|
| 812 |
-
return preprocessed[:, :far_left_end]
|
| 813 |
-
|
| 814 |
-
def match_digit(self, region: np.ndarray, templates: List[DigitTemplate]) -> TemplateMatchResult:
|
| 815 |
-
"""
|
| 816 |
-
Match a region against a set of digit templates.
|
| 817 |
-
|
| 818 |
-
Uses template matching with a small search window for robustness
|
| 819 |
-
to slight translational shifts.
|
| 820 |
-
|
| 821 |
-
Args:
|
| 822 |
-
region: Preprocessed digit region
|
| 823 |
-
templates: List of templates to match against
|
| 824 |
-
|
| 825 |
-
Returns:
|
| 826 |
-
TemplateMatchResult with best match
|
| 827 |
-
"""
|
| 828 |
-
if not templates:
|
| 829 |
-
return TemplateMatchResult(digit_value=-1, confidence=0.0, is_valid=False)
|
| 830 |
-
|
| 831 |
-
best_digit = -1
|
| 832 |
-
best_confidence = -1.0
|
| 833 |
-
|
| 834 |
-
for template in templates:
|
| 835 |
-
tmpl = template.template
|
| 836 |
-
|
| 837 |
-
# Ensure template fits within region (with search window)
|
| 838 |
-
if tmpl.shape[0] > region.shape[0] or tmpl.shape[1] > region.shape[1]:
|
| 839 |
-
# Resize template to fit
|
| 840 |
-
scale = min(region.shape[0] / tmpl.shape[0], region.shape[1] / tmpl.shape[1]) * 0.9
|
| 841 |
-
new_h = int(tmpl.shape[0] * scale)
|
| 842 |
-
new_w = int(tmpl.shape[1] * scale)
|
| 843 |
-
tmpl = cv2.resize(tmpl, (new_w, new_h))
|
| 844 |
-
|
| 845 |
-
# Template matching with search window
|
| 846 |
-
result = cv2.matchTemplate(region, tmpl, cv2.TM_CCOEFF_NORMED)
|
| 847 |
-
_, max_val, _, _ = cv2.minMaxLoc(result)
|
| 848 |
-
|
| 849 |
-
if max_val > best_confidence:
|
| 850 |
-
best_confidence = max_val
|
| 851 |
-
best_digit = template.digit_value
|
| 852 |
-
|
| 853 |
-
is_valid = best_confidence >= self.MIN_DIGIT_CONFIDENCE
|
| 854 |
-
|
| 855 |
-
return TemplateMatchResult(digit_value=best_digit, confidence=best_confidence, is_valid=is_valid)
|
| 856 |
-
|
| 857 |
-
def _try_double_digit(self, preprocessed: np.ndarray) -> TemplatePlayClockReading:
|
| 858 |
-
"""
|
| 859 |
-
Try to read as double-digit display (10-40): tens on left, ones on right.
|
| 860 |
-
|
| 861 |
-
Args:
|
| 862 |
-
preprocessed: Preprocessed play clock region
|
| 863 |
-
|
| 864 |
-
Returns:
|
| 865 |
-
TemplatePlayClockReading with result of double-digit interpretation
|
| 866 |
-
"""
|
| 867 |
-
# Get templates for double-digit positions
|
| 868 |
-
# Filter out blank (-1) since it was built from far-left region (different size)
|
| 869 |
-
tens_templates = [t for t in self.library.get_all_templates(is_tens=True, position="left") if t.digit_value != -1]
|
| 870 |
-
ones_templates = self.library.get_all_templates(is_tens=False, position="right")
|
| 871 |
-
|
| 872 |
-
if not ones_templates or not tens_templates:
|
| 873 |
-
return TemplatePlayClockReading(
|
| 874 |
-
detected=False,
|
| 875 |
-
value=None,
|
| 876 |
-
confidence=0.0,
|
| 877 |
-
tens_match=None,
|
| 878 |
-
ones_match=None,
|
| 879 |
-
method="template_double",
|
| 880 |
-
)
|
| 881 |
-
|
| 882 |
-
# Extract regions
|
| 883 |
-
tens_region = self._extract_left_region(preprocessed)
|
| 884 |
-
ones_region = self._extract_right_region(preprocessed)
|
| 885 |
-
|
| 886 |
-
# Match digits
|
| 887 |
-
tens_match = self.match_digit(tens_region, tens_templates)
|
| 888 |
-
ones_match = self.match_digit(ones_region, ones_templates)
|
| 889 |
-
|
| 890 |
-
# Require valid ones match and tens match
|
| 891 |
-
if not ones_match.is_valid or not tens_match.is_valid:
|
| 892 |
-
return TemplatePlayClockReading(
|
| 893 |
-
detected=False,
|
| 894 |
-
value=None,
|
| 895 |
-
confidence=min(tens_match.confidence, ones_match.confidence),
|
| 896 |
-
tens_match=tens_match,
|
| 897 |
-
ones_match=ones_match,
|
| 898 |
-
method="template_double",
|
| 899 |
-
)
|
| 900 |
-
|
| 901 |
-
# Calculate clock value
|
| 902 |
-
clock_value = tens_match.digit_value * 10 + ones_match.digit_value
|
| 903 |
-
|
| 904 |
-
# Validate range (10-40)
|
| 905 |
-
if clock_value < 10 or clock_value > 40:
|
| 906 |
-
return TemplatePlayClockReading(
|
| 907 |
-
detected=False,
|
| 908 |
-
value=None,
|
| 909 |
-
confidence=0.0,
|
| 910 |
-
tens_match=tens_match,
|
| 911 |
-
ones_match=ones_match,
|
| 912 |
-
method="template_double",
|
| 913 |
-
)
|
| 914 |
-
|
| 915 |
-
overall_confidence = (tens_match.confidence + ones_match.confidence) / 2
|
| 916 |
-
detected = overall_confidence >= self.MIN_CLOCK_CONFIDENCE
|
| 917 |
-
|
| 918 |
-
return TemplatePlayClockReading(
|
| 919 |
-
detected=detected,
|
| 920 |
-
value=clock_value if detected else None,
|
| 921 |
-
confidence=overall_confidence,
|
| 922 |
-
tens_match=tens_match,
|
| 923 |
-
ones_match=ones_match,
|
| 924 |
-
method="template_double",
|
| 925 |
-
)
|
| 926 |
-
|
| 927 |
-
def _try_single_digit(self, preprocessed: np.ndarray) -> TemplatePlayClockReading:
|
| 928 |
-
"""
|
| 929 |
-
Try to read as single-digit display (0-9): digit is centered.
|
| 930 |
-
|
| 931 |
-
Args:
|
| 932 |
-
preprocessed: Preprocessed play clock region
|
| 933 |
-
|
| 934 |
-
Returns:
|
| 935 |
-
TemplatePlayClockReading with result of single-digit interpretation
|
| 936 |
-
"""
|
| 937 |
-
# Get templates for single-digit (centered) position
|
| 938 |
-
ones_templates = self.library.get_all_templates(is_tens=False, position="center")
|
| 939 |
-
blank_templates = [t for t in self.library.get_all_templates(is_tens=True, position="left") if t.digit_value == -1]
|
| 940 |
-
|
| 941 |
-
if not ones_templates:
|
| 942 |
-
return TemplatePlayClockReading(
|
| 943 |
-
detected=False,
|
| 944 |
-
value=None,
|
| 945 |
-
confidence=0.0,
|
| 946 |
-
tens_match=None,
|
| 947 |
-
ones_match=None,
|
| 948 |
-
method="template_single",
|
| 949 |
-
)
|
| 950 |
-
|
| 951 |
-
# Extract regions
|
| 952 |
-
center_region = self._extract_center_region(preprocessed)
|
| 953 |
-
far_left_region = self._extract_far_left_region(preprocessed)
|
| 954 |
-
|
| 955 |
-
# Match center digit (ones)
|
| 956 |
-
ones_match = self.match_digit(center_region, ones_templates)
|
| 957 |
-
|
| 958 |
-
# Optionally check that far-left region looks blank
|
| 959 |
-
blank_match = None
|
| 960 |
-
if blank_templates:
|
| 961 |
-
blank_match = self.match_digit(far_left_region, blank_templates)
|
| 962 |
-
|
| 963 |
-
# Require valid ones match
|
| 964 |
-
if not ones_match.is_valid:
|
| 965 |
-
return TemplatePlayClockReading(
|
| 966 |
-
detected=False,
|
| 967 |
-
value=None,
|
| 968 |
-
confidence=ones_match.confidence,
|
| 969 |
-
tens_match=blank_match,
|
| 970 |
-
ones_match=ones_match,
|
| 971 |
-
method="template_single",
|
| 972 |
-
)
|
| 973 |
-
|
| 974 |
-
# Clock value is just the ones digit (0-9)
|
| 975 |
-
clock_value = ones_match.digit_value
|
| 976 |
-
|
| 977 |
-
# Validate range (0-9)
|
| 978 |
-
if clock_value < 0 or clock_value > 9:
|
| 979 |
-
return TemplatePlayClockReading(
|
| 980 |
-
detected=False,
|
| 981 |
-
value=None,
|
| 982 |
-
confidence=0.0,
|
| 983 |
-
tens_match=blank_match,
|
| 984 |
-
ones_match=ones_match,
|
| 985 |
-
method="template_single",
|
| 986 |
-
)
|
| 987 |
-
|
| 988 |
-
# Use only ones confidence for single-digit
|
| 989 |
-
overall_confidence = ones_match.confidence
|
| 990 |
-
detected = overall_confidence >= self.MIN_CLOCK_CONFIDENCE
|
| 991 |
-
|
| 992 |
-
return TemplatePlayClockReading(
|
| 993 |
-
detected=detected,
|
| 994 |
-
value=clock_value if detected else None,
|
| 995 |
-
confidence=overall_confidence,
|
| 996 |
-
tens_match=blank_match,
|
| 997 |
-
ones_match=ones_match,
|
| 998 |
-
method="template_single",
|
| 999 |
-
)
|
| 1000 |
-
|
| 1001 |
-
def read(self, region: np.ndarray) -> TemplatePlayClockReading:
|
| 1002 |
-
"""
|
| 1003 |
-
Read the play clock value from a region using dual-mode template matching.
|
| 1004 |
-
|
| 1005 |
-
Tries both single-digit (centered) and double-digit (left/right) interpretations
|
| 1006 |
-
and returns the result with higher confidence.
|
| 1007 |
-
|
| 1008 |
-
Args:
|
| 1009 |
-
region: Play clock region (BGR format, original size ~50x28)
|
| 1010 |
-
|
| 1011 |
-
Returns:
|
| 1012 |
-
TemplatePlayClockReading with detected value or error state
|
| 1013 |
-
"""
|
| 1014 |
-
# Preprocess the region (handles red-to-white conversion)
|
| 1015 |
-
preprocessed = self.preprocess_region(region)
|
| 1016 |
-
|
| 1017 |
-
# Try both interpretations
|
| 1018 |
-
double_result = self._try_double_digit(preprocessed)
|
| 1019 |
-
single_result = self._try_single_digit(preprocessed)
|
| 1020 |
-
|
| 1021 |
-
# Pick the best result
|
| 1022 |
-
# Priority: detected result with higher confidence
|
| 1023 |
-
if single_result.detected and double_result.detected:
|
| 1024 |
-
# Both detected - pick higher confidence
|
| 1025 |
-
if single_result.confidence > double_result.confidence:
|
| 1026 |
-
return single_result
|
| 1027 |
-
return double_result
|
| 1028 |
-
if single_result.detected:
|
| 1029 |
-
return single_result
|
| 1030 |
-
if double_result.detected:
|
| 1031 |
-
return double_result
|
| 1032 |
-
# Neither detected - return the one with higher confidence for debugging
|
| 1033 |
-
if single_result.confidence > double_result.confidence:
|
| 1034 |
-
return single_result
|
| 1035 |
-
return double_result
|
| 1036 |
-
|
| 1037 |
-
def read_from_frame(
|
| 1038 |
-
self,
|
| 1039 |
-
frame: np.ndarray,
|
| 1040 |
-
scorebug_bbox: Tuple[int, int, int, int],
|
| 1041 |
-
clock_region_offset: Tuple[int, int, int, int],
|
| 1042 |
-
) -> TemplatePlayClockReading:
|
| 1043 |
-
"""
|
| 1044 |
-
Read play clock from a full frame.
|
| 1045 |
-
|
| 1046 |
-
Args:
|
| 1047 |
-
frame: Full video frame (BGR)
|
| 1048 |
-
scorebug_bbox: Scorebug bounding box (x, y, w, h)
|
| 1049 |
-
clock_region_offset: Play clock region offset from scorebug (x_off, y_off, w, h)
|
| 1050 |
-
|
| 1051 |
-
Returns:
|
| 1052 |
-
TemplatePlayClockReading with detected value
|
| 1053 |
-
"""
|
| 1054 |
-
sb_x, sb_y, _, _ = scorebug_bbox
|
| 1055 |
-
pc_x_off, pc_y_off, pc_w, pc_h = clock_region_offset
|
| 1056 |
-
|
| 1057 |
-
# Calculate absolute coordinates
|
| 1058 |
-
pc_x = sb_x + pc_x_off
|
| 1059 |
-
pc_y = sb_y + pc_y_off
|
| 1060 |
-
|
| 1061 |
-
# Validate bounds
|
| 1062 |
-
frame_h, frame_w = frame.shape[:2]
|
| 1063 |
-
if pc_x < 0 or pc_y < 0 or pc_x + pc_w > frame_w or pc_y + pc_h > frame_h:
|
| 1064 |
-
return TemplatePlayClockReading(
|
| 1065 |
-
detected=False,
|
| 1066 |
-
value=None,
|
| 1067 |
-
confidence=0.0,
|
| 1068 |
-
tens_match=None,
|
| 1069 |
-
ones_match=None,
|
| 1070 |
-
method="template",
|
| 1071 |
-
)
|
| 1072 |
-
|
| 1073 |
-
# Extract region
|
| 1074 |
-
region = frame[pc_y : pc_y + pc_h, pc_x : pc_x + pc_w].copy()
|
| 1075 |
-
|
| 1076 |
-
return self.read(region)
|
| 1077 |
-
|
| 1078 |
-
def read_from_fixed_location(
|
| 1079 |
-
self,
|
| 1080 |
-
frame: np.ndarray,
|
| 1081 |
-
absolute_coords: Tuple[int, int, int, int],
|
| 1082 |
-
) -> TemplatePlayClockReading:
|
| 1083 |
-
"""
|
| 1084 |
-
Read play clock from a fixed absolute location in the frame.
|
| 1085 |
-
|
| 1086 |
-
This bypasses scorebug detection entirely - useful when templates
|
| 1087 |
-
are built and we know exactly where the play clock should be.
|
| 1088 |
-
|
| 1089 |
-
Args:
|
| 1090 |
-
frame: Full video frame (BGR)
|
| 1091 |
-
absolute_coords: Absolute play clock location (x, y, w, h)
|
| 1092 |
-
|
| 1093 |
-
Returns:
|
| 1094 |
-
TemplatePlayClockReading with detected value
|
| 1095 |
-
"""
|
| 1096 |
-
x, y, w, h = absolute_coords
|
| 1097 |
-
|
| 1098 |
-
# Validate bounds
|
| 1099 |
-
frame_h, frame_w = frame.shape[:2]
|
| 1100 |
-
if x < 0 or y < 0 or x + w > frame_w or y + h > frame_h:
|
| 1101 |
-
return TemplatePlayClockReading(
|
| 1102 |
-
detected=False,
|
| 1103 |
-
value=None,
|
| 1104 |
-
confidence=0.0,
|
| 1105 |
-
tens_match=None,
|
| 1106 |
-
ones_match=None,
|
| 1107 |
-
method="template",
|
| 1108 |
-
)
|
| 1109 |
-
|
| 1110 |
-
# Extract region
|
| 1111 |
-
region = frame[y : y + h, x : x + w].copy()
|
| 1112 |
-
|
| 1113 |
-
return self.read(region)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/detectors/models.py
DELETED
|
@@ -1,165 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Dataclass models for the detectors module.
|
| 3 |
-
|
| 4 |
-
This module contains all the data structures used by the detector components
|
| 5 |
-
for passing information between pipeline stages.
|
| 6 |
-
"""
|
| 7 |
-
|
| 8 |
-
from dataclasses import dataclass
|
| 9 |
-
from typing import Optional, Tuple, List
|
| 10 |
-
|
| 11 |
-
import numpy as np
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
# =============================================================================
|
| 15 |
-
# Scorebug Detection Models
|
| 16 |
-
# =============================================================================
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
@dataclass
|
| 20 |
-
class ScorebugDetection:
|
| 21 |
-
"""Results from scorebug detection."""
|
| 22 |
-
|
| 23 |
-
detected: bool # Whether scorebug was detected
|
| 24 |
-
confidence: float # Confidence score (0.0 to 1.0)
|
| 25 |
-
bbox: Optional[Tuple[int, int, int, int]] = None # Bounding box (x, y, width, height)
|
| 26 |
-
method: str = "unknown" # Detection method used
|
| 27 |
-
left_confidence: Optional[float] = None # Left half confidence (when split detection enabled)
|
| 28 |
-
right_confidence: Optional[float] = None # Right half confidence (when split detection enabled)
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
# =============================================================================
|
| 32 |
-
# Play Clock Reading Models
|
| 33 |
-
# =============================================================================
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
@dataclass
|
| 37 |
-
class PlayClockReading:
|
| 38 |
-
"""Result from play clock OCR reading."""
|
| 39 |
-
|
| 40 |
-
detected: bool # Whether a valid play clock value was detected
|
| 41 |
-
value: Optional[int] # Play clock value (0-40 seconds), None if unreadable
|
| 42 |
-
confidence: float # Confidence score (0.0 to 1.0)
|
| 43 |
-
raw_text: str # Raw OCR output for debugging
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
@dataclass
|
| 47 |
-
class PlayClockRegionConfig:
|
| 48 |
-
"""Configuration for the play clock region relative to the scorebug bounding box."""
|
| 49 |
-
|
| 50 |
-
x_offset: int # X offset from scorebug left edge
|
| 51 |
-
y_offset: int # Y offset from scorebug top edge
|
| 52 |
-
width: int # Width of play clock region
|
| 53 |
-
height: int # Height of play clock region
|
| 54 |
-
source_video: str # Video used to identify region
|
| 55 |
-
scorebug_template: str # Template used for scorebug detection
|
| 56 |
-
samples_used: int # Number of frames used to verify region
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
# =============================================================================
|
| 60 |
-
# Timeout Tracking Models
|
| 61 |
-
# =============================================================================
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
@dataclass
|
| 65 |
-
class TimeoutRegionConfig:
|
| 66 |
-
"""Configuration for a team's timeout indicator region."""
|
| 67 |
-
|
| 68 |
-
team_name: str # "home" or "away"
|
| 69 |
-
bbox: Tuple[int, int, int, int] # x, y, width, height for the 3-oval group
|
| 70 |
-
|
| 71 |
-
def to_dict(self) -> dict:
|
| 72 |
-
"""Convert to dictionary for JSON serialization."""
|
| 73 |
-
return {
|
| 74 |
-
"team_name": self.team_name,
|
| 75 |
-
"bbox": {"x": self.bbox[0], "y": self.bbox[1], "width": self.bbox[2], "height": self.bbox[3]},
|
| 76 |
-
}
|
| 77 |
-
|
| 78 |
-
@classmethod
|
| 79 |
-
def from_dict(cls, data: dict) -> "TimeoutRegionConfig":
|
| 80 |
-
"""Create from dictionary."""
|
| 81 |
-
bbox = (data["bbox"]["x"], data["bbox"]["y"], data["bbox"]["width"], data["bbox"]["height"])
|
| 82 |
-
return cls(team_name=data["team_name"], bbox=bbox)
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
@dataclass
|
| 86 |
-
class TimeoutReading:
|
| 87 |
-
"""Results from timeout indicator reading."""
|
| 88 |
-
|
| 89 |
-
home_timeouts: int # 0-3 timeouts remaining
|
| 90 |
-
away_timeouts: int # 0-3 timeouts remaining
|
| 91 |
-
confidence: float # Overall confidence in reading
|
| 92 |
-
home_oval_states: Optional[List[bool]] = None # True = white (available), False = dark (used)
|
| 93 |
-
away_oval_states: Optional[List[bool]] = None # True = white (available), False = dark (used)
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
# =============================================================================
|
| 97 |
-
# Play State Machine Models
|
| 98 |
-
# =============================================================================
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
@dataclass
|
| 102 |
-
class PlayEvent:
|
| 103 |
-
"""Represents a detected play with start and end times."""
|
| 104 |
-
|
| 105 |
-
play_number: int # Sequential play number
|
| 106 |
-
start_time: float # Video timestamp (seconds) when play started
|
| 107 |
-
end_time: float # Video timestamp (seconds) when play ended - from backward counting
|
| 108 |
-
confidence: float # Overall confidence score
|
| 109 |
-
start_method: str # How start was detected: "clock_reset", "clock_reset_25", "clock_freeze"
|
| 110 |
-
end_method: str # How end was detected: "backward_calc" (primary), "direct_detect" (secondary)
|
| 111 |
-
direct_end_time: Optional[float] = None # End time from direct detection (for comparison)
|
| 112 |
-
start_clock_value: Optional[int] = None # Clock value at start detection
|
| 113 |
-
end_clock_value: Optional[int] = None # Clock value used for backward calculation
|
| 114 |
-
play_type: str = "normal" # Type of play: "normal", "special" (punt/fg/xp after 25-second reset)
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
# =============================================================================
|
| 118 |
-
# Digit Template Matching Models
|
| 119 |
-
# =============================================================================
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
@dataclass
|
| 123 |
-
class DigitSample:
|
| 124 |
-
"""A single digit sample extracted from a play clock region."""
|
| 125 |
-
|
| 126 |
-
digit_value: int # 0-9 for ones digit, 0-4 for tens digit, -1 for blank
|
| 127 |
-
is_tens_digit: bool # True if this is the tens place digit
|
| 128 |
-
position: str # "left", "center", or "right" - where digit appears in region
|
| 129 |
-
image: np.ndarray # The digit image (grayscale, preprocessed)
|
| 130 |
-
source_clock_value: int # The full clock value this was extracted from
|
| 131 |
-
timestamp: float # Video timestamp where this was captured
|
| 132 |
-
confidence: float # OCR confidence for this sample
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
@dataclass
|
| 136 |
-
class DigitTemplate:
|
| 137 |
-
"""A template for matching a specific digit."""
|
| 138 |
-
|
| 139 |
-
digit_value: int # 0-9 for ones, 0-4 for tens, -1 for blank
|
| 140 |
-
is_tens_digit: bool # True if this is a tens place template
|
| 141 |
-
position: str # "left", "center", or "right" - where digit appears in region
|
| 142 |
-
template: np.ndarray # The template image (grayscale)
|
| 143 |
-
sample_count: int # Number of samples used to build this template
|
| 144 |
-
avg_confidence: float # Average OCR confidence of source samples
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
@dataclass
|
| 148 |
-
class TemplateMatchResult:
|
| 149 |
-
"""Result from template matching for a single digit."""
|
| 150 |
-
|
| 151 |
-
digit_value: int # Matched digit value (-1 for blank, 0-9 for digits)
|
| 152 |
-
confidence: float # Match confidence (0.0 to 1.0)
|
| 153 |
-
is_valid: bool # Whether match confidence exceeds threshold
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
@dataclass
|
| 157 |
-
class TemplatePlayClockReading:
|
| 158 |
-
"""Result from template-based play clock reading."""
|
| 159 |
-
|
| 160 |
-
detected: bool # Whether a valid clock value was read
|
| 161 |
-
value: Optional[int] # Clock value (0-40), None if unreadable
|
| 162 |
-
confidence: float # Overall confidence score
|
| 163 |
-
tens_match: Optional[TemplateMatchResult] # Tens digit match result
|
| 164 |
-
ones_match: Optional[TemplateMatchResult] # Ones digit match result
|
| 165 |
-
method: str # "template" or "ocr_fallback"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/detectors/play_state_machine.py
DELETED
|
@@ -1,602 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Play state machine module for detecting play start and end times.
|
| 3 |
-
|
| 4 |
-
This module tracks play clock state changes to determine when plays begin and end.
|
| 5 |
-
The primary method for determining play end time is backward counting from the
|
| 6 |
-
next observed play clock value after the play.
|
| 7 |
-
"""
|
| 8 |
-
|
| 9 |
-
import logging
|
| 10 |
-
from dataclasses import dataclass, field
|
| 11 |
-
from enum import Enum
|
| 12 |
-
from typing import Optional, List
|
| 13 |
-
|
| 14 |
-
from .models import ScorebugDetection, PlayClockReading, PlayEvent
|
| 15 |
-
|
| 16 |
-
logger = logging.getLogger(__name__)
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
class PlayState(Enum):
|
| 20 |
-
"""Current state of play detection."""
|
| 21 |
-
|
| 22 |
-
IDLE = "idle" # No scorebug detected, waiting
|
| 23 |
-
PRE_SNAP = "pre_snap" # Scorebug visible, clock ticking down before snap
|
| 24 |
-
PLAY_IN_PROGRESS = "play_in_progress" # Ball snapped, play is live
|
| 25 |
-
POST_PLAY = "post_play" # Play ended, waiting for next play setup
|
| 26 |
-
NO_SCOREBUG = "no_scorebug" # Scorebug lost during/after play (e.g., replay)
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
@dataclass
|
| 30 |
-
class PlayStateMachine: # pylint: disable=too-many-instance-attributes
|
| 31 |
-
"""
|
| 32 |
-
State machine for detecting play boundaries using play clock behavior.
|
| 33 |
-
|
| 34 |
-
Detection Strategy:
|
| 35 |
-
- Play START: Detected when play clock resets to 40 (or potentially freezes - needs validation)
|
| 36 |
-
- Play END: **Always use backward counting** - calculate from next observed clock value after play
|
| 37 |
-
Requires K consecutive descending clock ticks to confirm (avoids false positives)
|
| 38 |
-
|
| 39 |
-
Backward Counting:
|
| 40 |
-
When the play clock reappears showing value X (where X < 40), the play end time is:
|
| 41 |
-
play_end_time = current_time - (40 - X)
|
| 42 |
-
|
| 43 |
-
This method is reliable even when the broadcast cuts to replays.
|
| 44 |
-
"""
|
| 45 |
-
|
| 46 |
-
# Configuration
|
| 47 |
-
clock_stable_frames: int = 3 # Frames with same clock value to consider it "stable"
|
| 48 |
-
max_play_duration: float = 15.0 # Maximum expected play duration in seconds
|
| 49 |
-
scorebug_lost_timeout: float = 30.0 # Seconds before resetting state when scorebug lost
|
| 50 |
-
required_countdown_ticks: int = 3 # Number of consecutive descending ticks required to confirm play end
|
| 51 |
-
min_clock_jump_for_reset: int = 5 # Minimum jump in clock value to consider it a valid reset (40 from X where X <= 40 - this value)
|
| 52 |
-
|
| 53 |
-
# Internal state
|
| 54 |
-
state: PlayState = field(default=PlayState.IDLE)
|
| 55 |
-
plays: List[PlayEvent] = field(default_factory=list)
|
| 56 |
-
|
| 57 |
-
# Tracking variables
|
| 58 |
-
_play_count: int = field(default=0)
|
| 59 |
-
_last_clock_value: Optional[int] = field(default=None)
|
| 60 |
-
_last_clock_timestamp: Optional[float] = field(default=None)
|
| 61 |
-
_clock_stable_count: int = field(default=0)
|
| 62 |
-
_current_play_start_time: Optional[float] = field(default=None)
|
| 63 |
-
_current_play_start_method: Optional[str] = field(default=None)
|
| 64 |
-
_current_play_start_clock: Optional[int] = field(default=None)
|
| 65 |
-
_last_scorebug_timestamp: Optional[float] = field(default=None)
|
| 66 |
-
_direct_end_time: Optional[float] = field(default=None)
|
| 67 |
-
_countdown_history: List[tuple] = field(default_factory=list) # List of (timestamp, clock_value) for countdown tracking
|
| 68 |
-
_first_40_timestamp: Optional[float] = field(default=None) # When we first saw 40 in current play (for turnover detection)
|
| 69 |
-
_current_play_clock_base: int = field(default=40) # Clock base for current play (40 for normal, 25 for special teams)
|
| 70 |
-
_current_play_type: str = field(default="normal") # Type of current play being tracked
|
| 71 |
-
|
| 72 |
-
def update(self, timestamp: float, scorebug: ScorebugDetection, clock: PlayClockReading) -> Optional[PlayEvent]:
|
| 73 |
-
"""
|
| 74 |
-
Update the state machine with new frame data.
|
| 75 |
-
|
| 76 |
-
Args:
|
| 77 |
-
timestamp: Current video timestamp in seconds
|
| 78 |
-
scorebug: Scorebug detection result
|
| 79 |
-
clock: Play clock reading result
|
| 80 |
-
|
| 81 |
-
Returns:
|
| 82 |
-
PlayEvent if a play just ended, None otherwise
|
| 83 |
-
"""
|
| 84 |
-
# Handle scorebug presence/absence
|
| 85 |
-
if not scorebug.detected:
|
| 86 |
-
return self._handle_no_scorebug(timestamp)
|
| 87 |
-
|
| 88 |
-
# Update last scorebug timestamp
|
| 89 |
-
self._last_scorebug_timestamp = timestamp
|
| 90 |
-
|
| 91 |
-
# Handle invalid clock reading
|
| 92 |
-
if not clock.detected or clock.value is None:
|
| 93 |
-
self._handle_invalid_clock(timestamp)
|
| 94 |
-
return None
|
| 95 |
-
|
| 96 |
-
# Process valid clock reading
|
| 97 |
-
return self._process_clock_value(timestamp, clock.value)
|
| 98 |
-
|
| 99 |
-
def _handle_no_scorebug(self, timestamp: float) -> Optional[PlayEvent]:
|
| 100 |
-
"""Handle case when scorebug is not detected."""
|
| 101 |
-
if self.state == PlayState.IDLE:
|
| 102 |
-
return None
|
| 103 |
-
|
| 104 |
-
# Check if we've lost scorebug for too long
|
| 105 |
-
if self._last_scorebug_timestamp is not None:
|
| 106 |
-
time_since_scorebug = timestamp - self._last_scorebug_timestamp
|
| 107 |
-
if time_since_scorebug > self.scorebug_lost_timeout:
|
| 108 |
-
logger.warning("Scorebug lost for %.1fs, resetting to IDLE", time_since_scorebug)
|
| 109 |
-
self._reset_state()
|
| 110 |
-
return None
|
| 111 |
-
|
| 112 |
-
# TURNOVER DETECTION: If we were in PLAY_IN_PROGRESS with significant time at 40,
|
| 113 |
-
# and scorebug disappears (likely for replay/review), record the play.
|
| 114 |
-
if self.state == PlayState.PLAY_IN_PROGRESS and self._first_40_timestamp is not None:
|
| 115 |
-
time_at_40 = (self._last_scorebug_timestamp - self._first_40_timestamp) if self._last_scorebug_timestamp else 0
|
| 116 |
-
min_time_for_play = 2.0
|
| 117 |
-
|
| 118 |
-
if time_at_40 > min_time_for_play:
|
| 119 |
-
# Play happened, scorebug disappeared (likely for replay/review)
|
| 120 |
-
play_end_time = self._last_scorebug_timestamp if self._last_scorebug_timestamp else timestamp
|
| 121 |
-
logger.info(
|
| 122 |
-
"Scorebug disappeared during play at %.1fs (%.1fs at 40). Recording play end at %.1fs.",
|
| 123 |
-
timestamp,
|
| 124 |
-
time_at_40,
|
| 125 |
-
play_end_time,
|
| 126 |
-
)
|
| 127 |
-
# Record the play before transitioning
|
| 128 |
-
completed_play = self._end_play_with_backward_calc(timestamp, 40, play_end_time)
|
| 129 |
-
self.state = PlayState.NO_SCOREBUG
|
| 130 |
-
return completed_play
|
| 131 |
-
|
| 132 |
-
# Transition to NO_SCOREBUG state if we were in a play
|
| 133 |
-
if self.state in (PlayState.PRE_SNAP, PlayState.PLAY_IN_PROGRESS, PlayState.POST_PLAY):
|
| 134 |
-
logger.debug("Scorebug lost at %.1fs, entering NO_SCOREBUG state", timestamp)
|
| 135 |
-
self.state = PlayState.NO_SCOREBUG
|
| 136 |
-
|
| 137 |
-
return None
|
| 138 |
-
|
| 139 |
-
def _handle_invalid_clock(self, timestamp: float) -> None:
|
| 140 |
-
"""Handle case when clock reading is invalid but scorebug is present."""
|
| 141 |
-
# If we're in pre-snap and clock becomes unreadable, might indicate play started
|
| 142 |
-
if self.state == PlayState.PRE_SNAP and self._last_clock_value is not None:
|
| 143 |
-
# Clock became unreadable - could be play in progress
|
| 144 |
-
logger.debug("Clock unreadable at %.1fs in PRE_SNAP state", timestamp)
|
| 145 |
-
|
| 146 |
-
def _process_clock_value(self, timestamp: float, clock_value: int) -> Optional[PlayEvent]:
|
| 147 |
-
"""
|
| 148 |
-
Process a valid clock reading and update state.
|
| 149 |
-
|
| 150 |
-
Args:
|
| 151 |
-
timestamp: Current timestamp
|
| 152 |
-
clock_value: Detected play clock value (0-40)
|
| 153 |
-
|
| 154 |
-
Returns:
|
| 155 |
-
PlayEvent if a play just completed
|
| 156 |
-
"""
|
| 157 |
-
completed_play = None
|
| 158 |
-
|
| 159 |
-
if self.state == PlayState.IDLE:
|
| 160 |
-
# First clock reading - transition to PRE_SNAP
|
| 161 |
-
logger.debug("First clock reading (%d) at %.1fs, entering PRE_SNAP", clock_value, timestamp)
|
| 162 |
-
self.state = PlayState.PRE_SNAP
|
| 163 |
-
self._last_clock_value = clock_value
|
| 164 |
-
self._last_clock_timestamp = timestamp
|
| 165 |
-
self._clock_stable_count = 1
|
| 166 |
-
|
| 167 |
-
elif self.state == PlayState.PRE_SNAP:
|
| 168 |
-
# Watching for play to start (clock reset to 40 or freeze)
|
| 169 |
-
completed_play = self._handle_pre_snap(timestamp, clock_value) # pylint: disable=assignment-from-none
|
| 170 |
-
|
| 171 |
-
elif self.state == PlayState.PLAY_IN_PROGRESS:
|
| 172 |
-
# Play is live, watching for it to end (clock restarts)
|
| 173 |
-
completed_play = self._handle_play_in_progress(timestamp, clock_value)
|
| 174 |
-
|
| 175 |
-
elif self.state == PlayState.POST_PLAY:
|
| 176 |
-
# Play ended, transitioning back to PRE_SNAP
|
| 177 |
-
self._handle_post_play(timestamp, clock_value)
|
| 178 |
-
|
| 179 |
-
elif self.state == PlayState.NO_SCOREBUG:
|
| 180 |
-
# Scorebug returned after being lost
|
| 181 |
-
completed_play = self._handle_scorebug_returned(timestamp, clock_value)
|
| 182 |
-
|
| 183 |
-
# Update tracking
|
| 184 |
-
self._last_clock_value = clock_value
|
| 185 |
-
self._last_clock_timestamp = timestamp
|
| 186 |
-
|
| 187 |
-
return completed_play
|
| 188 |
-
|
| 189 |
-
def _handle_pre_snap(self, timestamp: float, clock_value: int) -> Optional[PlayEvent]:
|
| 190 |
-
"""Handle clock reading during PRE_SNAP state."""
|
| 191 |
-
if self._last_clock_value is None:
|
| 192 |
-
self._last_clock_value = clock_value
|
| 193 |
-
self._clock_stable_count = 1
|
| 194 |
-
return None
|
| 195 |
-
|
| 196 |
-
# Check for clock reset to 40 (indicates ball was snapped for normal play)
|
| 197 |
-
# Require a significant jump in clock value to avoid false positives from OCR noise
|
| 198 |
-
# e.g., "40 from 39" is likely OCR noise, but "40 from 25" is a real reset
|
| 199 |
-
max_prev_value = 40 - self.min_clock_jump_for_reset # e.g., 35 if min_jump=5
|
| 200 |
-
if clock_value == 40 and self._last_clock_value <= max_prev_value:
|
| 201 |
-
logger.info("Play START detected at %.1fs (clock reset to 40 from %d)", timestamp, self._last_clock_value)
|
| 202 |
-
self._current_play_clock_base = 40
|
| 203 |
-
self._current_play_type = "normal"
|
| 204 |
-
self._start_play(timestamp, "clock_reset", self._last_clock_value)
|
| 205 |
-
return None
|
| 206 |
-
|
| 207 |
-
# Reject suspicious clock resets that look like OCR noise
|
| 208 |
-
if clock_value == 40 and self._last_clock_value > max_prev_value:
|
| 209 |
-
logger.debug(
|
| 210 |
-
"Ignoring suspicious clock reset at %.1fs (40 from %d, requires prev <= %d)",
|
| 211 |
-
timestamp,
|
| 212 |
-
self._last_clock_value,
|
| 213 |
-
max_prev_value,
|
| 214 |
-
)
|
| 215 |
-
# Don't update _last_clock_value - treat this 40 reading as noise
|
| 216 |
-
return None
|
| 217 |
-
|
| 218 |
-
# NEW: Check for clock reset to 25 (indicates special teams play - punt return, kickoff return, post-FG/XP)
|
| 219 |
-
# This happens after possession-changing plays like punts, FGs, XPs
|
| 220 |
-
# The clock resets to 25 instead of 40 for these plays
|
| 221 |
-
if clock_value == 25 and self._last_clock_value is not None:
|
| 222 |
-
clock_jump = abs(clock_value - self._last_clock_value)
|
| 223 |
-
# Require significant jump to avoid false positives from normal countdown through 25
|
| 224 |
-
# A jump of 5+ indicates a real reset (e.g., 40→25, 30→25 after brief 40, or 10→25 after play)
|
| 225 |
-
if clock_jump >= self.min_clock_jump_for_reset:
|
| 226 |
-
logger.info(
|
| 227 |
-
"Special teams play START detected at %.1fs (clock reset to 25 from %d, jump of %d)",
|
| 228 |
-
timestamp,
|
| 229 |
-
self._last_clock_value,
|
| 230 |
-
clock_jump,
|
| 231 |
-
)
|
| 232 |
-
self._current_play_clock_base = 25
|
| 233 |
-
self._current_play_type = "special"
|
| 234 |
-
self._start_play(timestamp, "clock_reset_25", self._last_clock_value)
|
| 235 |
-
return None
|
| 236 |
-
|
| 237 |
-
# Track clock stability (for potential future use)
|
| 238 |
-
if clock_value == self._last_clock_value:
|
| 239 |
-
self._clock_stable_count += 1
|
| 240 |
-
else:
|
| 241 |
-
self._clock_stable_count = 1
|
| 242 |
-
|
| 243 |
-
# Note: "clock_freeze" detection disabled - was causing false positives
|
| 244 |
-
# The clock_reset detection (going to 40 or 25) is the reliable method
|
| 245 |
-
return None
|
| 246 |
-
|
| 247 |
-
# pylint: disable=too-many-return-statements,too-many-branches
|
| 248 |
-
def _handle_play_in_progress(self, timestamp: float, clock_value: int) -> Optional[PlayEvent]:
|
| 249 |
-
"""Handle clock reading during PLAY_IN_PROGRESS state."""
|
| 250 |
-
if self._current_play_start_time is None:
|
| 251 |
-
return None
|
| 252 |
-
|
| 253 |
-
# Check for play duration timeout
|
| 254 |
-
play_duration = timestamp - self._current_play_start_time
|
| 255 |
-
if play_duration > self.max_play_duration:
|
| 256 |
-
# Cap the end time at start + max_duration to avoid inflated durations
|
| 257 |
-
# This prevents long gaps (commercials, etc.) from extending play end times
|
| 258 |
-
capped_end_time = self._current_play_start_time + self.max_play_duration
|
| 259 |
-
logger.warning(
|
| 260 |
-
"Play duration (%.1fs) exceeded max (%.1fs), forcing end at %.1fs (capped from %.1fs)",
|
| 261 |
-
play_duration,
|
| 262 |
-
self.max_play_duration,
|
| 263 |
-
capped_end_time,
|
| 264 |
-
timestamp,
|
| 265 |
-
)
|
| 266 |
-
self._direct_end_time = capped_end_time
|
| 267 |
-
self._countdown_history = [] # Reset countdown tracking
|
| 268 |
-
return self._end_play_capped(capped_end_time, clock_value, "max_duration")
|
| 269 |
-
|
| 270 |
-
# If clock is still at 40, the play just started and clock hasn't begun countdown yet
|
| 271 |
-
# We need to wait for the clock to drop below 40 before we can detect play end
|
| 272 |
-
if clock_value == 40:
|
| 273 |
-
# Track when we first saw 40 (for turnover detection)
|
| 274 |
-
if self._first_40_timestamp is None:
|
| 275 |
-
self._first_40_timestamp = timestamp
|
| 276 |
-
# Clock is still at 40 after reset - waiting for countdown to begin
|
| 277 |
-
logger.debug("Play in progress at %.1fs, clock still at 40 (%.1fs at 40)", timestamp, timestamp - self._first_40_timestamp)
|
| 278 |
-
self._countdown_history = [] # Reset countdown tracking
|
| 279 |
-
return None
|
| 280 |
-
|
| 281 |
-
# RAPID 40→25 TRANSITION DETECTION (possession change during play):
|
| 282 |
-
# This happens during punts, kickoffs, and after XPs/FGs:
|
| 283 |
-
# - Punt: Ball punted, receiving team catches it → clock resets to 25
|
| 284 |
-
# - Kickoff: Similar to punt
|
| 285 |
-
# - XP/FG: After kick, clock resets to 25 for next team's possession
|
| 286 |
-
#
|
| 287 |
-
# IMPORTANT: We should NOT end the play here! For punts/kickoffs, the return
|
| 288 |
-
# is still in progress. We need to:
|
| 289 |
-
# 1. Mark this as a special play
|
| 290 |
-
# 2. Update the clock base to 25
|
| 291 |
-
# 3. Continue tracking until proper end detection (countdown confirmed, scorebug lost, or max duration)
|
| 292 |
-
if clock_value == 25 and self._first_40_timestamp is not None:
|
| 293 |
-
time_at_40 = timestamp - self._first_40_timestamp
|
| 294 |
-
max_time_for_possession_change = 5.0 # 40→25 within 5 seconds indicates possession change
|
| 295 |
-
min_time_at_40 = 0.5 # Must be at 40 briefly to avoid OCR noise (lowered from 1.0 for timing precision)
|
| 296 |
-
|
| 297 |
-
if min_time_at_40 <= time_at_40 <= max_time_for_possession_change and len(self._countdown_history) == 0:
|
| 298 |
-
# Possession change detected - play continues (e.g., punt return in progress)
|
| 299 |
-
# DO NOT end the play here - let it continue until proper end detection
|
| 300 |
-
logger.info(
|
| 301 |
-
"Possession change detected at %.1fs: 40 → 25 after %.1fs. Play continues (return may be in progress).",
|
| 302 |
-
timestamp,
|
| 303 |
-
time_at_40,
|
| 304 |
-
)
|
| 305 |
-
# Mark as special play and update clock base for proper end detection
|
| 306 |
-
self._current_play_type = "special"
|
| 307 |
-
self._current_play_clock_base = 25
|
| 308 |
-
self._first_40_timestamp = None # Reset since we're now tracking at 25
|
| 309 |
-
self._countdown_history = [] # Reset countdown tracking for fresh detection at 25
|
| 310 |
-
return None # Continue tracking - DO NOT end the play!
|
| 311 |
-
|
| 312 |
-
# ABNORMAL CLOCK DROP DETECTION:
|
| 313 |
-
# If the first reading after seeing 40 drops by more than 5 seconds (e.g., 40 → 25),
|
| 314 |
-
# this could be either:
|
| 315 |
-
# 1. A timeout/injury reset (no play happened) - clock was at 40 briefly
|
| 316 |
-
# 2. A turnover/possession change (play DID happen) - clock was at 40 for several seconds during play
|
| 317 |
-
# We distinguish by checking how long the clock was at 40.
|
| 318 |
-
if len(self._countdown_history) == 0:
|
| 319 |
-
# This is the first reading after 40
|
| 320 |
-
clock_drop = 40 - clock_value
|
| 321 |
-
max_normal_drop = 5 # Allow up to 5 second drop on first reading (accounts for timing/OCR variance)
|
| 322 |
-
if clock_drop > max_normal_drop:
|
| 323 |
-
# Check how long clock was at 40 to distinguish turnover from timeout
|
| 324 |
-
time_at_40 = (timestamp - self._first_40_timestamp) if self._first_40_timestamp else 0
|
| 325 |
-
min_time_for_play = 2.0 # If clock was at 40 for > 2 seconds, a play likely happened
|
| 326 |
-
|
| 327 |
-
if time_at_40 > min_time_for_play:
|
| 328 |
-
# Ball was snapped, play happened - this is likely a turnover/possession change
|
| 329 |
-
# Estimate play ended shortly before we saw the abnormal reading
|
| 330 |
-
play_end_time = timestamp - 1.0
|
| 331 |
-
logger.info(
|
| 332 |
-
"Turnover/possession change detected at %.1fs: 40 → %d after %.1fs at 40. Recording play end at %.1fs.",
|
| 333 |
-
timestamp,
|
| 334 |
-
clock_value,
|
| 335 |
-
time_at_40,
|
| 336 |
-
play_end_time,
|
| 337 |
-
)
|
| 338 |
-
return self._end_play_with_backward_calc(timestamp, clock_value, play_end_time)
|
| 339 |
-
# Brief time at 40, likely timeout/reset, not a real play
|
| 340 |
-
logger.warning(
|
| 341 |
-
"Abnormal clock drop detected at %.1fs: 40 → %d (drop of %d seconds, only %.1fs at 40). "
|
| 342 |
-
"Likely timeout/injury reset, not a real play. Resetting to PRE_SNAP.",
|
| 343 |
-
timestamp,
|
| 344 |
-
clock_value,
|
| 345 |
-
clock_drop,
|
| 346 |
-
time_at_40,
|
| 347 |
-
)
|
| 348 |
-
self._reset_play_tracking()
|
| 349 |
-
self.state = PlayState.PRE_SNAP
|
| 350 |
-
return None
|
| 351 |
-
|
| 352 |
-
# Track countdown history for confirming play end
|
| 353 |
-
# We require K consecutive descending ticks to confirm
|
| 354 |
-
self._countdown_history.append((timestamp, clock_value))
|
| 355 |
-
|
| 356 |
-
# Check if we have enough consecutive descending values
|
| 357 |
-
if len(self._countdown_history) >= self.required_countdown_ticks:
|
| 358 |
-
# Get last K readings
|
| 359 |
-
recent = self._countdown_history[-self.required_countdown_ticks :]
|
| 360 |
-
values = [v for _, v in recent]
|
| 361 |
-
|
| 362 |
-
# Check if values are strictly descending (or stable which means same second)
|
| 363 |
-
is_valid_countdown = True
|
| 364 |
-
for i in range(1, len(values)):
|
| 365 |
-
# Allow same value (within same second) or descending
|
| 366 |
-
if values[i] > values[i - 1]:
|
| 367 |
-
is_valid_countdown = False
|
| 368 |
-
break
|
| 369 |
-
|
| 370 |
-
if is_valid_countdown:
|
| 371 |
-
# Use the first reading in our confirmed sequence for backward calculation
|
| 372 |
-
first_timestamp, first_value = recent[0]
|
| 373 |
-
# Use the correct clock base (40 for normal plays, 25 for special teams)
|
| 374 |
-
calculated_end_time = first_timestamp - (self._current_play_clock_base - first_value)
|
| 375 |
-
logger.info(
|
| 376 |
-
"Play END confirmed via %d-tick countdown: %.1fs (clock=%d→%d, base=%d, observed %.1fs-%.1fs)",
|
| 377 |
-
self.required_countdown_ticks,
|
| 378 |
-
calculated_end_time,
|
| 379 |
-
values[0],
|
| 380 |
-
values[-1],
|
| 381 |
-
self._current_play_clock_base,
|
| 382 |
-
recent[0][0],
|
| 383 |
-
recent[-1][0],
|
| 384 |
-
)
|
| 385 |
-
self._direct_end_time = timestamp # When we confirmed the countdown
|
| 386 |
-
self._countdown_history = [] # Reset for next play
|
| 387 |
-
return self._end_play_with_backward_calc(timestamp, first_value, calculated_end_time)
|
| 388 |
-
|
| 389 |
-
return None
|
| 390 |
-
|
| 391 |
-
def _handle_post_play(self, timestamp: float, clock_value: int) -> None: # pylint: disable=unused-argument
|
| 392 |
-
"""Handle clock reading during POST_PLAY state."""
|
| 393 |
-
# Note: clock_value unused for now, but kept for potential future use
|
| 394 |
-
# Transition back to PRE_SNAP
|
| 395 |
-
logger.debug("Transitioning from POST_PLAY to PRE_SNAP at %.1fs", timestamp)
|
| 396 |
-
self.state = PlayState.PRE_SNAP
|
| 397 |
-
self._clock_stable_count = 1
|
| 398 |
-
|
| 399 |
-
def _handle_scorebug_returned(self, timestamp: float, clock_value: int) -> Optional[PlayEvent]:
|
| 400 |
-
"""Handle scorebug returning after being lost."""
|
| 401 |
-
completed_play = None
|
| 402 |
-
|
| 403 |
-
# If we were tracking a play, use backward counting to determine when it ended
|
| 404 |
-
if self._current_play_start_time is not None:
|
| 405 |
-
# Calculate when play ended using backward counting with correct clock base
|
| 406 |
-
calculated_end_time = timestamp - (self._current_play_clock_base - clock_value)
|
| 407 |
-
logger.info(
|
| 408 |
-
"Scorebug returned at %.1fs (clock=%d, base=%d), backward calc play end: %.1fs",
|
| 409 |
-
timestamp,
|
| 410 |
-
clock_value,
|
| 411 |
-
self._current_play_clock_base,
|
| 412 |
-
calculated_end_time,
|
| 413 |
-
)
|
| 414 |
-
completed_play = self._end_play_with_backward_calc(timestamp, clock_value, calculated_end_time)
|
| 415 |
-
else:
|
| 416 |
-
# No play was in progress, just return to PRE_SNAP
|
| 417 |
-
logger.debug("Scorebug returned at %.1fs, no play in progress", timestamp)
|
| 418 |
-
|
| 419 |
-
self.state = PlayState.PRE_SNAP
|
| 420 |
-
self._clock_stable_count = 1
|
| 421 |
-
return completed_play
|
| 422 |
-
|
| 423 |
-
def _start_play(self, timestamp: float, method: str, clock_value: Optional[int]) -> None:
|
| 424 |
-
"""Record the start of a new play."""
|
| 425 |
-
self._current_play_start_time = timestamp
|
| 426 |
-
self._current_play_start_method = method
|
| 427 |
-
self._current_play_start_clock = clock_value
|
| 428 |
-
self._countdown_history = [] # Reset countdown tracking for new play
|
| 429 |
-
self.state = PlayState.PLAY_IN_PROGRESS
|
| 430 |
-
logger.debug("Play started: time=%.1fs, method=%s, clock=%s", timestamp, method, clock_value)
|
| 431 |
-
|
| 432 |
-
def _end_play_capped(self, capped_end_time: float, clock_value: int, method: str) -> PlayEvent:
|
| 433 |
-
"""End the current play with a capped end time (for max duration exceeded)."""
|
| 434 |
-
self._play_count += 1
|
| 435 |
-
|
| 436 |
-
play = PlayEvent(
|
| 437 |
-
play_number=self._play_count,
|
| 438 |
-
start_time=self._current_play_start_time or capped_end_time,
|
| 439 |
-
end_time=capped_end_time,
|
| 440 |
-
confidence=0.7, # Lower confidence for capped plays
|
| 441 |
-
start_method=self._current_play_start_method or "unknown",
|
| 442 |
-
end_method=method,
|
| 443 |
-
direct_end_time=self._direct_end_time,
|
| 444 |
-
start_clock_value=self._current_play_start_clock,
|
| 445 |
-
end_clock_value=clock_value,
|
| 446 |
-
play_type=self._current_play_type,
|
| 447 |
-
)
|
| 448 |
-
|
| 449 |
-
self.plays.append(play)
|
| 450 |
-
self._reset_play_tracking()
|
| 451 |
-
self.state = PlayState.POST_PLAY
|
| 452 |
-
|
| 453 |
-
logger.info(
|
| 454 |
-
"Play #%d complete (capped, %s): %.1fs - %.1fs (duration: %.1fs)",
|
| 455 |
-
play.play_number,
|
| 456 |
-
play.play_type,
|
| 457 |
-
play.start_time,
|
| 458 |
-
play.end_time,
|
| 459 |
-
play.end_time - play.start_time,
|
| 460 |
-
)
|
| 461 |
-
|
| 462 |
-
return play
|
| 463 |
-
|
| 464 |
-
def _end_play(self, timestamp: float, clock_value: int, method: str) -> PlayEvent:
|
| 465 |
-
"""End the current play and create a PlayEvent."""
|
| 466 |
-
self._play_count += 1
|
| 467 |
-
|
| 468 |
-
# For direct detection, end time is the current timestamp
|
| 469 |
-
end_time = timestamp
|
| 470 |
-
|
| 471 |
-
play = PlayEvent(
|
| 472 |
-
play_number=self._play_count,
|
| 473 |
-
start_time=self._current_play_start_time or timestamp,
|
| 474 |
-
end_time=end_time,
|
| 475 |
-
confidence=0.8, # Base confidence for direct detection
|
| 476 |
-
start_method=self._current_play_start_method or "unknown",
|
| 477 |
-
end_method=method,
|
| 478 |
-
direct_end_time=self._direct_end_time,
|
| 479 |
-
start_clock_value=self._current_play_start_clock,
|
| 480 |
-
end_clock_value=clock_value,
|
| 481 |
-
play_type=self._current_play_type,
|
| 482 |
-
)
|
| 483 |
-
|
| 484 |
-
self.plays.append(play)
|
| 485 |
-
self._reset_play_tracking()
|
| 486 |
-
self.state = PlayState.POST_PLAY
|
| 487 |
-
|
| 488 |
-
logger.info(
|
| 489 |
-
"Play #%d complete (%s): %.1fs - %.1fs (duration: %.1fs)",
|
| 490 |
-
play.play_number,
|
| 491 |
-
play.play_type,
|
| 492 |
-
play.start_time,
|
| 493 |
-
play.end_time,
|
| 494 |
-
play.end_time - play.start_time,
|
| 495 |
-
)
|
| 496 |
-
|
| 497 |
-
return play
|
| 498 |
-
|
| 499 |
-
def _end_play_with_backward_calc(self, observation_time: float, clock_value: int, calculated_end_time: float) -> Optional[PlayEvent]:
|
| 500 |
-
"""End the current play using backward calculation for end time."""
|
| 501 |
-
start_time = self._current_play_start_time or calculated_end_time
|
| 502 |
-
|
| 503 |
-
# Sanity check: end time must be after start time
|
| 504 |
-
if calculated_end_time < start_time:
|
| 505 |
-
logger.warning(
|
| 506 |
-
"Rejecting invalid play: calculated end time (%.1fs) is before start time (%.1fs). This likely indicates OCR noise or false play detection. Resetting state.",
|
| 507 |
-
calculated_end_time,
|
| 508 |
-
start_time,
|
| 509 |
-
)
|
| 510 |
-
self._reset_play_tracking()
|
| 511 |
-
self.state = PlayState.PRE_SNAP
|
| 512 |
-
return None
|
| 513 |
-
|
| 514 |
-
# Sanity check: play duration should be reasonable
|
| 515 |
-
# Special plays (XP/FG completions) can have very short durations due to sampling interval
|
| 516 |
-
duration = calculated_end_time - start_time
|
| 517 |
-
min_duration = 0.0 if self._current_play_type == "special" else 0.5
|
| 518 |
-
if duration < min_duration:
|
| 519 |
-
logger.warning(
|
| 520 |
-
"Rejecting invalid play: duration (%.1fs) is too short. This likely indicates OCR noise. Resetting state.",
|
| 521 |
-
duration,
|
| 522 |
-
)
|
| 523 |
-
self._reset_play_tracking()
|
| 524 |
-
self.state = PlayState.PRE_SNAP
|
| 525 |
-
return None
|
| 526 |
-
|
| 527 |
-
self._play_count += 1
|
| 528 |
-
|
| 529 |
-
play = PlayEvent(
|
| 530 |
-
play_number=self._play_count,
|
| 531 |
-
start_time=start_time,
|
| 532 |
-
end_time=calculated_end_time, # Use backward-calculated time
|
| 533 |
-
confidence=0.9, # Higher confidence for backward calculation
|
| 534 |
-
start_method=self._current_play_start_method or "unknown",
|
| 535 |
-
end_method="backward_calc",
|
| 536 |
-
direct_end_time=self._direct_end_time, # May be None
|
| 537 |
-
start_clock_value=self._current_play_start_clock,
|
| 538 |
-
end_clock_value=clock_value,
|
| 539 |
-
play_type=self._current_play_type,
|
| 540 |
-
)
|
| 541 |
-
|
| 542 |
-
self.plays.append(play)
|
| 543 |
-
self._reset_play_tracking()
|
| 544 |
-
self.state = PlayState.POST_PLAY
|
| 545 |
-
|
| 546 |
-
logger.info(
|
| 547 |
-
"Play #%d complete (backward calc, %s): %.1fs - %.1fs (duration: %.1fs, observed at %.1fs with clock=%d)",
|
| 548 |
-
play.play_number,
|
| 549 |
-
play.play_type,
|
| 550 |
-
play.start_time,
|
| 551 |
-
play.end_time,
|
| 552 |
-
play.end_time - play.start_time,
|
| 553 |
-
observation_time,
|
| 554 |
-
clock_value,
|
| 555 |
-
)
|
| 556 |
-
|
| 557 |
-
return play
|
| 558 |
-
|
| 559 |
-
def _reset_play_tracking(self) -> None:
|
| 560 |
-
"""Reset tracking variables for next play."""
|
| 561 |
-
self._current_play_start_time = None
|
| 562 |
-
self._current_play_start_method = None
|
| 563 |
-
self._current_play_start_clock = None
|
| 564 |
-
self._direct_end_time = None
|
| 565 |
-
self._clock_stable_count = 0
|
| 566 |
-
self._countdown_history = []
|
| 567 |
-
self._first_40_timestamp = None
|
| 568 |
-
self._current_play_clock_base = 40 # Reset to default
|
| 569 |
-
self._current_play_type = "normal" # Reset to default
|
| 570 |
-
|
| 571 |
-
def _reset_state(self) -> None:
|
| 572 |
-
"""Fully reset state machine."""
|
| 573 |
-
self.state = PlayState.IDLE
|
| 574 |
-
self._reset_play_tracking()
|
| 575 |
-
self._last_clock_value = None
|
| 576 |
-
self._last_clock_timestamp = None
|
| 577 |
-
self._last_scorebug_timestamp = None
|
| 578 |
-
logger.debug("State machine reset to IDLE")
|
| 579 |
-
|
| 580 |
-
def get_plays(self) -> List[PlayEvent]:
|
| 581 |
-
"""Get all detected plays."""
|
| 582 |
-
return self.plays.copy()
|
| 583 |
-
|
| 584 |
-
def get_state(self) -> PlayState:
|
| 585 |
-
"""Get current state."""
|
| 586 |
-
return self.state
|
| 587 |
-
|
| 588 |
-
def get_stats(self) -> dict:
|
| 589 |
-
"""Get statistics about detected plays."""
|
| 590 |
-
if not self.plays:
|
| 591 |
-
return {"total_plays": 0}
|
| 592 |
-
|
| 593 |
-
durations = [p.end_time - p.start_time for p in self.plays]
|
| 594 |
-
return {
|
| 595 |
-
"total_plays": len(self.plays),
|
| 596 |
-
"avg_duration": sum(durations) / len(durations),
|
| 597 |
-
"min_duration": min(durations),
|
| 598 |
-
"max_duration": max(durations),
|
| 599 |
-
"start_methods": {m: sum(1 for p in self.plays if p.start_method == m) for m in set(p.start_method for p in self.plays)},
|
| 600 |
-
"end_methods": {m: sum(1 for p in self.plays if p.end_method == m) for m in set(p.end_method for p in self.plays)},
|
| 601 |
-
"play_types": {t: sum(1 for p in self.plays if p.play_type == t) for t in set(p.play_type for p in self.plays)},
|
| 602 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/pipeline/__init__.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
"""Pipeline modules for video processing and
|
| 2 |
|
| 3 |
Note: OCR-based clock reading has been removed in favor of template matching.
|
| 4 |
Streaming processing is used for optimal performance.
|
|
@@ -13,8 +13,9 @@ from .models import (
|
|
| 13 |
)
|
| 14 |
|
| 15 |
# Pipeline classes and functions
|
| 16 |
-
from .
|
| 17 |
-
from .orchestrator import
|
|
|
|
| 18 |
|
| 19 |
__all__ = [
|
| 20 |
# Models
|
|
@@ -22,8 +23,9 @@ __all__ = [
|
|
| 22 |
"DetectionResult",
|
| 23 |
"VideoContext",
|
| 24 |
# Pipeline
|
| 25 |
-
"
|
| 26 |
-
"
|
| 27 |
-
"
|
| 28 |
"print_results_summary",
|
|
|
|
| 29 |
]
|
|
|
|
| 1 |
+
"""Pipeline modules for video processing and play extraction orchestration.
|
| 2 |
|
| 3 |
Note: OCR-based clock reading has been removed in favor of template matching.
|
| 4 |
Streaming processing is used for optimal performance.
|
|
|
|
| 13 |
)
|
| 14 |
|
| 15 |
# Pipeline classes and functions
|
| 16 |
+
from .play_extractor import PlayExtractor, format_extraction_result_dict
|
| 17 |
+
from .orchestrator import run_extraction, print_results_summary
|
| 18 |
+
from .template_builder_pass import TemplateBuildingPass
|
| 19 |
|
| 20 |
__all__ = [
|
| 21 |
# Models
|
|
|
|
| 23 |
"DetectionResult",
|
| 24 |
"VideoContext",
|
| 25 |
# Pipeline
|
| 26 |
+
"PlayExtractor",
|
| 27 |
+
"format_extraction_result_dict",
|
| 28 |
+
"run_extraction",
|
| 29 |
"print_results_summary",
|
| 30 |
+
"TemplateBuildingPass",
|
| 31 |
]
|
src/pipeline/models.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
"""
|
| 2 |
-
|
| 3 |
|
| 4 |
This module contains all the data structures used by the pipeline components
|
| 5 |
for configuration, intermediate results, and final output.
|
|
@@ -9,10 +9,9 @@ Streaming processing is used for optimal performance (read frame -> process imme
|
|
| 9 |
See docs/ocr_to_template_migration.md for details.
|
| 10 |
"""
|
| 11 |
|
| 12 |
-
from dataclasses import dataclass, field
|
| 13 |
from typing import Optional, List, Dict, Any, Tuple
|
| 14 |
|
| 15 |
-
from pydantic import BaseModel
|
| 16 |
|
| 17 |
|
| 18 |
# =============================================================================
|
|
@@ -20,8 +19,7 @@ from pydantic import BaseModel
|
|
| 20 |
# =============================================================================
|
| 21 |
|
| 22 |
|
| 23 |
-
|
| 24 |
-
class DetectionConfig:
|
| 25 |
"""Configuration for play detection pipeline.
|
| 26 |
|
| 27 |
Uses template matching for play clock reading (~34x faster than OCR).
|
|
@@ -29,19 +27,19 @@ class DetectionConfig:
|
|
| 29 |
then streaming detection processes each frame immediately via template matching.
|
| 30 |
"""
|
| 31 |
|
| 32 |
-
video_path: str
|
| 33 |
-
template_path: str
|
| 34 |
-
clock_region_config_path: str
|
| 35 |
-
start_time: float = 0.0
|
| 36 |
-
end_time: Optional[float] = None
|
| 37 |
-
frame_interval: float = 0.5
|
| 38 |
-
use_split_detection: bool = True
|
| 39 |
# Template matching configuration
|
| 40 |
-
template_collection_frames: int = 400
|
| 41 |
-
digit_template_path: Optional[str] = None
|
| 42 |
# Fixed coordinates mode - skip scorebug detection entirely for maximum speed
|
| 43 |
-
fixed_playclock_coords: Optional[Tuple[int, int, int, int]] = None
|
| 44 |
-
fixed_scorebug_coords: Optional[Tuple[int, int, int, int]] = None
|
| 45 |
|
| 46 |
|
| 47 |
# =============================================================================
|
|
@@ -49,19 +47,20 @@ class DetectionConfig:
|
|
| 49 |
# =============================================================================
|
| 50 |
|
| 51 |
|
| 52 |
-
|
| 53 |
-
class VideoContext:
|
| 54 |
"""Container for video properties and processing state."""
|
| 55 |
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
|
|
|
|
|
|
| 65 |
|
| 66 |
|
| 67 |
# =============================================================================
|
|
@@ -69,20 +68,19 @@ class VideoContext:
|
|
| 69 |
# =============================================================================
|
| 70 |
|
| 71 |
|
| 72 |
-
|
| 73 |
-
class DetectionResult:
|
| 74 |
"""Results from play detection pipeline."""
|
| 75 |
|
| 76 |
-
video: str
|
| 77 |
-
segment_start: float
|
| 78 |
-
segment_end: float
|
| 79 |
-
total_frames_processed: int
|
| 80 |
-
frames_with_scorebug: int
|
| 81 |
-
frames_with_clock: int
|
| 82 |
-
plays: List[Dict[str, Any]] =
|
| 83 |
-
stats: Dict[str, Any] =
|
| 84 |
-
timing: Dict[str, float] =
|
| 85 |
-
config: Dict[str, Any] =
|
| 86 |
|
| 87 |
|
| 88 |
# =============================================================================
|
|
@@ -90,6 +88,23 @@ class DetectionResult:
|
|
| 90 |
# =============================================================================
|
| 91 |
|
| 92 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
class ChunkResult(BaseModel):
|
| 94 |
"""Result from processing a single video chunk in parallel processing.
|
| 95 |
|
|
@@ -97,12 +112,12 @@ class ChunkResult(BaseModel):
|
|
| 97 |
when passing data between worker processes.
|
| 98 |
"""
|
| 99 |
|
| 100 |
-
chunk_id: int
|
| 101 |
-
start_time: float
|
| 102 |
-
end_time: float
|
| 103 |
-
frames_processed: int
|
| 104 |
-
frames_with_scorebug: int
|
| 105 |
-
frames_with_clock: int
|
| 106 |
-
frame_data: List[Dict[str, Any]]
|
| 107 |
-
io_time: float
|
| 108 |
-
processing_time: float
|
|
|
|
| 1 |
"""
|
| 2 |
+
Pydantic models for the pipeline module.
|
| 3 |
|
| 4 |
This module contains all the data structures used by the pipeline components
|
| 5 |
for configuration, intermediate results, and final output.
|
|
|
|
| 9 |
See docs/ocr_to_template_migration.md for details.
|
| 10 |
"""
|
| 11 |
|
|
|
|
| 12 |
from typing import Optional, List, Dict, Any, Tuple
|
| 13 |
|
| 14 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 15 |
|
| 16 |
|
| 17 |
# =============================================================================
|
|
|
|
| 19 |
# =============================================================================
|
| 20 |
|
| 21 |
|
| 22 |
+
class DetectionConfig(BaseModel):
|
|
|
|
| 23 |
"""Configuration for play detection pipeline.
|
| 24 |
|
| 25 |
Uses template matching for play clock reading (~34x faster than OCR).
|
|
|
|
| 27 |
then streaming detection processes each frame immediately via template matching.
|
| 28 |
"""
|
| 29 |
|
| 30 |
+
video_path: str = Field(..., description="Path to video file")
|
| 31 |
+
template_path: str = Field(..., description="Path to scorebug template")
|
| 32 |
+
clock_region_config_path: str = Field(..., description="Path to play clock region config")
|
| 33 |
+
start_time: float = Field(0.0, description="Start time in seconds")
|
| 34 |
+
end_time: Optional[float] = Field(None, description="End time in seconds (None = full video)")
|
| 35 |
+
frame_interval: float = Field(0.5, description="Interval between frame samples (seconds)")
|
| 36 |
+
use_split_detection: bool = Field(True, description="Enable split-half scorebug detection for robustness to partial overlays")
|
| 37 |
# Template matching configuration
|
| 38 |
+
template_collection_frames: int = Field(400, description="Number of frames to use for building digit templates via OCR")
|
| 39 |
+
digit_template_path: Optional[str] = Field(None, description="Path to pre-built digit templates (skip collection phase if provided)")
|
| 40 |
# Fixed coordinates mode - skip scorebug detection entirely for maximum speed
|
| 41 |
+
fixed_playclock_coords: Optional[Tuple[int, int, int, int]] = Field(None, description="(x, y, w, h) absolute play clock coords")
|
| 42 |
+
fixed_scorebug_coords: Optional[Tuple[int, int, int, int]] = Field(None, description="(x, y, w, h) scorebug region (for metadata)")
|
| 43 |
|
| 44 |
|
| 45 |
# =============================================================================
|
|
|
|
| 47 |
# =============================================================================
|
| 48 |
|
| 49 |
|
| 50 |
+
class VideoContext(BaseModel):
|
|
|
|
| 51 |
"""Container for video properties and processing state."""
|
| 52 |
|
| 53 |
+
model_config = ConfigDict(arbitrary_types_allowed=True)
|
| 54 |
+
|
| 55 |
+
cap: Any = Field(..., description="cv2.VideoCapture (using Any to avoid cv2 typing issues)")
|
| 56 |
+
fps: float = Field(..., description="Frames per second")
|
| 57 |
+
total_frames: int = Field(..., description="Total frame count")
|
| 58 |
+
duration: float = Field(..., description="Video duration in seconds")
|
| 59 |
+
start_time: float = Field(..., description="Segment start time")
|
| 60 |
+
end_time: float = Field(..., description="Segment end time")
|
| 61 |
+
frame_skip: int = Field(..., description="Frames to skip between samples")
|
| 62 |
+
start_frame: int = Field(..., description="First frame to process")
|
| 63 |
+
end_frame: int = Field(..., description="Last frame to process")
|
| 64 |
|
| 65 |
|
| 66 |
# =============================================================================
|
|
|
|
| 68 |
# =============================================================================
|
| 69 |
|
| 70 |
|
| 71 |
+
class DetectionResult(BaseModel):
|
|
|
|
| 72 |
"""Results from play detection pipeline."""
|
| 73 |
|
| 74 |
+
video: str = Field(..., description="Video filename")
|
| 75 |
+
segment_start: float = Field(..., description="Segment start time")
|
| 76 |
+
segment_end: float = Field(..., description="Segment end time")
|
| 77 |
+
total_frames_processed: int = Field(..., description="Number of frames analyzed")
|
| 78 |
+
frames_with_scorebug: int = Field(..., description="Frames where scorebug was detected")
|
| 79 |
+
frames_with_clock: int = Field(..., description="Frames where clock was read successfully")
|
| 80 |
+
plays: List[Dict[str, Any]] = Field(default_factory=list, description="Detected plays as dicts")
|
| 81 |
+
stats: Dict[str, Any] = Field(default_factory=dict, description="Summary statistics")
|
| 82 |
+
timing: Dict[str, float] = Field(default_factory=dict, description="Timing breakdown by section")
|
| 83 |
+
config: Dict[str, Any] = Field(default_factory=dict, description="Configuration used for this run (regions, thresholds, etc.)")
|
| 84 |
|
| 85 |
|
| 86 |
# =============================================================================
|
|
|
|
| 88 |
# =============================================================================
|
| 89 |
|
| 90 |
|
| 91 |
+
class ParallelProcessingConfig(BaseModel):
|
| 92 |
+
"""Configuration for parallel video chunk processing.
|
| 93 |
+
|
| 94 |
+
This model groups the configuration parameters needed by parallel processing
|
| 95 |
+
functions, reducing the number of individual arguments.
|
| 96 |
+
"""
|
| 97 |
+
|
| 98 |
+
video_path: str = Field(..., description="Path to video file")
|
| 99 |
+
start_time: float = Field(..., description="Start time in seconds")
|
| 100 |
+
end_time: float = Field(..., description="End time in seconds")
|
| 101 |
+
frame_interval: float = Field(..., description="Time interval between frames")
|
| 102 |
+
fixed_playclock_coords: Tuple[int, int, int, int] = Field(..., description="(x, y, w, h) for play clock region")
|
| 103 |
+
fixed_scorebug_coords: Tuple[int, int, int, int] = Field(..., description="(x, y, w, h) for scorebug region")
|
| 104 |
+
template_library_path: Optional[str] = Field(None, description="Path to template library directory")
|
| 105 |
+
timeout_config_path: Optional[str] = Field(None, description="Path to timeout tracker config")
|
| 106 |
+
|
| 107 |
+
|
| 108 |
class ChunkResult(BaseModel):
|
| 109 |
"""Result from processing a single video chunk in parallel processing.
|
| 110 |
|
|
|
|
| 112 |
when passing data between worker processes.
|
| 113 |
"""
|
| 114 |
|
| 115 |
+
chunk_id: int = Field(..., description="Identifier for this chunk")
|
| 116 |
+
start_time: float = Field(..., description="Chunk start time in seconds")
|
| 117 |
+
end_time: float = Field(..., description="Chunk end time in seconds")
|
| 118 |
+
frames_processed: int = Field(..., description="Total frames processed in this chunk")
|
| 119 |
+
frames_with_scorebug: int = Field(..., description="Frames where scorebug was detected")
|
| 120 |
+
frames_with_clock: int = Field(..., description="Frames where clock was successfully read")
|
| 121 |
+
frame_data: List[Dict[str, Any]] = Field(..., description="Per-frame detection results")
|
| 122 |
+
io_time: float = Field(..., description="Time spent on video I/O operations")
|
| 123 |
+
processing_time: float = Field(..., description="Total processing time for this chunk")
|
src/pipeline/orchestrator.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
"""
|
| 2 |
-
Pipeline orchestration for play
|
| 3 |
|
| 4 |
-
This module provides the high-level functions for running play
|
| 5 |
including result filtering and summary printing.
|
| 6 |
|
| 7 |
Supports both sequential and parallel processing modes:
|
|
@@ -10,26 +10,20 @@ Supports both sequential and parallel processing modes:
|
|
| 10 |
"""
|
| 11 |
|
| 12 |
import logging
|
| 13 |
-
import sys
|
| 14 |
from pathlib import Path
|
| 15 |
-
from typing import Any, Dict
|
| 16 |
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
# pylint: disable=wrong-import-position
|
| 23 |
-
from config.session import SessionConfig, MIN_PLAY_DURATION
|
| 24 |
-
from detectors.timeout_tracker import TimeoutTracker
|
| 25 |
-
from pipeline.play_detector import DetectionConfig, PlayDetector, format_detection_result_dict
|
| 26 |
|
| 27 |
logger = logging.getLogger(__name__)
|
| 28 |
|
| 29 |
|
| 30 |
-
def
|
| 31 |
"""
|
| 32 |
-
Run play
|
| 33 |
|
| 34 |
Uses 3-class classification for 40->25 clock resets:
|
| 35 |
- Class A (weird_clock): 25 counts down immediately -> rejected
|
|
@@ -42,18 +36,18 @@ def run_detection(config: SessionConfig, output_dir: Path, num_workers: int = 1)
|
|
| 42 |
num_workers: Number of parallel workers (1=sequential, 2+=parallel).
|
| 43 |
|
| 44 |
Returns:
|
| 45 |
-
|
| 46 |
- video: Video path
|
| 47 |
- segment: Start/end times
|
| 48 |
- processing: Frame processing stats
|
| 49 |
- timing: Timing breakdown
|
| 50 |
-
- plays: List of
|
| 51 |
- stats: Play statistics
|
| 52 |
"""
|
| 53 |
if num_workers > 1:
|
| 54 |
-
print(f"\n[Phase 3] Running
|
| 55 |
else:
|
| 56 |
-
print("\n[Phase 3] Running
|
| 57 |
print("-" * 50)
|
| 58 |
|
| 59 |
basename = config.video_basename
|
|
@@ -88,19 +82,19 @@ def run_detection(config: SessionConfig, output_dir: Path, num_workers: int = 1)
|
|
| 88 |
# Initialize timeout tracker if config exists
|
| 89 |
timeout_tracker = None
|
| 90 |
if timeout_config_path.exists():
|
| 91 |
-
timeout_tracker =
|
| 92 |
logger.info("Timeout tracker initialized from: %s", timeout_config_path)
|
| 93 |
else:
|
| 94 |
logger.info("No timeout tracker config found - clock reset classification will be limited")
|
| 95 |
|
| 96 |
-
# Initialize
|
| 97 |
-
|
| 98 |
|
| 99 |
-
# Run
|
| 100 |
if num_workers > 1:
|
| 101 |
-
result =
|
| 102 |
else:
|
| 103 |
-
result =
|
| 104 |
|
| 105 |
# Filter out plays shorter than minimum duration (e.g., clock operator errors)
|
| 106 |
original_play_count = len(result.plays)
|
|
@@ -159,10 +153,10 @@ def run_detection(config: SessionConfig, output_dir: Path, num_workers: int = 1)
|
|
| 159 |
|
| 160 |
# Save results with video-specific name
|
| 161 |
results_path = output_dir / f"{basename}_plays.json"
|
| 162 |
-
|
| 163 |
|
| 164 |
# Convert to dictionary for return
|
| 165 |
-
return
|
| 166 |
|
| 167 |
|
| 168 |
def print_results_summary(
|
|
@@ -172,12 +166,12 @@ def print_results_summary(
|
|
| 172 |
video_basename: str,
|
| 173 |
generate_individual: bool,
|
| 174 |
expected_plays: int = 12,
|
| 175 |
-
):
|
| 176 |
"""
|
| 177 |
Print the final results summary.
|
| 178 |
|
| 179 |
Args:
|
| 180 |
-
results:
|
| 181 |
testing_mode: Whether running in testing mode.
|
| 182 |
clip_timing: Timing information from clip generation.
|
| 183 |
video_basename: Base name for output files.
|
|
|
|
| 1 |
"""
|
| 2 |
+
Pipeline orchestration for play extraction.
|
| 3 |
|
| 4 |
+
This module provides the high-level functions for running play extraction,
|
| 5 |
including result filtering and summary printing.
|
| 6 |
|
| 7 |
Supports both sequential and parallel processing modes:
|
|
|
|
| 10 |
"""
|
| 11 |
|
| 12 |
import logging
|
|
|
|
| 13 |
from pathlib import Path
|
| 14 |
+
from typing import Any, Dict
|
| 15 |
|
| 16 |
+
from config import SessionConfig, MIN_PLAY_DURATION
|
| 17 |
+
from detection import DetectTimeouts
|
| 18 |
+
from .models import DetectionConfig
|
| 19 |
+
from .play_extractor import PlayExtractor, format_extraction_result_dict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
logger = logging.getLogger(__name__)
|
| 22 |
|
| 23 |
|
| 24 |
+
def run_extraction(config: SessionConfig, output_dir: Path, num_workers: int = 1) -> Dict[str, Any]:
|
| 25 |
"""
|
| 26 |
+
Run play extraction using the configured regions.
|
| 27 |
|
| 28 |
Uses 3-class classification for 40->25 clock resets:
|
| 29 |
- Class A (weird_clock): 25 counts down immediately -> rejected
|
|
|
|
| 36 |
num_workers: Number of parallel workers (1=sequential, 2+=parallel).
|
| 37 |
|
| 38 |
Returns:
|
| 39 |
+
Extraction results dictionary with keys:
|
| 40 |
- video: Video path
|
| 41 |
- segment: Start/end times
|
| 42 |
- processing: Frame processing stats
|
| 43 |
- timing: Timing breakdown
|
| 44 |
+
- plays: List of extracted plays
|
| 45 |
- stats: Play statistics
|
| 46 |
"""
|
| 47 |
if num_workers > 1:
|
| 48 |
+
print(f"\n[Phase 3] Running Extraction (parallel: {num_workers} workers)...")
|
| 49 |
else:
|
| 50 |
+
print("\n[Phase 3] Running Extraction...")
|
| 51 |
print("-" * 50)
|
| 52 |
|
| 53 |
basename = config.video_basename
|
|
|
|
| 82 |
# Initialize timeout tracker if config exists
|
| 83 |
timeout_tracker = None
|
| 84 |
if timeout_config_path.exists():
|
| 85 |
+
timeout_tracker = DetectTimeouts(config_path=str(timeout_config_path))
|
| 86 |
logger.info("Timeout tracker initialized from: %s", timeout_config_path)
|
| 87 |
else:
|
| 88 |
logger.info("No timeout tracker config found - clock reset classification will be limited")
|
| 89 |
|
| 90 |
+
# Initialize extractor - fixed coordinates mode is configured via DetectionConfig
|
| 91 |
+
extractor = PlayExtractor(detection_config, timeout_tracker=timeout_tracker)
|
| 92 |
|
| 93 |
+
# Run extraction - parallel or sequential based on num_workers
|
| 94 |
if num_workers > 1:
|
| 95 |
+
result = extractor.extract_parallel(num_workers=num_workers, output_dir=output_dir)
|
| 96 |
else:
|
| 97 |
+
result = extractor.extract()
|
| 98 |
|
| 99 |
# Filter out plays shorter than minimum duration (e.g., clock operator errors)
|
| 100 |
original_play_count = len(result.plays)
|
|
|
|
| 153 |
|
| 154 |
# Save results with video-specific name
|
| 155 |
results_path = output_dir / f"{basename}_plays.json"
|
| 156 |
+
extractor.save_results(result, str(results_path))
|
| 157 |
|
| 158 |
# Convert to dictionary for return
|
| 159 |
+
return format_extraction_result_dict(result)
|
| 160 |
|
| 161 |
|
| 162 |
def print_results_summary(
|
|
|
|
| 166 |
video_basename: str,
|
| 167 |
generate_individual: bool,
|
| 168 |
expected_plays: int = 12,
|
| 169 |
+
) -> None:
|
| 170 |
"""
|
| 171 |
Print the final results summary.
|
| 172 |
|
| 173 |
Args:
|
| 174 |
+
results: Extraction results dictionary from run_extraction().
|
| 175 |
testing_mode: Whether running in testing mode.
|
| 176 |
clip_timing: Timing information from clip generation.
|
| 177 |
video_basename: Base name for output files.
|
src/pipeline/parallel.py
CHANGED
|
@@ -13,9 +13,10 @@ import time
|
|
| 13 |
from concurrent.futures import Future, ProcessPoolExecutor, as_completed
|
| 14 |
from multiprocessing import Manager
|
| 15 |
from pathlib import Path
|
| 16 |
-
from typing import Any, Dict, List, Optional, Tuple
|
| 17 |
|
| 18 |
-
from
|
|
|
|
| 19 |
|
| 20 |
logger = logging.getLogger(__name__)
|
| 21 |
|
|
@@ -25,53 +26,58 @@ logger = logging.getLogger(__name__)
|
|
| 25 |
# =============================================================================
|
| 26 |
|
| 27 |
|
| 28 |
-
def _init_chunk_detectors(
|
| 29 |
-
fixed_playclock_coords: Tuple[int, int, int, int],
|
| 30 |
-
fixed_scorebug_coords: Tuple[int, int, int, int],
|
| 31 |
-
template_library_path: Optional[str],
|
| 32 |
-
timeout_config_path: Optional[str],
|
| 33 |
-
) -> Tuple[Any, Any, Any, Any]:
|
| 34 |
"""
|
| 35 |
Initialize all detection components for a chunk worker.
|
| 36 |
|
| 37 |
Must be called within the subprocess since these objects can't be pickled.
|
| 38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
Args:
|
| 40 |
-
|
| 41 |
-
fixed_scorebug_coords: (x, y, w, h) for scorebug region.
|
| 42 |
-
template_library_path: Path to template library directory.
|
| 43 |
-
timeout_config_path: Path to timeout tracker config.
|
| 44 |
|
| 45 |
Returns:
|
| 46 |
Tuple of (scorebug_detector, clock_reader, template_reader, timeout_tracker).
|
| 47 |
"""
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
|
|
|
|
|
|
|
|
|
| 51 |
|
| 52 |
# Create scorebug detector with fixed region
|
| 53 |
-
scorebug_detector =
|
| 54 |
-
scorebug_detector.set_fixed_region(fixed_scorebug_coords)
|
| 55 |
-
|
| 56 |
-
# Create play clock
|
| 57 |
-
pc_x, pc_y, pc_w, pc_h = fixed_playclock_coords
|
| 58 |
-
sb_x, sb_y, _, _ = fixed_scorebug_coords
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
|
| 64 |
# Create template reader if template path provided
|
| 65 |
template_reader = None
|
| 66 |
-
if template_library_path and Path(template_library_path).exists():
|
| 67 |
template_library = DigitTemplateLibrary()
|
| 68 |
-
if template_library.load(template_library_path):
|
| 69 |
-
template_reader =
|
| 70 |
|
| 71 |
# Initialize timeout tracker if config provided
|
| 72 |
timeout_tracker = None
|
| 73 |
-
if timeout_config_path and Path(timeout_config_path).exists():
|
| 74 |
-
timeout_tracker =
|
| 75 |
|
| 76 |
return scorebug_detector, clock_reader, template_reader, timeout_tracker
|
| 77 |
|
|
@@ -91,10 +97,10 @@ def _process_frame(
|
|
| 91 |
Args:
|
| 92 |
img: OpenCV image (numpy array).
|
| 93 |
timestamp: Frame timestamp in seconds.
|
| 94 |
-
scorebug_detector: Initialized
|
| 95 |
-
clock_reader: Initialized
|
| 96 |
-
template_reader: Initialized
|
| 97 |
-
timeout_tracker: Initialized
|
| 98 |
stats: Mutable dict to update with detection statistics.
|
| 99 |
|
| 100 |
Returns:
|
|
@@ -103,15 +109,12 @@ def _process_frame(
|
|
| 103 |
# Detect scorebug (fast path with fixed region)
|
| 104 |
scorebug = scorebug_detector.detect(img)
|
| 105 |
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
"clock_value": None,
|
| 113 |
-
"clock_detected": False,
|
| 114 |
-
}
|
| 115 |
|
| 116 |
if scorebug.detected:
|
| 117 |
stats["frames_with_scorebug"] += 1
|
|
@@ -123,7 +126,7 @@ def _process_frame(
|
|
| 123 |
frame_result["away_timeouts"] = timeout_reading.away_timeouts
|
| 124 |
|
| 125 |
# Extract play clock region and template match
|
| 126 |
-
play_clock_region = clock_reader.
|
| 127 |
if play_clock_region is not None and template_reader:
|
| 128 |
clock_result = template_reader.read(play_clock_region)
|
| 129 |
frame_result["clock_detected"] = clock_result.detected
|
|
@@ -134,17 +137,52 @@ def _process_frame(
|
|
| 134 |
return frame_result
|
| 135 |
|
| 136 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 137 |
def _process_chunk(
|
| 138 |
chunk_id: int,
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
fixed_playclock_coords: Tuple[int, int, int, int],
|
| 144 |
-
fixed_scorebug_coords: Tuple[int, int, int, int],
|
| 145 |
-
template_library_path: Optional[str],
|
| 146 |
-
timeout_config_path: Optional[str],
|
| 147 |
-
progress_dict: Optional[Dict] = None,
|
| 148 |
) -> ChunkResult:
|
| 149 |
"""
|
| 150 |
Process a single video chunk using OpenCV.
|
|
@@ -154,47 +192,41 @@ def _process_chunk(
|
|
| 154 |
|
| 155 |
Args:
|
| 156 |
chunk_id: Identifier for this chunk (for logging).
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
frame_interval: Time interval between frames.
|
| 161 |
-
fixed_playclock_coords: (x, y, w, h) for play clock region.
|
| 162 |
-
fixed_scorebug_coords: (x, y, w, h) for scorebug region.
|
| 163 |
-
template_library_path: Path to template library directory (or None).
|
| 164 |
-
timeout_config_path: Path to timeout tracker config (or None).
|
| 165 |
progress_dict: Shared dictionary for progress updates.
|
| 166 |
|
| 167 |
Returns:
|
| 168 |
ChunkResult with processing results.
|
| 169 |
"""
|
| 170 |
-
#
|
|
|
|
|
|
|
| 171 |
import cv2
|
| 172 |
|
| 173 |
t_start = time.perf_counter()
|
| 174 |
-
io_time = 0.0
|
| 175 |
|
| 176 |
# Initialize all detection components
|
| 177 |
-
scorebug_detector, clock_reader, template_reader, timeout_tracker = _init_chunk_detectors(
|
| 178 |
-
fixed_playclock_coords, fixed_scorebug_coords, template_library_path, timeout_config_path
|
| 179 |
-
)
|
| 180 |
|
| 181 |
# Open video and seek to start
|
| 182 |
t_io_start = time.perf_counter()
|
| 183 |
-
cap = cv2.VideoCapture(video_path)
|
| 184 |
if not cap.isOpened():
|
| 185 |
-
raise RuntimeError(f"Could not open video: {video_path}")
|
| 186 |
|
| 187 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 188 |
-
frame_skip = int(frame_interval * fps)
|
| 189 |
-
start_frame = int(
|
| 190 |
-
end_frame = int(
|
| 191 |
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
|
| 192 |
-
io_time
|
| 193 |
|
| 194 |
# Initialize processing state
|
| 195 |
frame_data: List[Dict[str, Any]] = []
|
| 196 |
stats = {"total_frames": 0, "frames_with_scorebug": 0, "frames_with_clock": 0}
|
| 197 |
-
total_expected_frames = max(1, int((
|
| 198 |
|
| 199 |
# Initialize progress
|
| 200 |
if progress_dict is not None:
|
|
@@ -236,8 +268,8 @@ def _process_chunk(
|
|
| 236 |
|
| 237 |
return ChunkResult(
|
| 238 |
chunk_id=chunk_id,
|
| 239 |
-
start_time=
|
| 240 |
-
end_time=
|
| 241 |
frames_processed=stats["total_frames"],
|
| 242 |
frames_with_scorebug=stats["frames_with_scorebug"],
|
| 243 |
frames_with_clock=stats["frames_with_clock"],
|
|
@@ -278,7 +310,7 @@ def _calculate_chunk_boundaries(start_time: float, end_time: float, num_workers:
|
|
| 278 |
return chunks
|
| 279 |
|
| 280 |
|
| 281 |
-
def _create_progress_monitor(progress_dict:
|
| 282 |
"""
|
| 283 |
Create a progress monitoring thread.
|
| 284 |
|
|
@@ -291,7 +323,7 @@ def _create_progress_monitor(progress_dict: Dict, num_workers: int) -> Tuple[thr
|
|
| 291 |
"""
|
| 292 |
stop_monitor = threading.Event()
|
| 293 |
|
| 294 |
-
def monitor_progress():
|
| 295 |
"""Monitor and display progress from workers."""
|
| 296 |
while not stop_monitor.is_set():
|
| 297 |
_display_progress(progress_dict, num_workers)
|
|
@@ -301,7 +333,7 @@ def _create_progress_monitor(progress_dict: Dict, num_workers: int) -> Tuple[thr
|
|
| 301 |
return monitor_thread, stop_monitor
|
| 302 |
|
| 303 |
|
| 304 |
-
def _display_progress(progress_dict:
|
| 305 |
"""
|
| 306 |
Build and display current progress string.
|
| 307 |
|
|
@@ -337,7 +369,8 @@ def _display_progress(progress_dict: Dict, num_workers: int) -> None:
|
|
| 337 |
overall_pct = min(100, int(100 * total_frames_done / total_frames_expected)) if total_frames_expected > 0 else 0
|
| 338 |
progress_str = " | ".join(parts)
|
| 339 |
|
| 340 |
-
|
|
|
|
| 341 |
remaining = num_workers - completed_workers
|
| 342 |
status_msg = f" [{overall_pct:3d}%] {progress_str} — waiting for {remaining} worker{'s' if remaining > 1 else ''}..."
|
| 343 |
elif completed_workers == num_workers:
|
|
@@ -352,26 +385,16 @@ def _display_progress(progress_dict: Dict, num_workers: int) -> None:
|
|
| 352 |
def _submit_chunk_jobs(
|
| 353 |
executor: ProcessPoolExecutor,
|
| 354 |
chunks: List[Tuple[int, float, float]],
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
fixed_scorebug_coords: Tuple[int, int, int, int],
|
| 359 |
-
template_library_path: Optional[str],
|
| 360 |
-
timeout_config_path: Optional[str],
|
| 361 |
-
progress_dict: Dict,
|
| 362 |
-
) -> Dict[Future, int]:
|
| 363 |
"""
|
| 364 |
Submit all chunk processing jobs to the executor.
|
| 365 |
|
| 366 |
Args:
|
| 367 |
executor: ProcessPoolExecutor instance.
|
| 368 |
chunks: List of (chunk_id, start_time, end_time) tuples.
|
| 369 |
-
|
| 370 |
-
frame_interval: Time between frame samples.
|
| 371 |
-
fixed_playclock_coords: Play clock region coordinates.
|
| 372 |
-
fixed_scorebug_coords: Scorebug region coordinates.
|
| 373 |
-
template_library_path: Path to template library.
|
| 374 |
-
timeout_config_path: Path to timeout config.
|
| 375 |
progress_dict: Shared progress dictionary.
|
| 376 |
|
| 377 |
Returns:
|
|
@@ -382,21 +405,16 @@ def _submit_chunk_jobs(
|
|
| 382 |
future = executor.submit(
|
| 383 |
_process_chunk,
|
| 384 |
chunk_id,
|
| 385 |
-
|
| 386 |
chunk_start,
|
| 387 |
chunk_end,
|
| 388 |
-
frame_interval,
|
| 389 |
-
fixed_playclock_coords,
|
| 390 |
-
fixed_scorebug_coords,
|
| 391 |
-
template_library_path,
|
| 392 |
-
timeout_config_path,
|
| 393 |
progress_dict,
|
| 394 |
)
|
| 395 |
futures[future] = chunk_id
|
| 396 |
return futures
|
| 397 |
|
| 398 |
|
| 399 |
-
def _collect_chunk_results(futures: Dict[Future, int]) -> Dict[int, Optional[ChunkResult]]:
|
| 400 |
"""
|
| 401 |
Collect results from all chunk futures as they complete.
|
| 402 |
|
|
@@ -458,14 +476,7 @@ def _merge_chunk_results(results: Dict[int, Optional[ChunkResult]], num_workers:
|
|
| 458 |
|
| 459 |
|
| 460 |
def process_video_parallel(
|
| 461 |
-
|
| 462 |
-
start_time: float,
|
| 463 |
-
end_time: float,
|
| 464 |
-
frame_interval: float,
|
| 465 |
-
fixed_playclock_coords: Tuple[int, int, int, int],
|
| 466 |
-
fixed_scorebug_coords: Tuple[int, int, int, int],
|
| 467 |
-
template_library_path: Optional[str],
|
| 468 |
-
timeout_config_path: Optional[str],
|
| 469 |
num_workers: int = 2,
|
| 470 |
) -> Tuple[List[Dict[str, Any]], Dict[str, int], float]:
|
| 471 |
"""
|
|
@@ -475,22 +486,16 @@ def process_video_parallel(
|
|
| 475 |
Results are merged in chronological order.
|
| 476 |
|
| 477 |
Args:
|
| 478 |
-
|
| 479 |
-
|
| 480 |
-
end_time: End time in seconds.
|
| 481 |
-
frame_interval: Time interval between frames.
|
| 482 |
-
fixed_playclock_coords: (x, y, w, h) for play clock region.
|
| 483 |
-
fixed_scorebug_coords: (x, y, w, h) for scorebug region.
|
| 484 |
-
template_library_path: Path to template library directory.
|
| 485 |
-
timeout_config_path: Path to timeout tracker config.
|
| 486 |
num_workers: Number of parallel workers (default 2).
|
| 487 |
|
| 488 |
Returns:
|
| 489 |
Tuple of (frame_data_list, stats_dict, total_io_time).
|
| 490 |
"""
|
| 491 |
# Calculate chunk boundaries
|
| 492 |
-
chunks = _calculate_chunk_boundaries(start_time, end_time, num_workers)
|
| 493 |
-
chunk_duration = (end_time - start_time) / num_workers
|
| 494 |
|
| 495 |
logger.info("Parallel processing: %d workers, %.1fs per chunk", num_workers, chunk_duration)
|
| 496 |
for chunk_id, chunk_start, chunk_end in chunks:
|
|
@@ -508,9 +513,7 @@ def process_video_parallel(
|
|
| 508 |
|
| 509 |
# Execute chunks in parallel
|
| 510 |
with ProcessPoolExecutor(max_workers=num_workers) as executor:
|
| 511 |
-
futures = _submit_chunk_jobs(
|
| 512 |
-
executor, chunks, video_path, frame_interval, fixed_playclock_coords, fixed_scorebug_coords, template_library_path, timeout_config_path, progress_dict
|
| 513 |
-
)
|
| 514 |
results = _collect_chunk_results(futures)
|
| 515 |
|
| 516 |
# Stop progress monitor and show completion
|
|
|
|
| 13 |
from concurrent.futures import Future, ProcessPoolExecutor, as_completed
|
| 14 |
from multiprocessing import Manager
|
| 15 |
from pathlib import Path
|
| 16 |
+
from typing import Any, Dict, List, MutableMapping, Optional, Tuple
|
| 17 |
|
| 18 |
+
from utils import create_frame_result
|
| 19 |
+
from .models import ChunkResult, ParallelProcessingConfig
|
| 20 |
|
| 21 |
logger = logging.getLogger(__name__)
|
| 22 |
|
|
|
|
| 26 |
# =============================================================================
|
| 27 |
|
| 28 |
|
| 29 |
+
def _init_chunk_detectors(config: ParallelProcessingConfig) -> Tuple[Any, Any, Any, Any]:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
"""
|
| 31 |
Initialize all detection components for a chunk worker.
|
| 32 |
|
| 33 |
Must be called within the subprocess since these objects can't be pickled.
|
| 34 |
|
| 35 |
+
Note: Imports are inside this function because multiprocessing requires
|
| 36 |
+
fresh imports in each worker process. Moving these to module level would
|
| 37 |
+
cause pickling errors when spawning workers.
|
| 38 |
+
|
| 39 |
Args:
|
| 40 |
+
config: Parallel processing configuration.
|
|
|
|
|
|
|
|
|
|
| 41 |
|
| 42 |
Returns:
|
| 43 |
Tuple of (scorebug_detector, clock_reader, template_reader, timeout_tracker).
|
| 44 |
"""
|
| 45 |
+
# pylint: disable=import-outside-toplevel
|
| 46 |
+
# Imports must be inside function for multiprocessing - each subprocess
|
| 47 |
+
# needs its own fresh imports of these modules to avoid pickling errors
|
| 48 |
+
from detection import DetectScoreBug, DetectTimeouts
|
| 49 |
+
from readers import ReadPlayClock
|
| 50 |
+
from setup import DigitTemplateLibrary, PlayClockRegionConfig, PlayClockRegionExtractor
|
| 51 |
|
| 52 |
# Create scorebug detector with fixed region
|
| 53 |
+
scorebug_detector = DetectScoreBug(template_path=None, use_split_detection=True)
|
| 54 |
+
scorebug_detector.set_fixed_region(config.fixed_scorebug_coords)
|
| 55 |
+
|
| 56 |
+
# Create play clock region extractor
|
| 57 |
+
pc_x, pc_y, pc_w, pc_h = config.fixed_playclock_coords
|
| 58 |
+
sb_x, sb_y, _, _ = config.fixed_scorebug_coords
|
| 59 |
+
playclock_config = PlayClockRegionConfig(
|
| 60 |
+
x_offset=pc_x - sb_x,
|
| 61 |
+
y_offset=pc_y - sb_y,
|
| 62 |
+
width=pc_w,
|
| 63 |
+
height=pc_h,
|
| 64 |
+
source_video="",
|
| 65 |
+
scorebug_template="",
|
| 66 |
+
samples_used=0,
|
| 67 |
+
)
|
| 68 |
+
clock_reader = PlayClockRegionExtractor(region_config=playclock_config)
|
| 69 |
|
| 70 |
# Create template reader if template path provided
|
| 71 |
template_reader = None
|
| 72 |
+
if config.template_library_path and Path(config.template_library_path).exists():
|
| 73 |
template_library = DigitTemplateLibrary()
|
| 74 |
+
if template_library.load(config.template_library_path):
|
| 75 |
+
template_reader = ReadPlayClock(template_library, pc_w, pc_h)
|
| 76 |
|
| 77 |
# Initialize timeout tracker if config provided
|
| 78 |
timeout_tracker = None
|
| 79 |
+
if config.timeout_config_path and Path(config.timeout_config_path).exists():
|
| 80 |
+
timeout_tracker = DetectTimeouts(config_path=config.timeout_config_path)
|
| 81 |
|
| 82 |
return scorebug_detector, clock_reader, template_reader, timeout_tracker
|
| 83 |
|
|
|
|
| 97 |
Args:
|
| 98 |
img: OpenCV image (numpy array).
|
| 99 |
timestamp: Frame timestamp in seconds.
|
| 100 |
+
scorebug_detector: Initialized DetectScoreBug.
|
| 101 |
+
clock_reader: Initialized PlayClockRegionExtractor.
|
| 102 |
+
template_reader: Initialized ReadPlayClock (or None).
|
| 103 |
+
timeout_tracker: Initialized DetectTimeouts (or None).
|
| 104 |
stats: Mutable dict to update with detection statistics.
|
| 105 |
|
| 106 |
Returns:
|
|
|
|
| 109 |
# Detect scorebug (fast path with fixed region)
|
| 110 |
scorebug = scorebug_detector.detect(img)
|
| 111 |
|
| 112 |
+
# Initialize frame result using shared factory
|
| 113 |
+
frame_result = create_frame_result(
|
| 114 |
+
timestamp=timestamp,
|
| 115 |
+
scorebug_detected=scorebug.detected,
|
| 116 |
+
scorebug_bbox=scorebug.bbox if scorebug.detected else None,
|
| 117 |
+
)
|
|
|
|
|
|
|
|
|
|
| 118 |
|
| 119 |
if scorebug.detected:
|
| 120 |
stats["frames_with_scorebug"] += 1
|
|
|
|
| 126 |
frame_result["away_timeouts"] = timeout_reading.away_timeouts
|
| 127 |
|
| 128 |
# Extract play clock region and template match
|
| 129 |
+
play_clock_region = clock_reader.extract_region(img, scorebug.bbox)
|
| 130 |
if play_clock_region is not None and template_reader:
|
| 131 |
clock_result = template_reader.read(play_clock_region)
|
| 132 |
frame_result["clock_detected"] = clock_result.detected
|
|
|
|
| 137 |
return frame_result
|
| 138 |
|
| 139 |
|
| 140 |
+
def _read_video_frames(cap: Any, start_frame: int, end_frame: int, frame_skip: int, fps: float) -> Tuple[List[Tuple[float, Any]], float]:
|
| 141 |
+
"""
|
| 142 |
+
Read frames from video capture within the given range.
|
| 143 |
+
|
| 144 |
+
Args:
|
| 145 |
+
cap: OpenCV VideoCapture object.
|
| 146 |
+
start_frame: First frame to read.
|
| 147 |
+
end_frame: Last frame to read.
|
| 148 |
+
frame_skip: Number of frames to skip between samples.
|
| 149 |
+
fps: Video frames per second.
|
| 150 |
+
|
| 151 |
+
Returns:
|
| 152 |
+
Tuple of (list of (timestamp, frame) tuples, total_io_time).
|
| 153 |
+
"""
|
| 154 |
+
frames = []
|
| 155 |
+
io_time = 0.0
|
| 156 |
+
current_frame = start_frame
|
| 157 |
+
|
| 158 |
+
while current_frame < end_frame:
|
| 159 |
+
t_io_start = time.perf_counter()
|
| 160 |
+
ret, img = cap.read()
|
| 161 |
+
io_time += time.perf_counter() - t_io_start
|
| 162 |
+
|
| 163 |
+
if not ret:
|
| 164 |
+
break
|
| 165 |
+
|
| 166 |
+
timestamp = current_frame / fps
|
| 167 |
+
frames.append((timestamp, img))
|
| 168 |
+
|
| 169 |
+
# Skip to next sample frame
|
| 170 |
+
t_io_start = time.perf_counter()
|
| 171 |
+
for _ in range(frame_skip - 1):
|
| 172 |
+
cap.grab()
|
| 173 |
+
io_time += time.perf_counter() - t_io_start
|
| 174 |
+
current_frame += frame_skip
|
| 175 |
+
|
| 176 |
+
return frames, io_time
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# pylint: disable=too-many-locals
|
| 180 |
def _process_chunk(
|
| 181 |
chunk_id: int,
|
| 182 |
+
config: ParallelProcessingConfig,
|
| 183 |
+
chunk_start: float,
|
| 184 |
+
chunk_end: float,
|
| 185 |
+
progress_dict: Optional[MutableMapping[int, Any]] = None,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
) -> ChunkResult:
|
| 187 |
"""
|
| 188 |
Process a single video chunk using OpenCV.
|
|
|
|
| 192 |
|
| 193 |
Args:
|
| 194 |
chunk_id: Identifier for this chunk (for logging).
|
| 195 |
+
config: Parallel processing configuration.
|
| 196 |
+
chunk_start: Chunk start time in seconds.
|
| 197 |
+
chunk_end: Chunk end time in seconds.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 198 |
progress_dict: Shared dictionary for progress updates.
|
| 199 |
|
| 200 |
Returns:
|
| 201 |
ChunkResult with processing results.
|
| 202 |
"""
|
| 203 |
+
# pylint: disable=import-outside-toplevel
|
| 204 |
+
# cv2 import must be inside function for multiprocessing - each subprocess
|
| 205 |
+
# needs its own fresh import to avoid issues with OpenCV's internal state
|
| 206 |
import cv2
|
| 207 |
|
| 208 |
t_start = time.perf_counter()
|
|
|
|
| 209 |
|
| 210 |
# Initialize all detection components
|
| 211 |
+
scorebug_detector, clock_reader, template_reader, timeout_tracker = _init_chunk_detectors(config)
|
|
|
|
|
|
|
| 212 |
|
| 213 |
# Open video and seek to start
|
| 214 |
t_io_start = time.perf_counter()
|
| 215 |
+
cap = cv2.VideoCapture(config.video_path)
|
| 216 |
if not cap.isOpened():
|
| 217 |
+
raise RuntimeError(f"Could not open video: {config.video_path}")
|
| 218 |
|
| 219 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 220 |
+
frame_skip = int(config.frame_interval * fps)
|
| 221 |
+
start_frame = int(chunk_start * fps)
|
| 222 |
+
end_frame = int(chunk_end * fps)
|
| 223 |
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
|
| 224 |
+
io_time = time.perf_counter() - t_io_start
|
| 225 |
|
| 226 |
# Initialize processing state
|
| 227 |
frame_data: List[Dict[str, Any]] = []
|
| 228 |
stats = {"total_frames": 0, "frames_with_scorebug": 0, "frames_with_clock": 0}
|
| 229 |
+
total_expected_frames = max(1, int((chunk_end - chunk_start) / config.frame_interval))
|
| 230 |
|
| 231 |
# Initialize progress
|
| 232 |
if progress_dict is not None:
|
|
|
|
| 268 |
|
| 269 |
return ChunkResult(
|
| 270 |
chunk_id=chunk_id,
|
| 271 |
+
start_time=chunk_start,
|
| 272 |
+
end_time=chunk_end,
|
| 273 |
frames_processed=stats["total_frames"],
|
| 274 |
frames_with_scorebug=stats["frames_with_scorebug"],
|
| 275 |
frames_with_clock=stats["frames_with_clock"],
|
|
|
|
| 310 |
return chunks
|
| 311 |
|
| 312 |
|
| 313 |
+
def _create_progress_monitor(progress_dict: MutableMapping[int, Any], num_workers: int) -> Tuple[threading.Thread, threading.Event]:
|
| 314 |
"""
|
| 315 |
Create a progress monitoring thread.
|
| 316 |
|
|
|
|
| 323 |
"""
|
| 324 |
stop_monitor = threading.Event()
|
| 325 |
|
| 326 |
+
def monitor_progress() -> None:
|
| 327 |
"""Monitor and display progress from workers."""
|
| 328 |
while not stop_monitor.is_set():
|
| 329 |
_display_progress(progress_dict, num_workers)
|
|
|
|
| 333 |
return monitor_thread, stop_monitor
|
| 334 |
|
| 335 |
|
| 336 |
+
def _display_progress(progress_dict: MutableMapping[int, Any], num_workers: int) -> None:
|
| 337 |
"""
|
| 338 |
Build and display current progress string.
|
| 339 |
|
|
|
|
| 369 |
overall_pct = min(100, int(100 * total_frames_done / total_frames_expected)) if total_frames_expected > 0 else 0
|
| 370 |
progress_str = " | ".join(parts)
|
| 371 |
|
| 372 |
+
# Use chained comparison for cleaner code
|
| 373 |
+
if 0 < completed_workers < num_workers:
|
| 374 |
remaining = num_workers - completed_workers
|
| 375 |
status_msg = f" [{overall_pct:3d}%] {progress_str} — waiting for {remaining} worker{'s' if remaining > 1 else ''}..."
|
| 376 |
elif completed_workers == num_workers:
|
|
|
|
| 385 |
def _submit_chunk_jobs(
|
| 386 |
executor: ProcessPoolExecutor,
|
| 387 |
chunks: List[Tuple[int, float, float]],
|
| 388 |
+
config: ParallelProcessingConfig,
|
| 389 |
+
progress_dict: MutableMapping[int, Any],
|
| 390 |
+
) -> Dict[Future[ChunkResult], int]:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 391 |
"""
|
| 392 |
Submit all chunk processing jobs to the executor.
|
| 393 |
|
| 394 |
Args:
|
| 395 |
executor: ProcessPoolExecutor instance.
|
| 396 |
chunks: List of (chunk_id, start_time, end_time) tuples.
|
| 397 |
+
config: Parallel processing configuration.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 398 |
progress_dict: Shared progress dictionary.
|
| 399 |
|
| 400 |
Returns:
|
|
|
|
| 405 |
future = executor.submit(
|
| 406 |
_process_chunk,
|
| 407 |
chunk_id,
|
| 408 |
+
config,
|
| 409 |
chunk_start,
|
| 410 |
chunk_end,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 411 |
progress_dict,
|
| 412 |
)
|
| 413 |
futures[future] = chunk_id
|
| 414 |
return futures
|
| 415 |
|
| 416 |
|
| 417 |
+
def _collect_chunk_results(futures: Dict[Future[ChunkResult], int]) -> Dict[int, Optional[ChunkResult]]:
|
| 418 |
"""
|
| 419 |
Collect results from all chunk futures as they complete.
|
| 420 |
|
|
|
|
| 476 |
|
| 477 |
|
| 478 |
def process_video_parallel(
|
| 479 |
+
config: ParallelProcessingConfig,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 480 |
num_workers: int = 2,
|
| 481 |
) -> Tuple[List[Dict[str, Any]], Dict[str, int], float]:
|
| 482 |
"""
|
|
|
|
| 486 |
Results are merged in chronological order.
|
| 487 |
|
| 488 |
Args:
|
| 489 |
+
config: Parallel processing configuration containing video path,
|
| 490 |
+
time bounds, frame interval, and region coordinates.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 491 |
num_workers: Number of parallel workers (default 2).
|
| 492 |
|
| 493 |
Returns:
|
| 494 |
Tuple of (frame_data_list, stats_dict, total_io_time).
|
| 495 |
"""
|
| 496 |
# Calculate chunk boundaries
|
| 497 |
+
chunks = _calculate_chunk_boundaries(config.start_time, config.end_time, num_workers)
|
| 498 |
+
chunk_duration = (config.end_time - config.start_time) / num_workers
|
| 499 |
|
| 500 |
logger.info("Parallel processing: %d workers, %.1fs per chunk", num_workers, chunk_duration)
|
| 501 |
for chunk_id, chunk_start, chunk_end in chunks:
|
|
|
|
| 513 |
|
| 514 |
# Execute chunks in parallel
|
| 515 |
with ProcessPoolExecutor(max_workers=num_workers) as executor:
|
| 516 |
+
futures = _submit_chunk_jobs(executor, chunks, config, progress_dict)
|
|
|
|
|
|
|
| 517 |
results = _collect_chunk_results(futures)
|
| 518 |
|
| 519 |
# Stop progress monitor and show completion
|
src/pipeline/{play_detector.py → play_extractor.py}
RENAMED
|
@@ -1,12 +1,12 @@
|
|
| 1 |
-
# pylint: disable=too-many-lines
|
| 2 |
"""
|
| 3 |
-
Play
|
| 4 |
|
| 5 |
-
This module orchestrates the complete play
|
| 6 |
1. Video frame extraction
|
| 7 |
2. Scorebug detection
|
| 8 |
3. Play clock reading via template matching
|
| 9 |
4. Play state machine processing
|
|
|
|
| 10 |
|
| 11 |
Performance optimizations:
|
| 12 |
- Streaming processing: read frame -> process immediately (no intermediate storage)
|
|
@@ -19,161 +19,27 @@ See docs/ocr_to_template_migration.md for details.
|
|
| 19 |
|
| 20 |
import json
|
| 21 |
import logging
|
| 22 |
-
import queue
|
| 23 |
-
import threading
|
| 24 |
import time
|
| 25 |
from pathlib import Path
|
| 26 |
from typing import Optional, List, Dict, Any, Tuple
|
| 27 |
|
| 28 |
import cv2
|
| 29 |
-
import easyocr
|
| 30 |
import numpy as np
|
| 31 |
|
| 32 |
-
from
|
| 33 |
-
from
|
| 34 |
-
from
|
| 35 |
-
from
|
|
|
|
|
|
|
|
|
|
| 36 |
from .parallel import process_video_parallel
|
|
|
|
| 37 |
|
| 38 |
logger = logging.getLogger(__name__)
|
| 39 |
|
| 40 |
-
# Global EasyOCR reader instance for template building (lazy-loaded)
|
| 41 |
-
_easyocr_reader = None # pylint: disable=invalid-name
|
| 42 |
|
| 43 |
-
|
| 44 |
-
class ThreadedFrameReader:
|
| 45 |
-
"""
|
| 46 |
-
Background thread for reading video frames.
|
| 47 |
-
|
| 48 |
-
Uses a producer-consumer pattern to overlap video I/O with processing.
|
| 49 |
-
The reader thread reads frames ahead into a queue while the main thread
|
| 50 |
-
processes frames from the queue.
|
| 51 |
-
|
| 52 |
-
This provides significant speedup by hiding video decode latency.
|
| 53 |
-
"""
|
| 54 |
-
|
| 55 |
-
def __init__(self, cap: cv2.VideoCapture, start_frame: int, end_frame: int, frame_skip: int, queue_size: int = 32):
|
| 56 |
-
"""
|
| 57 |
-
Initialize the threaded frame reader.
|
| 58 |
-
|
| 59 |
-
Args:
|
| 60 |
-
cap: OpenCV VideoCapture object
|
| 61 |
-
start_frame: First frame to read
|
| 62 |
-
end_frame: Last frame to read
|
| 63 |
-
frame_skip: Number of frames to skip between reads
|
| 64 |
-
queue_size: Maximum frames to buffer (default 32)
|
| 65 |
-
"""
|
| 66 |
-
self.cap = cap
|
| 67 |
-
self.start_frame = start_frame
|
| 68 |
-
self.end_frame = end_frame
|
| 69 |
-
self.frame_skip = frame_skip
|
| 70 |
-
self.queue_size = queue_size
|
| 71 |
-
|
| 72 |
-
# Frame queue: (frame_number, frame_data) or (frame_number, None) for read failures
|
| 73 |
-
self.frame_queue: queue.Queue = queue.Queue(maxsize=queue_size)
|
| 74 |
-
|
| 75 |
-
# Control flags
|
| 76 |
-
self.stop_flag = threading.Event()
|
| 77 |
-
self.reader_thread: Optional[threading.Thread] = None
|
| 78 |
-
|
| 79 |
-
# Timing stats
|
| 80 |
-
self.io_time = 0.0
|
| 81 |
-
self.frames_read = 0
|
| 82 |
-
|
| 83 |
-
def start(self) -> None:
|
| 84 |
-
"""Start the background reader thread."""
|
| 85 |
-
self.stop_flag.clear()
|
| 86 |
-
self.reader_thread = threading.Thread(target=self._reader_loop, daemon=True)
|
| 87 |
-
self.reader_thread.start()
|
| 88 |
-
logger.debug("Threaded frame reader started")
|
| 89 |
-
|
| 90 |
-
def stop(self) -> None:
|
| 91 |
-
"""Stop the background reader thread."""
|
| 92 |
-
self.stop_flag.set()
|
| 93 |
-
if self.reader_thread and self.reader_thread.is_alive():
|
| 94 |
-
# Drain the queue to unblock the reader thread
|
| 95 |
-
try:
|
| 96 |
-
while True:
|
| 97 |
-
self.frame_queue.get_nowait()
|
| 98 |
-
except queue.Empty:
|
| 99 |
-
pass
|
| 100 |
-
self.reader_thread.join(timeout=2.0)
|
| 101 |
-
logger.debug("Threaded frame reader stopped (read %d frames, %.2fs I/O)", self.frames_read, self.io_time)
|
| 102 |
-
|
| 103 |
-
def get_frame(self, timeout: float = 5.0) -> Optional[Tuple[int, Optional[np.ndarray]]]:
|
| 104 |
-
"""
|
| 105 |
-
Get the next frame from the queue.
|
| 106 |
-
|
| 107 |
-
Args:
|
| 108 |
-
timeout: Maximum time to wait for a frame
|
| 109 |
-
|
| 110 |
-
Returns:
|
| 111 |
-
Tuple of (frame_number, frame_data) or None if queue is empty and reader is done
|
| 112 |
-
"""
|
| 113 |
-
try:
|
| 114 |
-
return self.frame_queue.get(timeout=timeout)
|
| 115 |
-
except queue.Empty:
|
| 116 |
-
return None
|
| 117 |
-
|
| 118 |
-
def _reader_loop(self) -> None:
|
| 119 |
-
"""Background thread that reads frames into the queue."""
|
| 120 |
-
# Seek to start position
|
| 121 |
-
t_start = time.perf_counter()
|
| 122 |
-
self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.start_frame)
|
| 123 |
-
self.io_time += time.perf_counter() - t_start
|
| 124 |
-
|
| 125 |
-
current_frame = self.start_frame
|
| 126 |
-
|
| 127 |
-
while current_frame < self.end_frame and not self.stop_flag.is_set():
|
| 128 |
-
# Read frame
|
| 129 |
-
t_start = time.perf_counter()
|
| 130 |
-
ret, frame = self.cap.read()
|
| 131 |
-
self.io_time += time.perf_counter() - t_start
|
| 132 |
-
|
| 133 |
-
if ret:
|
| 134 |
-
self.frames_read += 1
|
| 135 |
-
# Put frame in queue (blocks if queue is full)
|
| 136 |
-
try:
|
| 137 |
-
self.frame_queue.put((current_frame, frame), timeout=5.0)
|
| 138 |
-
except queue.Full:
|
| 139 |
-
if self.stop_flag.is_set():
|
| 140 |
-
break
|
| 141 |
-
logger.warning("Frame queue full, dropping frame %d", current_frame)
|
| 142 |
-
else:
|
| 143 |
-
# Signal read failure
|
| 144 |
-
try:
|
| 145 |
-
self.frame_queue.put((current_frame, None), timeout=1.0)
|
| 146 |
-
except queue.Full:
|
| 147 |
-
pass
|
| 148 |
-
|
| 149 |
-
# Skip frames
|
| 150 |
-
t_start = time.perf_counter()
|
| 151 |
-
for _ in range(self.frame_skip - 1):
|
| 152 |
-
if self.stop_flag.is_set():
|
| 153 |
-
break
|
| 154 |
-
self.cap.grab()
|
| 155 |
-
self.io_time += time.perf_counter() - t_start
|
| 156 |
-
|
| 157 |
-
current_frame += self.frame_skip
|
| 158 |
-
|
| 159 |
-
# Signal end of stream
|
| 160 |
-
try:
|
| 161 |
-
self.frame_queue.put(None, timeout=1.0)
|
| 162 |
-
except queue.Full:
|
| 163 |
-
pass
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
def _get_easyocr_reader() -> easyocr.Reader:
|
| 167 |
-
"""Get or create the global EasyOCR reader instance for template building."""
|
| 168 |
-
global _easyocr_reader # pylint: disable=global-statement
|
| 169 |
-
if _easyocr_reader is None:
|
| 170 |
-
logger.info("Initializing EasyOCR reader for template building...")
|
| 171 |
-
_easyocr_reader = easyocr.Reader(["en"], gpu=False, verbose=False)
|
| 172 |
-
logger.info("EasyOCR reader initialized")
|
| 173 |
-
return _easyocr_reader
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
def format_detection_result_dict(result: DetectionResult) -> Dict[str, Any]:
|
| 177 |
"""
|
| 178 |
Format a DetectionResult into a dictionary for JSON serialization or API return.
|
| 179 |
|
|
@@ -197,37 +63,36 @@ def format_detection_result_dict(result: DetectionResult) -> Dict[str, Any]:
|
|
| 197 |
}
|
| 198 |
|
| 199 |
|
| 200 |
-
class
|
| 201 |
"""
|
| 202 |
-
Main pipeline for
|
| 203 |
-
|
| 204 |
-
This class orchestrates all
|
| 205 |
-
-
|
| 206 |
-
-
|
| 207 |
-
-
|
| 208 |
-
-
|
|
|
|
| 209 |
"""
|
| 210 |
|
| 211 |
-
def __init__(self, config: DetectionConfig, timeout_tracker: Optional[
|
| 212 |
"""
|
| 213 |
-
Initialize the play
|
| 214 |
|
| 215 |
Args:
|
| 216 |
config: Detection configuration
|
| 217 |
timeout_tracker: Optional timeout tracker for clock reset classification
|
| 218 |
"""
|
| 219 |
self.config = config
|
| 220 |
-
self.scorebug_detector: Optional[ScorebugDetector] = None
|
| 221 |
-
self.clock_reader: Optional[PlayClockReader] = None
|
| 222 |
-
self.state_machine: Optional[PlayStateMachine] = None
|
| 223 |
self.timeout_tracker = timeout_tracker
|
| 224 |
|
| 225 |
-
# Template-based clock reading components
|
| 226 |
self.template_builder: Optional[DigitTemplateBuilder] = None
|
| 227 |
self.template_library: Optional[DigitTemplateLibrary] = None
|
| 228 |
-
self.template_reader: Optional[
|
| 229 |
|
| 230 |
self._validate_config()
|
|
|
|
| 231 |
self._initialize_components()
|
| 232 |
|
| 233 |
def _validate_config(self) -> None:
|
|
@@ -248,11 +113,11 @@ class PlayDetector:
|
|
| 248 |
raise FileNotFoundError(f"Clock region config not found: {self.config.clock_region_config_path}")
|
| 249 |
|
| 250 |
def _initialize_components(self) -> None:
|
| 251 |
-
"""Initialize
|
| 252 |
-
logger.info("Initializing play
|
| 253 |
|
| 254 |
# Determine if we're using fixed coordinates mode
|
| 255 |
-
# In this mode, we still use the same
|
| 256 |
use_fixed_coords = self.config.fixed_playclock_coords is not None
|
| 257 |
|
| 258 |
if use_fixed_coords:
|
|
@@ -260,6 +125,7 @@ class PlayDetector:
|
|
| 260 |
logger.info("Fixed coordinates mode - regions pre-configured")
|
| 261 |
|
| 262 |
# Compute play clock offset relative to scorebug from absolute coordinates
|
|
|
|
| 263 |
pc_x, pc_y, pc_w, pc_h = self.config.fixed_playclock_coords
|
| 264 |
if self.config.fixed_scorebug_coords:
|
| 265 |
sb_x, sb_y, _, _ = self.config.fixed_scorebug_coords
|
|
@@ -273,27 +139,27 @@ class PlayDetector:
|
|
| 273 |
playclock_config = PlayClockRegionConfig(x_offset=x_offset, y_offset=y_offset, width=pc_w, height=pc_h, source_video="", scorebug_template="", samples_used=0)
|
| 274 |
|
| 275 |
# Initialize scorebug detector (will set fixed region below)
|
| 276 |
-
self.scorebug_detector =
|
| 277 |
|
| 278 |
# Set the fixed region immediately so no template matching is needed
|
| 279 |
if self.config.fixed_scorebug_coords:
|
| 280 |
self.scorebug_detector.set_fixed_region(self.config.fixed_scorebug_coords)
|
| 281 |
logger.info("Scorebug fixed region set: %s", self.config.fixed_scorebug_coords)
|
| 282 |
|
| 283 |
-
# Initialize play clock
|
| 284 |
-
self.clock_reader =
|
| 285 |
-
logger.info("Play clock
|
| 286 |
else:
|
| 287 |
# Standard mode: use template and config files
|
| 288 |
-
self.scorebug_detector =
|
| 289 |
logger.info("Scorebug detector initialized (split_detection=%s)", self.config.use_split_detection)
|
| 290 |
|
| 291 |
-
# Initialize play clock
|
| 292 |
-
self.clock_reader =
|
| 293 |
-
logger.info("Play clock
|
| 294 |
|
| 295 |
# Initialize state machine
|
| 296 |
-
self.state_machine =
|
| 297 |
logger.info("State machine initialized")
|
| 298 |
|
| 299 |
# Initialize template matching components
|
|
@@ -309,10 +175,10 @@ class PlayDetector:
|
|
| 309 |
self.template_library = DigitTemplateLibrary()
|
| 310 |
if self.template_library.load(self.config.digit_template_path):
|
| 311 |
logger.info("Loaded pre-built digit templates from %s", self.config.digit_template_path)
|
| 312 |
-
self.template_reader =
|
| 313 |
else:
|
| 314 |
self.template_library = None
|
| 315 |
-
logger.info("Could not load templates, will build during
|
| 316 |
|
| 317 |
# Initialize template builder for collection phase if no templates loaded
|
| 318 |
if self.template_library is None:
|
|
@@ -375,22 +241,7 @@ class PlayDetector:
|
|
| 375 |
"""
|
| 376 |
Pass 0: Build digit templates by scanning video using TEMPLATE-BASED scorebug detection.
|
| 377 |
|
| 378 |
-
|
| 379 |
-
1. Uses the scorebug template (created during user setup) for detection
|
| 380 |
-
2. Scans through video until finding frames with actual scorebugs
|
| 381 |
-
3. Runs OCR on ONLY frames where scorebug is detected
|
| 382 |
-
4. Builds templates from samples with high-confidence OCR results
|
| 383 |
-
|
| 384 |
-
This solves the problem of building templates from pre-game content that
|
| 385 |
-
has no scorebug, which causes all subsequent template matching to fail.
|
| 386 |
-
|
| 387 |
-
The scorebug is verified using template matching (same as main detection),
|
| 388 |
-
not just brightness/contrast heuristics.
|
| 389 |
-
|
| 390 |
-
Completion criteria:
|
| 391 |
-
- At least min_samples valid OCR samples collected
|
| 392 |
-
- OR template coverage >= 70%
|
| 393 |
-
- OR scanned max_scan_frames frames
|
| 394 |
|
| 395 |
Args:
|
| 396 |
timing: Timing dictionary to update
|
|
@@ -398,168 +249,24 @@ class PlayDetector:
|
|
| 398 |
Returns:
|
| 399 |
True if templates were built successfully, False otherwise
|
| 400 |
"""
|
| 401 |
-
#
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
|
| 408 |
-
|
| 409 |
-
t_build_start = time.perf_counter()
|
| 410 |
-
|
| 411 |
-
# Need both template and fixed coordinates for Pass 0
|
| 412 |
-
if not self.config.template_path:
|
| 413 |
-
logger.warning("Pass 0: No scorebug template path provided, cannot detect scorebugs")
|
| 414 |
-
return False
|
| 415 |
-
|
| 416 |
-
template_path = Path(self.config.template_path)
|
| 417 |
-
if not template_path.exists():
|
| 418 |
-
logger.warning("Pass 0: Scorebug template not found at %s", template_path)
|
| 419 |
-
return False
|
| 420 |
-
|
| 421 |
-
if not self.config.fixed_scorebug_coords:
|
| 422 |
-
logger.warning("Pass 0: No fixed scorebug coordinates provided")
|
| 423 |
-
return False
|
| 424 |
-
|
| 425 |
-
sb_x, sb_y, sb_w, sb_h = self.config.fixed_scorebug_coords
|
| 426 |
-
logger.info(" Scorebug region: (%d, %d, %d, %d)", sb_x, sb_y, sb_w, sb_h)
|
| 427 |
-
logger.info(" Scorebug template: %s", template_path)
|
| 428 |
-
|
| 429 |
-
# Create a temporary ScorebugDetector for Pass 0 detection
|
| 430 |
-
# This uses the user-created template + fixed region for fast/accurate detection
|
| 431 |
-
temp_detector = ScorebugDetector(
|
| 432 |
-
template_path=str(template_path),
|
| 433 |
-
fixed_region=(sb_x, sb_y, sb_w, sb_h),
|
| 434 |
-
use_split_detection=self.config.use_split_detection,
|
| 435 |
-
)
|
| 436 |
-
|
| 437 |
-
# Get EasyOCR reader for labeling
|
| 438 |
-
reader = _get_easyocr_reader()
|
| 439 |
-
|
| 440 |
-
# Open video for scanning
|
| 441 |
-
cap = cv2.VideoCapture(self.config.video_path)
|
| 442 |
-
if not cap.isOpened():
|
| 443 |
-
logger.error("Pass 0: Could not open video")
|
| 444 |
-
return False
|
| 445 |
-
|
| 446 |
-
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 447 |
-
frame_skip = int(self.config.frame_interval * fps)
|
| 448 |
-
|
| 449 |
-
# Scan from start of video to find where game content starts
|
| 450 |
-
scan_start_frame = 0
|
| 451 |
-
|
| 452 |
-
cap.set(cv2.CAP_PROP_POS_FRAMES, scan_start_frame)
|
| 453 |
-
|
| 454 |
-
valid_samples = 0
|
| 455 |
-
frames_scanned = 0
|
| 456 |
-
frames_with_scorebug = 0
|
| 457 |
-
current_frame = scan_start_frame
|
| 458 |
-
|
| 459 |
-
logger.info(" Scanning from frame %d (%.1fs)...", scan_start_frame, scan_start_frame / fps)
|
| 460 |
-
|
| 461 |
-
while frames_scanned < max_scan_frames:
|
| 462 |
-
ret, frame = cap.read()
|
| 463 |
-
if not ret:
|
| 464 |
-
break
|
| 465 |
-
|
| 466 |
-
current_time = current_frame / fps
|
| 467 |
-
frames_scanned += 1
|
| 468 |
-
|
| 469 |
-
# Use REAL scorebug detection (template matching)
|
| 470 |
-
scorebug_result = temp_detector.detect(frame)
|
| 471 |
-
|
| 472 |
-
if scorebug_result.detected:
|
| 473 |
-
frames_with_scorebug += 1
|
| 474 |
-
|
| 475 |
-
# Extract play clock region using the detected scorebug bbox
|
| 476 |
-
scorebug_bbox = scorebug_result.bbox
|
| 477 |
-
play_clock_region = self.clock_reader._extract_region(frame, scorebug_bbox) # pylint: disable=protected-access
|
| 478 |
-
|
| 479 |
-
if play_clock_region is not None:
|
| 480 |
-
# Preprocess and run OCR
|
| 481 |
-
preprocessed = self.clock_reader._preprocess_for_ocr(play_clock_region) # pylint: disable=protected-access
|
| 482 |
-
|
| 483 |
-
try:
|
| 484 |
-
ocr_results = reader.readtext(preprocessed, allowlist="0123456789", detail=1)
|
| 485 |
-
if ocr_results:
|
| 486 |
-
best = max(ocr_results, key=lambda x: x[2])
|
| 487 |
-
text, confidence = best[1].strip(), best[2]
|
| 488 |
-
|
| 489 |
-
# Parse and validate
|
| 490 |
-
if text and confidence >= 0.5:
|
| 491 |
-
try:
|
| 492 |
-
value = int(text)
|
| 493 |
-
if 0 <= value <= 40:
|
| 494 |
-
# Add sample to template builder
|
| 495 |
-
self.template_builder.add_sample(play_clock_region, value, current_time, confidence)
|
| 496 |
-
valid_samples += 1
|
| 497 |
-
except ValueError:
|
| 498 |
-
pass # Invalid text, skip
|
| 499 |
-
except Exception as e: # pylint: disable=broad-except
|
| 500 |
-
logger.debug("Pass 0: OCR error at %.1fs: %s", current_time, e)
|
| 501 |
-
|
| 502 |
-
# Progress logging every 200 frames
|
| 503 |
-
if frames_scanned % 200 == 0:
|
| 504 |
-
# Check current template coverage
|
| 505 |
-
coverage = self.template_builder.get_coverage_estimate()
|
| 506 |
-
logger.info(
|
| 507 |
-
" Pass 0 progress: %d frames scanned, %d with scorebug, %d valid samples, ~%.0f%% coverage",
|
| 508 |
-
frames_scanned,
|
| 509 |
-
frames_with_scorebug,
|
| 510 |
-
valid_samples,
|
| 511 |
-
coverage * 100,
|
| 512 |
-
)
|
| 513 |
-
|
| 514 |
-
# Check completion criteria
|
| 515 |
-
if valid_samples >= min_samples or coverage >= target_coverage:
|
| 516 |
-
logger.info(" Completion criteria met!")
|
| 517 |
-
break
|
| 518 |
-
|
| 519 |
-
# Skip frames
|
| 520 |
-
for _ in range(frame_skip - 1):
|
| 521 |
-
cap.grab()
|
| 522 |
-
current_frame += frame_skip
|
| 523 |
-
|
| 524 |
-
cap.release()
|
| 525 |
-
|
| 526 |
-
logger.info(
|
| 527 |
-
"Pass 0 scan complete: %d frames, %d with scorebug (%.1f%%), %d valid samples",
|
| 528 |
-
frames_scanned,
|
| 529 |
-
frames_with_scorebug,
|
| 530 |
-
100 * frames_with_scorebug / max(1, frames_scanned),
|
| 531 |
-
valid_samples,
|
| 532 |
-
)
|
| 533 |
-
|
| 534 |
-
if valid_samples < 50: # Need at least 50 samples to build useful templates
|
| 535 |
-
logger.warning("Pass 0: Insufficient samples (%d < 50), template building may fail", valid_samples)
|
| 536 |
-
if frames_with_scorebug == 0:
|
| 537 |
-
logger.error("Pass 0: No scorebugs detected! Check that the scorebug template matches the video.")
|
| 538 |
-
return False
|
| 539 |
-
|
| 540 |
-
# Build the templates
|
| 541 |
-
self.template_library = self.template_builder.build_templates(min_samples=2)
|
| 542 |
-
coverage = self.template_library.get_coverage_status()
|
| 543 |
-
logger.info(
|
| 544 |
-
"Pass 0 templates built: %d/%d (%.1f%%) coverage",
|
| 545 |
-
coverage["total_have"],
|
| 546 |
-
coverage["total_needed"],
|
| 547 |
-
100 * coverage["total_have"] / coverage["total_needed"],
|
| 548 |
)
|
| 549 |
|
| 550 |
-
#
|
| 551 |
-
|
| 552 |
-
|
| 553 |
-
self.template_reader = TemplatePlayClockReader(self.template_library, region_w, region_h)
|
| 554 |
|
| 555 |
-
|
| 556 |
-
logger.info("Pass 0 complete: Template building took %.2fs", timing["template_building"])
|
| 557 |
|
| 558 |
-
|
| 559 |
-
|
| 560 |
-
def _streaming_detection_pass(self, context: VideoContext, stats: Dict[str, Any], timing: Dict[str, float]) -> List[Dict[str, Any]]:
|
| 561 |
"""
|
| 562 |
-
Streaming
|
| 563 |
|
| 564 |
This combines the old Pass 1 (frame extraction) and Pass 2 (template matching) into
|
| 565 |
a single streaming pass. Each frame is:
|
|
@@ -579,7 +286,7 @@ class PlayDetector:
|
|
| 579 |
Returns:
|
| 580 |
List of frame data dictionaries with all processing results
|
| 581 |
"""
|
| 582 |
-
logger.info("Streaming
|
| 583 |
|
| 584 |
logger.info(
|
| 585 |
"Threaded reading: frame_skip=%d (%.2f fps effective), frames %d-%d",
|
|
@@ -597,7 +304,7 @@ class PlayDetector:
|
|
| 597 |
frame_data: List[Dict[str, Any]] = []
|
| 598 |
|
| 599 |
# Flag to track if we've locked the scorebug region
|
| 600 |
-
scorebug_region_locked = self.scorebug_detector.
|
| 601 |
|
| 602 |
# Progress tracking
|
| 603 |
progress_interval = int(30 / self.config.frame_interval) # Log every 30 seconds of video
|
|
@@ -632,7 +339,7 @@ class PlayDetector:
|
|
| 632 |
# Progress logging
|
| 633 |
if stats["total_frames"] % progress_interval == 0:
|
| 634 |
progress_pct = 100 * (current_time - context.start_time) / (context.end_time - context.start_time)
|
| 635 |
-
logger.info("
|
| 636 |
|
| 637 |
finally:
|
| 638 |
# Stop the reader thread and get I/O timing
|
|
@@ -641,7 +348,7 @@ class PlayDetector:
|
|
| 641 |
context.cap.release()
|
| 642 |
|
| 643 |
logger.info(
|
| 644 |
-
"Streaming
|
| 645 |
stats["total_frames"],
|
| 646 |
stats["frames_with_scorebug"],
|
| 647 |
stats["frames_with_clock"],
|
|
@@ -651,7 +358,7 @@ class PlayDetector:
|
|
| 651 |
|
| 652 |
def _process_frame_streaming(
|
| 653 |
self,
|
| 654 |
-
frame: np.ndarray,
|
| 655 |
current_time: float,
|
| 656 |
timing: Dict[str, float],
|
| 657 |
stats: Dict[str, Any],
|
|
@@ -680,16 +387,15 @@ class PlayDetector:
|
|
| 680 |
scorebug = self.scorebug_detector.detect(frame)
|
| 681 |
timing["scorebug_detection"] += time.perf_counter() - t_start
|
| 682 |
|
| 683 |
-
# Initialize frame result
|
| 684 |
-
frame_result =
|
| 685 |
-
|
| 686 |
-
|
| 687 |
-
|
| 688 |
-
|
| 689 |
-
|
| 690 |
-
|
| 691 |
-
|
| 692 |
-
}
|
| 693 |
|
| 694 |
if scorebug.detected:
|
| 695 |
stats["frames_with_scorebug"] += 1
|
|
@@ -699,10 +405,16 @@ class PlayDetector:
|
|
| 699 |
timeout_reading = self.timeout_tracker.read_timeouts(frame)
|
| 700 |
frame_result["home_timeouts"] = timeout_reading.home_timeouts
|
| 701 |
frame_result["away_timeouts"] = timeout_reading.away_timeouts
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 702 |
|
| 703 |
# Extract play clock region and run template matching immediately
|
| 704 |
t_start = time.perf_counter()
|
| 705 |
-
|
|
|
|
| 706 |
timing["preprocessing"] += time.perf_counter() - t_start
|
| 707 |
|
| 708 |
if play_clock_region is not None and self.template_reader:
|
|
@@ -717,7 +429,7 @@ class PlayDetector:
|
|
| 717 |
if clock_result.detected:
|
| 718 |
stats["frames_with_clock"] += 1
|
| 719 |
|
| 720 |
-
# Update state machine immediately
|
| 721 |
t_start = time.perf_counter()
|
| 722 |
clock_reading = PlayClockReading(
|
| 723 |
detected=clock_result.detected,
|
|
@@ -725,60 +437,79 @@ class PlayDetector:
|
|
| 725 |
confidence=clock_result.confidence,
|
| 726 |
raw_text=f"TEMPLATE_{clock_result.value}" if clock_result.detected else "TEMPLATE_FAILED",
|
| 727 |
)
|
| 728 |
-
self.state_machine.update(current_time, scorebug, clock_reading)
|
| 729 |
timing["state_machine"] += time.perf_counter() - t_start
|
| 730 |
else:
|
| 731 |
# No scorebug - still update state machine
|
| 732 |
t_start = time.perf_counter()
|
| 733 |
clock_reading = PlayClockReading(detected=False, value=None, confidence=0.0, raw_text="NO_SCOREBUG")
|
| 734 |
-
self.state_machine.update(current_time, scorebug, clock_reading)
|
| 735 |
timing["state_machine"] += time.perf_counter() - t_start
|
| 736 |
|
| 737 |
return frame_result
|
| 738 |
|
| 739 |
-
def
|
| 740 |
self,
|
| 741 |
-
frame_data: List[Dict[str, Any]],
|
| 742 |
context: VideoContext,
|
| 743 |
stats: Dict[str, Any],
|
| 744 |
timing: Dict[str, float],
|
|
|
|
| 745 |
) -> DetectionResult:
|
| 746 |
"""
|
| 747 |
-
Finalize
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 748 |
|
| 749 |
Args:
|
| 750 |
-
frame_data: Complete frame data from streaming detection pass
|
| 751 |
context: Video context
|
| 752 |
stats: Processing stats
|
| 753 |
timing: Timing breakdown
|
|
|
|
| 754 |
|
| 755 |
Returns:
|
| 756 |
Final DetectionResult
|
| 757 |
"""
|
| 758 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 759 |
|
| 760 |
-
#
|
| 761 |
-
|
|
|
|
| 762 |
logger.info(
|
| 763 |
-
"Clock reset
|
| 764 |
clock_reset_stats.get("total", 0),
|
| 765 |
clock_reset_stats.get("weird_clock", 0),
|
| 766 |
clock_reset_stats.get("timeout", 0),
|
| 767 |
clock_reset_stats.get("special", 0),
|
| 768 |
)
|
| 769 |
|
| 770 |
-
#
|
| 771 |
-
|
| 772 |
|
| 773 |
-
#
|
| 774 |
-
|
| 775 |
-
|
| 776 |
|
| 777 |
-
#
|
| 778 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 779 |
|
| 780 |
-
|
| 781 |
-
|
|
|
|
|
|
|
| 782 |
|
| 783 |
result = DetectionResult(
|
| 784 |
video=Path(self.config.video_path).name,
|
|
@@ -793,16 +524,16 @@ class PlayDetector:
|
|
| 793 |
)
|
| 794 |
|
| 795 |
# Log final summary
|
| 796 |
-
logger.info("
|
| 797 |
logger.info("Processed %d frames", stats["total_frames"])
|
| 798 |
logger.info("Frames with scorebug: %d (%.1f%%)", stats["frames_with_scorebug"], 100 * stats["frames_with_scorebug"] / max(1, stats["total_frames"]))
|
| 799 |
logger.info("Frames with clock: %d (%.1f%%)", stats["frames_with_clock"], 100 * stats["frames_with_clock"] / max(1, stats["total_frames"]))
|
| 800 |
-
logger.info("Plays
|
| 801 |
|
| 802 |
return result
|
| 803 |
|
| 804 |
def _log_timing_breakdown(self, timing: Dict[str, float]) -> None:
|
| 805 |
-
"""Log the timing breakdown for the
|
| 806 |
total_time = sum(timing.values())
|
| 807 |
logger.info("=" * 50)
|
| 808 |
logger.info("TIMING BREAKDOWN")
|
|
@@ -813,23 +544,23 @@ class PlayDetector:
|
|
| 813 |
logger.info(" TOTAL: %.2fs", total_time)
|
| 814 |
logger.info("=" * 50)
|
| 815 |
|
| 816 |
-
def
|
| 817 |
"""
|
| 818 |
-
Run play
|
| 819 |
|
| 820 |
Uses streaming processing for optimal performance:
|
| 821 |
- Pass 0 (if needed): Build digit templates using OCR on scorebug-verified frames
|
| 822 |
- Streaming pass: Read frame -> extract region -> template match -> state machine update
|
| 823 |
(threaded video I/O overlaps reading with processing)
|
| 824 |
-
- Finalize: Clock reset
|
| 825 |
|
| 826 |
When fixed coordinates are provided, the scorebug detection step simply verifies
|
| 827 |
the scorebug is present at the known location (faster than searching).
|
| 828 |
|
| 829 |
Returns:
|
| 830 |
-
DetectionResult with all
|
| 831 |
"""
|
| 832 |
-
logger.info("Starting play
|
| 833 |
logger.info("Video: %s", self.config.video_path)
|
| 834 |
logger.info("Segment: %.1fs to %s", self.config.start_time, self.config.end_time or "end")
|
| 835 |
|
|
@@ -850,24 +581,26 @@ class PlayDetector:
|
|
| 850 |
if not self.template_reader and self.template_builder:
|
| 851 |
success = self._pass0_build_templates_with_real_detection(timing)
|
| 852 |
if not success:
|
| 853 |
-
logger.warning("Pass 0 failed to build templates,
|
| 854 |
|
| 855 |
# Log mode info (after Pass 0 so we can show if templates were built)
|
| 856 |
-
self.
|
| 857 |
|
| 858 |
# Initialize video and get processing context
|
| 859 |
context, stats, _ = self._open_video_and_get_context()
|
| 860 |
|
| 861 |
-
# Streaming
|
| 862 |
# Uses threaded video I/O to overlap reading with processing
|
| 863 |
-
frame_data
|
|
|
|
| 864 |
|
| 865 |
-
# Finalize:
|
| 866 |
-
return self.
|
| 867 |
|
| 868 |
-
|
|
|
|
| 869 |
"""
|
| 870 |
-
Run play
|
| 871 |
|
| 872 |
This provides ~26% speedup over sequential processing by using multiple
|
| 873 |
processes to read and process different segments of the video simultaneously.
|
|
@@ -877,18 +610,16 @@ class PlayDetector:
|
|
| 877 |
2. Save templates to disk for worker processes to load
|
| 878 |
3. Parallel pass: Each worker processes a video chunk independently
|
| 879 |
4. Merge: Combine frame data from all chunks in chronological order
|
| 880 |
-
5. State machine: Process merged data to
|
| 881 |
|
| 882 |
Args:
|
| 883 |
num_workers: Number of parallel workers (default 2).
|
| 884 |
output_dir: Output directory for templates (required).
|
| 885 |
|
| 886 |
Returns:
|
| 887 |
-
DetectionResult with all
|
| 888 |
"""
|
| 889 |
-
|
| 890 |
-
|
| 891 |
-
logger.info("Starting parallel play detection (%d workers)...", num_workers)
|
| 892 |
logger.info("Video: %s", self.config.video_path)
|
| 893 |
logger.info("Segment: %.1fs to %s", self.config.start_time, self.config.end_time or "end")
|
| 894 |
|
|
@@ -906,7 +637,7 @@ class PlayDetector:
|
|
| 906 |
if not self.template_reader and self.template_builder:
|
| 907 |
success = self._pass0_build_templates_with_real_detection(timing)
|
| 908 |
if not success:
|
| 909 |
-
logger.warning("Pass 0 failed to build templates,
|
| 910 |
|
| 911 |
# Save templates to disk for worker processes
|
| 912 |
template_path = None
|
|
@@ -933,9 +664,13 @@ class PlayDetector:
|
|
| 933 |
|
| 934 |
# Run parallel processing
|
| 935 |
logger.info("Starting parallel frame extraction...")
|
| 936 |
-
t_parallel_start =
|
| 937 |
|
| 938 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 939 |
video_path=self.config.video_path,
|
| 940 |
start_time=self.config.start_time,
|
| 941 |
end_time=end_time,
|
|
@@ -944,12 +679,13 @@ class PlayDetector:
|
|
| 944 |
fixed_scorebug_coords=self.config.fixed_scorebug_coords,
|
| 945 |
template_library_path=str(template_path) if template_path else None,
|
| 946 |
timeout_config_path=timeout_config_path,
|
| 947 |
-
num_workers=num_workers,
|
| 948 |
)
|
| 949 |
|
|
|
|
|
|
|
| 950 |
timing["video_io"] = io_time
|
| 951 |
# Estimate template matching time from parallel processing
|
| 952 |
-
parallel_time =
|
| 953 |
timing["template_matching"] = max(0, parallel_time - io_time - timing["template_building"])
|
| 954 |
|
| 955 |
logger.info("Parallel processing complete: %d frames", stats["total_frames"])
|
|
@@ -968,7 +704,7 @@ class PlayDetector:
|
|
| 968 |
)
|
| 969 |
|
| 970 |
# Run state machine on merged frame data
|
| 971 |
-
t_sm_start =
|
| 972 |
for frame in frame_data:
|
| 973 |
# Create proper objects for state machine
|
| 974 |
scorebug = ScorebugDetection(
|
|
@@ -982,8 +718,15 @@ class PlayDetector:
|
|
| 982 |
confidence=1.0 if frame.get("clock_detected") else 0.0,
|
| 983 |
raw_text=f"PARALLEL_{frame.get('clock_value')}" if frame.get("clock_detected") else "PARALLEL_FAILED",
|
| 984 |
)
|
| 985 |
-
|
| 986 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 987 |
|
| 988 |
# Update stats dict
|
| 989 |
stats_dict = {
|
|
@@ -992,12 +735,12 @@ class PlayDetector:
|
|
| 992 |
"frames_with_clock": stats["frames_with_clock"],
|
| 993 |
}
|
| 994 |
|
| 995 |
-
# Finalize:
|
| 996 |
-
return self.
|
| 997 |
|
| 998 |
-
def
|
| 999 |
-
"""Log the
|
| 1000 |
-
use_fixed_region = self.scorebug_detector and self.scorebug_detector.
|
| 1001 |
|
| 1002 |
if use_fixed_region:
|
| 1003 |
logger.info("Mode: Fixed region (scorebug location pre-configured)")
|
|
@@ -1012,290 +755,6 @@ class PlayDetector:
|
|
| 1012 |
else:
|
| 1013 |
logger.info(" Will build templates using fallback method")
|
| 1014 |
|
| 1015 |
-
def _detect_clock_resets(self, frame_data: List[Dict[str, Any]]) -> Tuple[List[PlayEvent], Dict[str, int]]:
|
| 1016 |
-
"""
|
| 1017 |
-
Detect and classify 40 -> 25 clock reset events.
|
| 1018 |
-
|
| 1019 |
-
Classification:
|
| 1020 |
-
- Class A (weird_clock): 25 counts down immediately -> rejected
|
| 1021 |
-
- Class B (timeout): Timeout indicator changed -> tracked as timeout
|
| 1022 |
-
- Class C (special): Neither A nor B -> special play with extension
|
| 1023 |
-
|
| 1024 |
-
Args:
|
| 1025 |
-
frame_data: List of frame data with clock values and timeout counts
|
| 1026 |
-
|
| 1027 |
-
Returns:
|
| 1028 |
-
Tuple of (list of PlayEvent for valid clock resets, stats dict)
|
| 1029 |
-
"""
|
| 1030 |
-
plays = []
|
| 1031 |
-
stats = {"total": 0, "weird_clock": 0, "timeout": 0, "special": 0}
|
| 1032 |
-
|
| 1033 |
-
# Parameters
|
| 1034 |
-
immediate_countdown_window = 2.0 # Seconds to check if 25 counts down
|
| 1035 |
-
special_play_extension = 10.0 # Extension for Class C plays
|
| 1036 |
-
|
| 1037 |
-
prev_clock = None
|
| 1038 |
-
saw_40_at = None
|
| 1039 |
-
|
| 1040 |
-
for i, frame in enumerate(frame_data):
|
| 1041 |
-
clock_value = frame.get("clock_value")
|
| 1042 |
-
timestamp = frame["timestamp"]
|
| 1043 |
-
|
| 1044 |
-
if clock_value is not None:
|
| 1045 |
-
# Detect 40 -> 25 transition
|
| 1046 |
-
if prev_clock == 40 and clock_value == 25:
|
| 1047 |
-
stats["total"] += 1
|
| 1048 |
-
_ = timestamp - saw_40_at if saw_40_at else 0.0 # time_at_40 for potential future use
|
| 1049 |
-
|
| 1050 |
-
# Check if 25 immediately counts down (Class A: weird clock)
|
| 1051 |
-
is_immediate_countdown = self._check_immediate_countdown(frame_data, i, immediate_countdown_window)
|
| 1052 |
-
|
| 1053 |
-
# Check if timeout changed (Class B: team timeout)
|
| 1054 |
-
timeout_team = self._check_timeout_change(frame_data, i)
|
| 1055 |
-
|
| 1056 |
-
if is_immediate_countdown:
|
| 1057 |
-
# Class A: Weird clock behavior - reject
|
| 1058 |
-
stats["weird_clock"] += 1
|
| 1059 |
-
logger.debug("Clock reset at %.1fs: weird_clock (25 counts down immediately)", timestamp)
|
| 1060 |
-
elif timeout_team:
|
| 1061 |
-
# Class B: Team timeout - record but mark as timeout
|
| 1062 |
-
stats["timeout"] += 1
|
| 1063 |
-
play_end = self._find_clock_reset_play_end(frame_data, i, max_duration=30.0) # Timeouts can last longer
|
| 1064 |
-
play = PlayEvent(
|
| 1065 |
-
play_number=0,
|
| 1066 |
-
start_time=timestamp,
|
| 1067 |
-
end_time=play_end,
|
| 1068 |
-
confidence=0.8,
|
| 1069 |
-
start_method=f"timeout_{timeout_team}",
|
| 1070 |
-
end_method="timeout_end",
|
| 1071 |
-
direct_end_time=play_end,
|
| 1072 |
-
start_clock_value=prev_clock,
|
| 1073 |
-
end_clock_value=25,
|
| 1074 |
-
play_type="timeout",
|
| 1075 |
-
)
|
| 1076 |
-
plays.append(play)
|
| 1077 |
-
logger.debug("Clock reset at %.1fs: timeout (%s team)", timestamp, timeout_team)
|
| 1078 |
-
else:
|
| 1079 |
-
# Class C: Special play (injury/punt/FG/XP) - end at scorebug disappear OR max_duration from start
|
| 1080 |
-
stats["special"] += 1
|
| 1081 |
-
play_end = self._find_clock_reset_play_end(frame_data, i, max_duration=special_play_extension)
|
| 1082 |
-
# Determine end method based on whether we hit max duration or scorebug disappeared first
|
| 1083 |
-
play_duration = play_end - timestamp
|
| 1084 |
-
if play_duration >= special_play_extension - 0.1: # Close to max duration (within tolerance)
|
| 1085 |
-
end_method = "max_duration"
|
| 1086 |
-
else:
|
| 1087 |
-
end_method = "scorebug_disappeared"
|
| 1088 |
-
play = PlayEvent(
|
| 1089 |
-
play_number=0,
|
| 1090 |
-
start_time=timestamp,
|
| 1091 |
-
end_time=play_end,
|
| 1092 |
-
confidence=0.8,
|
| 1093 |
-
start_method="clock_reset_special",
|
| 1094 |
-
end_method=end_method,
|
| 1095 |
-
direct_end_time=play_end,
|
| 1096 |
-
start_clock_value=prev_clock,
|
| 1097 |
-
end_clock_value=25,
|
| 1098 |
-
play_type="special",
|
| 1099 |
-
)
|
| 1100 |
-
plays.append(play)
|
| 1101 |
-
logger.debug("Clock reset at %.1fs: special play (%.1fs duration)", timestamp, play_end - timestamp)
|
| 1102 |
-
|
| 1103 |
-
# Track when 40 first appeared
|
| 1104 |
-
if clock_value == 40 and prev_clock != 40:
|
| 1105 |
-
saw_40_at = timestamp
|
| 1106 |
-
|
| 1107 |
-
prev_clock = clock_value
|
| 1108 |
-
|
| 1109 |
-
return plays, stats
|
| 1110 |
-
|
| 1111 |
-
def _check_immediate_countdown(self, frame_data: List[Dict[str, Any]], frame_idx: int, window: float) -> bool:
|
| 1112 |
-
"""Check if 25 immediately starts counting down (indicates weird clock behavior)."""
|
| 1113 |
-
reset_timestamp = frame_data[frame_idx]["timestamp"]
|
| 1114 |
-
|
| 1115 |
-
for j in range(frame_idx + 1, len(frame_data)):
|
| 1116 |
-
frame = frame_data[j]
|
| 1117 |
-
elapsed = frame["timestamp"] - reset_timestamp
|
| 1118 |
-
if elapsed > window:
|
| 1119 |
-
break
|
| 1120 |
-
clock_value = frame.get("clock_value")
|
| 1121 |
-
if clock_value is not None and clock_value < 25:
|
| 1122 |
-
return True # 25 counted down - weird clock
|
| 1123 |
-
|
| 1124 |
-
return False
|
| 1125 |
-
|
| 1126 |
-
def _check_timeout_change(self, frame_data: List[Dict[str, Any]], frame_idx: int) -> Optional[str]:
|
| 1127 |
-
"""Check if a timeout indicator changed around the reset."""
|
| 1128 |
-
# Get timeout counts before reset
|
| 1129 |
-
before_home = None
|
| 1130 |
-
before_away = None
|
| 1131 |
-
|
| 1132 |
-
for j in range(frame_idx - 1, max(0, frame_idx - 20), -1):
|
| 1133 |
-
frame = frame_data[j]
|
| 1134 |
-
if frame.get("home_timeouts") is not None:
|
| 1135 |
-
before_home = frame.get("home_timeouts", 3)
|
| 1136 |
-
before_away = frame.get("away_timeouts", 3)
|
| 1137 |
-
break
|
| 1138 |
-
|
| 1139 |
-
if before_home is None:
|
| 1140 |
-
return None
|
| 1141 |
-
|
| 1142 |
-
# Look forward for timeout change (up to 15 seconds)
|
| 1143 |
-
frame_interval = frame_data[1]["timestamp"] - frame_data[0]["timestamp"] if len(frame_data) > 1 else 0.5
|
| 1144 |
-
max_frames_forward = int(15.0 / frame_interval) if frame_interval > 0 else 30
|
| 1145 |
-
|
| 1146 |
-
for j in range(frame_idx, min(len(frame_data), frame_idx + max_frames_forward)):
|
| 1147 |
-
frame = frame_data[j]
|
| 1148 |
-
if frame.get("home_timeouts") is not None:
|
| 1149 |
-
after_home = frame.get("home_timeouts", 3)
|
| 1150 |
-
after_away = frame.get("away_timeouts", 3)
|
| 1151 |
-
|
| 1152 |
-
if after_home < before_home:
|
| 1153 |
-
return "home"
|
| 1154 |
-
if after_away < before_away:
|
| 1155 |
-
return "away"
|
| 1156 |
-
|
| 1157 |
-
return None
|
| 1158 |
-
|
| 1159 |
-
def _find_clock_reset_play_end(self, frame_data: List[Dict[str, Any]], frame_idx: int, max_duration: float) -> float:
|
| 1160 |
-
"""
|
| 1161 |
-
Find the end time for a clock reset play (Class C special play).
|
| 1162 |
-
|
| 1163 |
-
The play ends when EITHER:
|
| 1164 |
-
- Scorebug disappears (cut to commercial/replay)
|
| 1165 |
-
- max_duration seconds have elapsed since play START
|
| 1166 |
-
|
| 1167 |
-
Whichever comes FIRST.
|
| 1168 |
-
|
| 1169 |
-
Args:
|
| 1170 |
-
frame_data: Frame data list
|
| 1171 |
-
frame_idx: Index of the frame where 40->25 reset occurred
|
| 1172 |
-
max_duration: Maximum play duration from start (e.g., 10 seconds)
|
| 1173 |
-
|
| 1174 |
-
Returns:
|
| 1175 |
-
Play end timestamp
|
| 1176 |
-
"""
|
| 1177 |
-
start_timestamp = frame_data[frame_idx]["timestamp"]
|
| 1178 |
-
max_end_time = start_timestamp + max_duration
|
| 1179 |
-
|
| 1180 |
-
# Look for scorebug disappearance (but cap at max_duration from start)
|
| 1181 |
-
for j in range(frame_idx + 1, len(frame_data)):
|
| 1182 |
-
frame = frame_data[j]
|
| 1183 |
-
timestamp = frame["timestamp"]
|
| 1184 |
-
|
| 1185 |
-
# If we've exceeded max_duration, end the play at max_duration
|
| 1186 |
-
if timestamp >= max_end_time:
|
| 1187 |
-
return max_end_time
|
| 1188 |
-
|
| 1189 |
-
# Check for play clock disappearance (works for both fixed coords and standard mode)
|
| 1190 |
-
clock_available = frame.get("clock_detected", frame.get("scorebug_detected", False))
|
| 1191 |
-
if not clock_available:
|
| 1192 |
-
return timestamp
|
| 1193 |
-
|
| 1194 |
-
# Default: end at max_duration (or end of data if shorter)
|
| 1195 |
-
return min(max_end_time, frame_data[-1]["timestamp"] if frame_data else max_end_time)
|
| 1196 |
-
|
| 1197 |
-
def _merge_plays(self, state_machine_plays: List[PlayEvent], clock_reset_plays: List[PlayEvent]) -> List[PlayEvent]:
|
| 1198 |
-
"""
|
| 1199 |
-
Merge plays from state machine and clock reset detection, removing overlaps and duplicates.
|
| 1200 |
-
|
| 1201 |
-
Handles two types of duplicates:
|
| 1202 |
-
1. Overlapping plays (start_time < last.end_time)
|
| 1203 |
-
2. Close plays (start times within proximity_threshold) representing the same event
|
| 1204 |
-
|
| 1205 |
-
Args:
|
| 1206 |
-
state_machine_plays: Plays from the state machine
|
| 1207 |
-
clock_reset_plays: Plays from clock reset detection
|
| 1208 |
-
|
| 1209 |
-
Returns:
|
| 1210 |
-
Merged list of plays sorted by start time
|
| 1211 |
-
"""
|
| 1212 |
-
all_plays = list(state_machine_plays) + list(clock_reset_plays)
|
| 1213 |
-
all_plays.sort(key=lambda p: p.start_time)
|
| 1214 |
-
|
| 1215 |
-
if not all_plays:
|
| 1216 |
-
return []
|
| 1217 |
-
|
| 1218 |
-
# Proximity threshold: plays within this time are considered the same event
|
| 1219 |
-
proximity_threshold = 5.0 # seconds
|
| 1220 |
-
|
| 1221 |
-
# Remove overlapping and close plays (keep state machine plays over clock reset plays)
|
| 1222 |
-
filtered = [all_plays[0]]
|
| 1223 |
-
for play in all_plays[1:]:
|
| 1224 |
-
last = filtered[-1]
|
| 1225 |
-
|
| 1226 |
-
# Check for overlap OR proximity (both indicate same event)
|
| 1227 |
-
is_overlapping = play.start_time < last.end_time
|
| 1228 |
-
is_close = abs(play.start_time - last.start_time) < proximity_threshold
|
| 1229 |
-
|
| 1230 |
-
if is_overlapping or is_close:
|
| 1231 |
-
# Same event detected twice - keep the better one
|
| 1232 |
-
# Priority: normal > special > timeout (normal plays are most reliable)
|
| 1233 |
-
type_priority = {"normal": 3, "special": 2, "timeout": 1}
|
| 1234 |
-
last_priority = type_priority.get(last.play_type, 0)
|
| 1235 |
-
play_priority = type_priority.get(play.play_type, 0)
|
| 1236 |
-
|
| 1237 |
-
if play_priority > last_priority:
|
| 1238 |
-
filtered[-1] = play # Replace with higher priority play
|
| 1239 |
-
elif play_priority == last_priority and play.confidence > last.confidence:
|
| 1240 |
-
filtered[-1] = play # Same priority, but higher confidence
|
| 1241 |
-
# else: keep existing
|
| 1242 |
-
else:
|
| 1243 |
-
filtered.append(play)
|
| 1244 |
-
|
| 1245 |
-
# Apply quiet time filter to remove false positives after normal plays
|
| 1246 |
-
filtered = self._apply_quiet_time_filter(filtered)
|
| 1247 |
-
|
| 1248 |
-
# Renumber plays
|
| 1249 |
-
for i, play in enumerate(filtered, 1):
|
| 1250 |
-
play.play_number = i
|
| 1251 |
-
|
| 1252 |
-
return filtered
|
| 1253 |
-
|
| 1254 |
-
def _apply_quiet_time_filter(self, plays: List[PlayEvent], quiet_time: float = 10.0) -> List[PlayEvent]:
|
| 1255 |
-
"""
|
| 1256 |
-
Apply quiet time filter after normal plays.
|
| 1257 |
-
|
| 1258 |
-
After a normal play ends, no new special/timeout plays can start for quiet_time seconds.
|
| 1259 |
-
This filters out false positives from penalties during plays (false starts, delay of game, etc.).
|
| 1260 |
-
|
| 1261 |
-
Args:
|
| 1262 |
-
plays: List of plays sorted by start time
|
| 1263 |
-
quiet_time: Seconds of quiet time after normal plays (default 10.0)
|
| 1264 |
-
|
| 1265 |
-
Returns:
|
| 1266 |
-
Filtered list of plays
|
| 1267 |
-
"""
|
| 1268 |
-
if not plays:
|
| 1269 |
-
return []
|
| 1270 |
-
|
| 1271 |
-
filtered = []
|
| 1272 |
-
last_normal_end = -999.0 # Track when last normal play ended
|
| 1273 |
-
|
| 1274 |
-
for play in plays:
|
| 1275 |
-
# Check if this play starts during quiet time after a normal play
|
| 1276 |
-
if play.start_time < last_normal_end + quiet_time and play.play_type != "normal":
|
| 1277 |
-
# This non-normal play starts during quiet time - filter it out
|
| 1278 |
-
time_since_normal = play.start_time - last_normal_end
|
| 1279 |
-
logger.debug(
|
| 1280 |
-
"Quiet time filter: Removing %s play at %.1fs (%.1fs after normal play ended)",
|
| 1281 |
-
play.play_type,
|
| 1282 |
-
play.start_time,
|
| 1283 |
-
time_since_normal,
|
| 1284 |
-
)
|
| 1285 |
-
continue
|
| 1286 |
-
|
| 1287 |
-
filtered.append(play)
|
| 1288 |
-
|
| 1289 |
-
# Update last normal play end time
|
| 1290 |
-
if play.play_type == "normal":
|
| 1291 |
-
last_normal_end = play.end_time
|
| 1292 |
-
|
| 1293 |
-
removed_count = len(plays) - len(filtered)
|
| 1294 |
-
if removed_count > 0:
|
| 1295 |
-
logger.info("Quiet time filter removed %d plays", removed_count)
|
| 1296 |
-
|
| 1297 |
-
return filtered
|
| 1298 |
-
|
| 1299 |
def _play_to_dict(self, play: PlayEvent) -> Dict[str, Any]:
|
| 1300 |
"""Convert PlayEvent to dictionary for JSON serialization."""
|
| 1301 |
return {
|
|
@@ -1314,16 +773,16 @@ class PlayDetector:
|
|
| 1314 |
|
| 1315 |
def save_results(self, result: DetectionResult, output_path: str) -> None:
|
| 1316 |
"""
|
| 1317 |
-
Save
|
| 1318 |
|
| 1319 |
Args:
|
| 1320 |
-
result:
|
| 1321 |
output_path: Path to output file
|
| 1322 |
"""
|
| 1323 |
output = Path(output_path)
|
| 1324 |
output.parent.mkdir(parents=True, exist_ok=True)
|
| 1325 |
|
| 1326 |
-
data =
|
| 1327 |
|
| 1328 |
# Include configuration if provided (for reproducibility)
|
| 1329 |
if result.config:
|
|
|
|
|
|
|
| 1 |
"""
|
| 2 |
+
Play extractor pipeline module.
|
| 3 |
|
| 4 |
+
This module orchestrates the complete play extraction pipeline:
|
| 5 |
1. Video frame extraction
|
| 6 |
2. Scorebug detection
|
| 7 |
3. Play clock reading via template matching
|
| 8 |
4. Play state machine processing
|
| 9 |
+
5. Post-hoc clock reset identification (timeout/special plays)
|
| 10 |
|
| 11 |
Performance optimizations:
|
| 12 |
- Streaming processing: read frame -> process immediately (no intermediate storage)
|
|
|
|
| 19 |
|
| 20 |
import json
|
| 21 |
import logging
|
|
|
|
|
|
|
| 22 |
import time
|
| 23 |
from pathlib import Path
|
| 24 |
from typing import Optional, List, Dict, Any, Tuple
|
| 25 |
|
| 26 |
import cv2
|
|
|
|
| 27 |
import numpy as np
|
| 28 |
|
| 29 |
+
from detection import DetectScoreBug, ScorebugDetection, DetectTimeouts
|
| 30 |
+
from readers import ReadPlayClock, PlayClockReading
|
| 31 |
+
from setup import DigitTemplateBuilder, DigitTemplateLibrary, PlayClockRegionConfig, PlayClockRegionExtractor
|
| 32 |
+
from tracking import TrackPlayState, PlayEvent, PlayMerger, TimeoutInfo, ClockResetIdentifier
|
| 33 |
+
from utils import create_frame_result
|
| 34 |
+
from video import ThreadedFrameReader
|
| 35 |
+
from .models import DetectionConfig, DetectionResult, ParallelProcessingConfig, VideoContext
|
| 36 |
from .parallel import process_video_parallel
|
| 37 |
+
from .template_builder_pass import TemplateBuildingPass
|
| 38 |
|
| 39 |
logger = logging.getLogger(__name__)
|
| 40 |
|
|
|
|
|
|
|
| 41 |
|
| 42 |
+
def format_extraction_result_dict(result: DetectionResult) -> Dict[str, Any]:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
"""
|
| 44 |
Format a DetectionResult into a dictionary for JSON serialization or API return.
|
| 45 |
|
|
|
|
| 63 |
}
|
| 64 |
|
| 65 |
|
| 66 |
+
class PlayExtractor:
|
| 67 |
"""
|
| 68 |
+
Main pipeline for extracting plays from video.
|
| 69 |
+
|
| 70 |
+
This class orchestrates all extraction components:
|
| 71 |
+
- DetectScoreBug: Locates scorebug in frames
|
| 72 |
+
- ReadPlayClock: Reads play clock digits via template matching
|
| 73 |
+
- TrackPlayState: Determines play boundaries
|
| 74 |
+
- DetectTimeouts: Tracks timeout indicators for 3-class clock reset classification
|
| 75 |
+
- ClockResetIdentifier: Post-hoc identification of timeout/special plays
|
| 76 |
"""
|
| 77 |
|
| 78 |
+
def __init__(self, config: DetectionConfig, timeout_tracker: Optional[DetectTimeouts] = None):
|
| 79 |
"""
|
| 80 |
+
Initialize the play extractor pipeline.
|
| 81 |
|
| 82 |
Args:
|
| 83 |
config: Detection configuration
|
| 84 |
timeout_tracker: Optional timeout tracker for clock reset classification
|
| 85 |
"""
|
| 86 |
self.config = config
|
|
|
|
|
|
|
|
|
|
| 87 |
self.timeout_tracker = timeout_tracker
|
| 88 |
|
| 89 |
+
# Template-based clock reading components (conditionally initialized)
|
| 90 |
self.template_builder: Optional[DigitTemplateBuilder] = None
|
| 91 |
self.template_library: Optional[DigitTemplateLibrary] = None
|
| 92 |
+
self.template_reader: Optional[ReadPlayClock] = None
|
| 93 |
|
| 94 |
self._validate_config()
|
| 95 |
+
# Core components are initialized here (scorebug_detector, clock_reader, state_machine)
|
| 96 |
self._initialize_components()
|
| 97 |
|
| 98 |
def _validate_config(self) -> None:
|
|
|
|
| 113 |
raise FileNotFoundError(f"Clock region config not found: {self.config.clock_region_config_path}")
|
| 114 |
|
| 115 |
def _initialize_components(self) -> None:
|
| 116 |
+
"""Initialize extraction components."""
|
| 117 |
+
logger.info("Initializing play extractor components...")
|
| 118 |
|
| 119 |
# Determine if we're using fixed coordinates mode
|
| 120 |
+
# In this mode, we still use the same logic but with pre-set regions
|
| 121 |
use_fixed_coords = self.config.fixed_playclock_coords is not None
|
| 122 |
|
| 123 |
if use_fixed_coords:
|
|
|
|
| 125 |
logger.info("Fixed coordinates mode - regions pre-configured")
|
| 126 |
|
| 127 |
# Compute play clock offset relative to scorebug from absolute coordinates
|
| 128 |
+
assert self.config.fixed_playclock_coords is not None # Already checked above, helps mypy
|
| 129 |
pc_x, pc_y, pc_w, pc_h = self.config.fixed_playclock_coords
|
| 130 |
if self.config.fixed_scorebug_coords:
|
| 131 |
sb_x, sb_y, _, _ = self.config.fixed_scorebug_coords
|
|
|
|
| 139 |
playclock_config = PlayClockRegionConfig(x_offset=x_offset, y_offset=y_offset, width=pc_w, height=pc_h, source_video="", scorebug_template="", samples_used=0)
|
| 140 |
|
| 141 |
# Initialize scorebug detector (will set fixed region below)
|
| 142 |
+
self.scorebug_detector: DetectScoreBug = DetectScoreBug(template_path=None, use_split_detection=self.config.use_split_detection)
|
| 143 |
|
| 144 |
# Set the fixed region immediately so no template matching is needed
|
| 145 |
if self.config.fixed_scorebug_coords:
|
| 146 |
self.scorebug_detector.set_fixed_region(self.config.fixed_scorebug_coords)
|
| 147 |
logger.info("Scorebug fixed region set: %s", self.config.fixed_scorebug_coords)
|
| 148 |
|
| 149 |
+
# Initialize play clock region extractor with the derived config
|
| 150 |
+
self.clock_reader: PlayClockRegionExtractor = PlayClockRegionExtractor(region_config=playclock_config)
|
| 151 |
+
logger.info("Play clock region extractor initialized with offset=(%d, %d), size=(%d, %d)", x_offset, y_offset, pc_w, pc_h)
|
| 152 |
else:
|
| 153 |
# Standard mode: use template and config files
|
| 154 |
+
self.scorebug_detector = DetectScoreBug(template_path=self.config.template_path, use_split_detection=self.config.use_split_detection)
|
| 155 |
logger.info("Scorebug detector initialized (split_detection=%s)", self.config.use_split_detection)
|
| 156 |
|
| 157 |
+
# Initialize play clock region extractor from config file
|
| 158 |
+
self.clock_reader = PlayClockRegionExtractor(region_config_path=self.config.clock_region_config_path)
|
| 159 |
+
logger.info("Play clock region extractor initialized")
|
| 160 |
|
| 161 |
# Initialize state machine
|
| 162 |
+
self.state_machine: TrackPlayState = TrackPlayState()
|
| 163 |
logger.info("State machine initialized")
|
| 164 |
|
| 165 |
# Initialize template matching components
|
|
|
|
| 175 |
self.template_library = DigitTemplateLibrary()
|
| 176 |
if self.template_library.load(self.config.digit_template_path):
|
| 177 |
logger.info("Loaded pre-built digit templates from %s", self.config.digit_template_path)
|
| 178 |
+
self.template_reader = ReadPlayClock(self.template_library, region_w, region_h)
|
| 179 |
else:
|
| 180 |
self.template_library = None
|
| 181 |
+
logger.info("Could not load templates, will build during extraction")
|
| 182 |
|
| 183 |
# Initialize template builder for collection phase if no templates loaded
|
| 184 |
if self.template_library is None:
|
|
|
|
| 241 |
"""
|
| 242 |
Pass 0: Build digit templates by scanning video using TEMPLATE-BASED scorebug detection.
|
| 243 |
|
| 244 |
+
Delegates to TemplateBuildingPass which handles the actual scanning and OCR.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 245 |
|
| 246 |
Args:
|
| 247 |
timing: Timing dictionary to update
|
|
|
|
| 249 |
Returns:
|
| 250 |
True if templates were built successfully, False otherwise
|
| 251 |
"""
|
| 252 |
+
# Use the extracted TemplateBuildingPass module
|
| 253 |
+
# Assert: template_builder is initialized when this method is called
|
| 254 |
+
assert self.template_builder is not None
|
| 255 |
+
template_pass = TemplateBuildingPass(
|
| 256 |
+
config=self.config,
|
| 257 |
+
clock_reader=self.clock_reader,
|
| 258 |
+
template_builder=self.template_builder,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 259 |
)
|
| 260 |
|
| 261 |
+
# Run template building
|
| 262 |
+
self.template_library, self.template_reader, build_time = template_pass.run()
|
| 263 |
+
timing["template_building"] = build_time
|
|
|
|
| 264 |
|
| 265 |
+
return self.template_library is not None
|
|
|
|
| 266 |
|
| 267 |
+
def _streaming_extraction_pass(self, context: VideoContext, stats: Dict[str, Any], timing: Dict[str, float]) -> List[Dict[str, Any]]:
|
|
|
|
|
|
|
| 268 |
"""
|
| 269 |
+
Streaming extraction pass: Read frames, process immediately, no intermediate storage.
|
| 270 |
|
| 271 |
This combines the old Pass 1 (frame extraction) and Pass 2 (template matching) into
|
| 272 |
a single streaming pass. Each frame is:
|
|
|
|
| 286 |
Returns:
|
| 287 |
List of frame data dictionaries with all processing results
|
| 288 |
"""
|
| 289 |
+
logger.info("Streaming extraction pass: frame extraction + template matching...")
|
| 290 |
|
| 291 |
logger.info(
|
| 292 |
"Threaded reading: frame_skip=%d (%.2f fps effective), frames %d-%d",
|
|
|
|
| 304 |
frame_data: List[Dict[str, Any]] = []
|
| 305 |
|
| 306 |
# Flag to track if we've locked the scorebug region
|
| 307 |
+
scorebug_region_locked = self.scorebug_detector.is_fixed_region_mode if self.scorebug_detector else False
|
| 308 |
|
| 309 |
# Progress tracking
|
| 310 |
progress_interval = int(30 / self.config.frame_interval) # Log every 30 seconds of video
|
|
|
|
| 339 |
# Progress logging
|
| 340 |
if stats["total_frames"] % progress_interval == 0:
|
| 341 |
progress_pct = 100 * (current_time - context.start_time) / (context.end_time - context.start_time)
|
| 342 |
+
logger.info("Extraction progress: %.1fs / %.1fs (%.0f%%)", current_time, context.end_time, progress_pct)
|
| 343 |
|
| 344 |
finally:
|
| 345 |
# Stop the reader thread and get I/O timing
|
|
|
|
| 348 |
context.cap.release()
|
| 349 |
|
| 350 |
logger.info(
|
| 351 |
+
"Streaming extraction complete: %d frames processed, %d with scorebug, %d with clock",
|
| 352 |
stats["total_frames"],
|
| 353 |
stats["frames_with_scorebug"],
|
| 354 |
stats["frames_with_clock"],
|
|
|
|
| 358 |
|
| 359 |
def _process_frame_streaming(
|
| 360 |
self,
|
| 361 |
+
frame: np.ndarray[Any, Any],
|
| 362 |
current_time: float,
|
| 363 |
timing: Dict[str, float],
|
| 364 |
stats: Dict[str, Any],
|
|
|
|
| 387 |
scorebug = self.scorebug_detector.detect(frame)
|
| 388 |
timing["scorebug_detection"] += time.perf_counter() - t_start
|
| 389 |
|
| 390 |
+
# Initialize frame result using shared factory
|
| 391 |
+
frame_result = create_frame_result(
|
| 392 |
+
timestamp=current_time,
|
| 393 |
+
scorebug_detected=scorebug.detected,
|
| 394 |
+
scorebug_bbox=scorebug.bbox if scorebug.detected else None,
|
| 395 |
+
)
|
| 396 |
+
|
| 397 |
+
# Initialize timeout_info for state machine
|
| 398 |
+
timeout_info = None
|
|
|
|
| 399 |
|
| 400 |
if scorebug.detected:
|
| 401 |
stats["frames_with_scorebug"] += 1
|
|
|
|
| 405 |
timeout_reading = self.timeout_tracker.read_timeouts(frame)
|
| 406 |
frame_result["home_timeouts"] = timeout_reading.home_timeouts
|
| 407 |
frame_result["away_timeouts"] = timeout_reading.away_timeouts
|
| 408 |
+
# Create TimeoutInfo for state machine clock reset classification
|
| 409 |
+
timeout_info = TimeoutInfo(
|
| 410 |
+
home_timeouts=timeout_reading.home_timeouts,
|
| 411 |
+
away_timeouts=timeout_reading.away_timeouts,
|
| 412 |
+
)
|
| 413 |
|
| 414 |
# Extract play clock region and run template matching immediately
|
| 415 |
t_start = time.perf_counter()
|
| 416 |
+
assert scorebug.bbox is not None # scorebug.detected implies bbox is set
|
| 417 |
+
play_clock_region = self.clock_reader.extract_region(frame, scorebug.bbox)
|
| 418 |
timing["preprocessing"] += time.perf_counter() - t_start
|
| 419 |
|
| 420 |
if play_clock_region is not None and self.template_reader:
|
|
|
|
| 429 |
if clock_result.detected:
|
| 430 |
stats["frames_with_clock"] += 1
|
| 431 |
|
| 432 |
+
# Update state machine immediately with timeout info for clock reset classification
|
| 433 |
t_start = time.perf_counter()
|
| 434 |
clock_reading = PlayClockReading(
|
| 435 |
detected=clock_result.detected,
|
|
|
|
| 437 |
confidence=clock_result.confidence,
|
| 438 |
raw_text=f"TEMPLATE_{clock_result.value}" if clock_result.detected else "TEMPLATE_FAILED",
|
| 439 |
)
|
| 440 |
+
self.state_machine.update(current_time, scorebug, clock_reading, timeout_info)
|
| 441 |
timing["state_machine"] += time.perf_counter() - t_start
|
| 442 |
else:
|
| 443 |
# No scorebug - still update state machine
|
| 444 |
t_start = time.perf_counter()
|
| 445 |
clock_reading = PlayClockReading(detected=False, value=None, confidence=0.0, raw_text="NO_SCOREBUG")
|
| 446 |
+
self.state_machine.update(current_time, scorebug, clock_reading, timeout_info)
|
| 447 |
timing["state_machine"] += time.perf_counter() - t_start
|
| 448 |
|
| 449 |
return frame_result
|
| 450 |
|
| 451 |
+
def _finalize_extraction(
|
| 452 |
self,
|
|
|
|
| 453 |
context: VideoContext,
|
| 454 |
stats: Dict[str, Any],
|
| 455 |
timing: Dict[str, float],
|
| 456 |
+
frame_data: List[Dict[str, Any]],
|
| 457 |
) -> DetectionResult:
|
| 458 |
"""
|
| 459 |
+
Finalize extraction: run post-hoc clock reset identification and build result.
|
| 460 |
+
|
| 461 |
+
Uses ClockResetIdentifier for 3-class classification of 40→25 clock reset events:
|
| 462 |
+
- Class A (weird_clock): 25 counts down immediately → rejected
|
| 463 |
+
- Class B (timeout): Timeout indicator changed → tracked as timeout
|
| 464 |
+
- Class C (special): Neither A nor B → special play (punt/FG/XP)
|
| 465 |
|
| 466 |
Args:
|
|
|
|
| 467 |
context: Video context
|
| 468 |
stats: Processing stats
|
| 469 |
timing: Timing breakdown
|
| 470 |
+
frame_data: List of frame data dicts with clock values and timeout counts
|
| 471 |
|
| 472 |
Returns:
|
| 473 |
Final DetectionResult
|
| 474 |
"""
|
| 475 |
+
# Log timing breakdown
|
| 476 |
+
self._log_timing_breakdown(timing)
|
| 477 |
+
|
| 478 |
+
# Get plays from state machine (normal 40-second plays)
|
| 479 |
+
state_machine_plays = self.state_machine.get_plays()
|
| 480 |
+
play_stats = self.state_machine.get_stats()
|
| 481 |
|
| 482 |
+
# Run post-hoc clock reset identification (40→25 transitions)
|
| 483 |
+
clock_reset_identifier = ClockResetIdentifier()
|
| 484 |
+
clock_reset_plays, clock_reset_stats = clock_reset_identifier.identify(frame_data)
|
| 485 |
logger.info(
|
| 486 |
+
"Clock reset identification: %d total, %d weird (rejected), %d timeouts, %d special plays",
|
| 487 |
clock_reset_stats.get("total", 0),
|
| 488 |
clock_reset_stats.get("weird_clock", 0),
|
| 489 |
clock_reset_stats.get("timeout", 0),
|
| 490 |
clock_reset_stats.get("special", 0),
|
| 491 |
)
|
| 492 |
|
| 493 |
+
# Merge clock reset stats into play stats
|
| 494 |
+
play_stats["clock_reset_events"] = clock_reset_stats
|
| 495 |
|
| 496 |
+
# Merge state machine plays with clock reset plays using PlayMerger
|
| 497 |
+
merger = PlayMerger()
|
| 498 |
+
plays = merger.merge(state_machine_plays, clock_reset_plays)
|
| 499 |
|
| 500 |
+
# Recalculate stats from merged plays
|
| 501 |
+
start_methods: Dict[str, int] = {}
|
| 502 |
+
end_methods: Dict[str, int] = {}
|
| 503 |
+
play_types: Dict[str, int] = {}
|
| 504 |
+
for play in plays:
|
| 505 |
+
start_methods[play.start_method] = start_methods.get(play.start_method, 0) + 1
|
| 506 |
+
end_methods[play.end_method] = end_methods.get(play.end_method, 0) + 1
|
| 507 |
+
play_types[play.play_type] = play_types.get(play.play_type, 0) + 1
|
| 508 |
|
| 509 |
+
play_stats["total_plays"] = len(plays)
|
| 510 |
+
play_stats["start_methods"] = start_methods
|
| 511 |
+
play_stats["end_methods"] = end_methods
|
| 512 |
+
play_stats["play_types"] = play_types
|
| 513 |
|
| 514 |
result = DetectionResult(
|
| 515 |
video=Path(self.config.video_path).name,
|
|
|
|
| 524 |
)
|
| 525 |
|
| 526 |
# Log final summary
|
| 527 |
+
logger.info("Extraction complete!")
|
| 528 |
logger.info("Processed %d frames", stats["total_frames"])
|
| 529 |
logger.info("Frames with scorebug: %d (%.1f%%)", stats["frames_with_scorebug"], 100 * stats["frames_with_scorebug"] / max(1, stats["total_frames"]))
|
| 530 |
logger.info("Frames with clock: %d (%.1f%%)", stats["frames_with_clock"], 100 * stats["frames_with_clock"] / max(1, stats["total_frames"]))
|
| 531 |
+
logger.info("Plays extracted: %d", len(plays))
|
| 532 |
|
| 533 |
return result
|
| 534 |
|
| 535 |
def _log_timing_breakdown(self, timing: Dict[str, float]) -> None:
|
| 536 |
+
"""Log the timing breakdown for the extraction run."""
|
| 537 |
total_time = sum(timing.values())
|
| 538 |
logger.info("=" * 50)
|
| 539 |
logger.info("TIMING BREAKDOWN")
|
|
|
|
| 544 |
logger.info(" TOTAL: %.2fs", total_time)
|
| 545 |
logger.info("=" * 50)
|
| 546 |
|
| 547 |
+
def extract(self) -> DetectionResult:
|
| 548 |
"""
|
| 549 |
+
Run play extraction on the video segment.
|
| 550 |
|
| 551 |
Uses streaming processing for optimal performance:
|
| 552 |
- Pass 0 (if needed): Build digit templates using OCR on scorebug-verified frames
|
| 553 |
- Streaming pass: Read frame -> extract region -> template match -> state machine update
|
| 554 |
(threaded video I/O overlaps reading with processing)
|
| 555 |
+
- Finalize: Clock reset identification and result building
|
| 556 |
|
| 557 |
When fixed coordinates are provided, the scorebug detection step simply verifies
|
| 558 |
the scorebug is present at the known location (faster than searching).
|
| 559 |
|
| 560 |
Returns:
|
| 561 |
+
DetectionResult with all extracted plays
|
| 562 |
"""
|
| 563 |
+
logger.info("Starting play extraction...")
|
| 564 |
logger.info("Video: %s", self.config.video_path)
|
| 565 |
logger.info("Segment: %.1fs to %s", self.config.start_time, self.config.end_time or "end")
|
| 566 |
|
|
|
|
| 581 |
if not self.template_reader and self.template_builder:
|
| 582 |
success = self._pass0_build_templates_with_real_detection(timing)
|
| 583 |
if not success:
|
| 584 |
+
logger.warning("Pass 0 failed to build templates, extraction may fail or be inaccurate")
|
| 585 |
|
| 586 |
# Log mode info (after Pass 0 so we can show if templates were built)
|
| 587 |
+
self._log_extraction_mode()
|
| 588 |
|
| 589 |
# Initialize video and get processing context
|
| 590 |
context, stats, _ = self._open_video_and_get_context()
|
| 591 |
|
| 592 |
+
# Streaming extraction pass: read frames + template match + state machine (all in one)
|
| 593 |
# Uses threaded video I/O to overlap reading with processing
|
| 594 |
+
# Returns frame_data needed for post-hoc clock reset identification
|
| 595 |
+
frame_data = self._streaming_extraction_pass(context, stats, timing)
|
| 596 |
|
| 597 |
+
# Finalize: Post-hoc clock reset identification (Class A/B/C) and result building
|
| 598 |
+
return self._finalize_extraction(context, stats, timing, frame_data)
|
| 599 |
|
| 600 |
+
# pylint: disable=too-many-locals
|
| 601 |
+
def extract_parallel(self, num_workers: int = 2, output_dir: Optional[Path] = None) -> DetectionResult:
|
| 602 |
"""
|
| 603 |
+
Run play extraction using parallel chunk processing.
|
| 604 |
|
| 605 |
This provides ~26% speedup over sequential processing by using multiple
|
| 606 |
processes to read and process different segments of the video simultaneously.
|
|
|
|
| 610 |
2. Save templates to disk for worker processes to load
|
| 611 |
3. Parallel pass: Each worker processes a video chunk independently
|
| 612 |
4. Merge: Combine frame data from all chunks in chronological order
|
| 613 |
+
5. State machine: Process merged data to extract plays
|
| 614 |
|
| 615 |
Args:
|
| 616 |
num_workers: Number of parallel workers (default 2).
|
| 617 |
output_dir: Output directory for templates (required).
|
| 618 |
|
| 619 |
Returns:
|
| 620 |
+
DetectionResult with all extracted plays
|
| 621 |
"""
|
| 622 |
+
logger.info("Starting parallel play extraction (%d workers)...", num_workers)
|
|
|
|
|
|
|
| 623 |
logger.info("Video: %s", self.config.video_path)
|
| 624 |
logger.info("Segment: %.1fs to %s", self.config.start_time, self.config.end_time or "end")
|
| 625 |
|
|
|
|
| 637 |
if not self.template_reader and self.template_builder:
|
| 638 |
success = self._pass0_build_templates_with_real_detection(timing)
|
| 639 |
if not success:
|
| 640 |
+
logger.warning("Pass 0 failed to build templates, extraction may fail or be inaccurate")
|
| 641 |
|
| 642 |
# Save templates to disk for worker processes
|
| 643 |
template_path = None
|
|
|
|
| 664 |
|
| 665 |
# Run parallel processing
|
| 666 |
logger.info("Starting parallel frame extraction...")
|
| 667 |
+
t_parallel_start = time.perf_counter()
|
| 668 |
|
| 669 |
+
# Create parallel processing config
|
| 670 |
+
# Asserts: validated by _validate_config, parallel mode requires fixed coords
|
| 671 |
+
assert self.config.fixed_playclock_coords is not None
|
| 672 |
+
assert self.config.fixed_scorebug_coords is not None
|
| 673 |
+
parallel_config = ParallelProcessingConfig(
|
| 674 |
video_path=self.config.video_path,
|
| 675 |
start_time=self.config.start_time,
|
| 676 |
end_time=end_time,
|
|
|
|
| 679 |
fixed_scorebug_coords=self.config.fixed_scorebug_coords,
|
| 680 |
template_library_path=str(template_path) if template_path else None,
|
| 681 |
timeout_config_path=timeout_config_path,
|
|
|
|
| 682 |
)
|
| 683 |
|
| 684 |
+
frame_data, stats, io_time = process_video_parallel(parallel_config, num_workers=num_workers)
|
| 685 |
+
|
| 686 |
timing["video_io"] = io_time
|
| 687 |
# Estimate template matching time from parallel processing
|
| 688 |
+
parallel_time = time.perf_counter() - t_parallel_start
|
| 689 |
timing["template_matching"] = max(0, parallel_time - io_time - timing["template_building"])
|
| 690 |
|
| 691 |
logger.info("Parallel processing complete: %d frames", stats["total_frames"])
|
|
|
|
| 704 |
)
|
| 705 |
|
| 706 |
# Run state machine on merged frame data
|
| 707 |
+
t_sm_start = time.perf_counter()
|
| 708 |
for frame in frame_data:
|
| 709 |
# Create proper objects for state machine
|
| 710 |
scorebug = ScorebugDetection(
|
|
|
|
| 718 |
confidence=1.0 if frame.get("clock_detected") else 0.0,
|
| 719 |
raw_text=f"PARALLEL_{frame.get('clock_value')}" if frame.get("clock_detected") else "PARALLEL_FAILED",
|
| 720 |
)
|
| 721 |
+
# Create timeout info for clock reset classification
|
| 722 |
+
timeout_info = None
|
| 723 |
+
if frame.get("home_timeouts") is not None or frame.get("away_timeouts") is not None:
|
| 724 |
+
timeout_info = TimeoutInfo(
|
| 725 |
+
home_timeouts=frame.get("home_timeouts"),
|
| 726 |
+
away_timeouts=frame.get("away_timeouts"),
|
| 727 |
+
)
|
| 728 |
+
self.state_machine.update(frame["timestamp"], scorebug, clock_reading, timeout_info)
|
| 729 |
+
timing["state_machine"] = time.perf_counter() - t_sm_start
|
| 730 |
|
| 731 |
# Update stats dict
|
| 732 |
stats_dict = {
|
|
|
|
| 735 |
"frames_with_clock": stats["frames_with_clock"],
|
| 736 |
}
|
| 737 |
|
| 738 |
+
# Finalize: Post-hoc clock reset identification (Class A/B/C) and result building
|
| 739 |
+
return self._finalize_extraction(context, stats_dict, timing, frame_data)
|
| 740 |
|
| 741 |
+
def _log_extraction_mode(self) -> None:
|
| 742 |
+
"""Log the extraction mode being used."""
|
| 743 |
+
use_fixed_region = self.scorebug_detector and self.scorebug_detector.is_fixed_region_mode
|
| 744 |
|
| 745 |
if use_fixed_region:
|
| 746 |
logger.info("Mode: Fixed region (scorebug location pre-configured)")
|
|
|
|
| 755 |
else:
|
| 756 |
logger.info(" Will build templates using fallback method")
|
| 757 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 758 |
def _play_to_dict(self, play: PlayEvent) -> Dict[str, Any]:
|
| 759 |
"""Convert PlayEvent to dictionary for JSON serialization."""
|
| 760 |
return {
|
|
|
|
| 773 |
|
| 774 |
def save_results(self, result: DetectionResult, output_path: str) -> None:
|
| 775 |
"""
|
| 776 |
+
Save extraction results to a JSON file.
|
| 777 |
|
| 778 |
Args:
|
| 779 |
+
result: Extraction results
|
| 780 |
output_path: Path to output file
|
| 781 |
"""
|
| 782 |
output = Path(output_path)
|
| 783 |
output.parent.mkdir(parents=True, exist_ok=True)
|
| 784 |
|
| 785 |
+
data = format_extraction_result_dict(result)
|
| 786 |
|
| 787 |
# Include configuration if provided (for reproducibility)
|
| 788 |
if result.config:
|
src/pipeline/template_builder_pass.py
ADDED
|
@@ -0,0 +1,334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Template building pass (Pass 0) for the play detection pipeline.
|
| 3 |
+
|
| 4 |
+
This module handles the initial phase of building digit templates by scanning
|
| 5 |
+
the video for frames with scorebugs and running OCR on the play clock region.
|
| 6 |
+
These templates are then used for fast template matching in subsequent passes.
|
| 7 |
+
|
| 8 |
+
Note: This module uses functions instead of a class because there is only one
|
| 9 |
+
public entry point (run_template_building_pass). The class pattern was causing
|
| 10 |
+
"too few public methods" warnings.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import logging
|
| 14 |
+
import time
|
| 15 |
+
from functools import lru_cache
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from typing import Any, Optional, Tuple
|
| 18 |
+
|
| 19 |
+
import cv2
|
| 20 |
+
import easyocr
|
| 21 |
+
import numpy as np
|
| 22 |
+
|
| 23 |
+
from detection import DetectScoreBug
|
| 24 |
+
from readers import ReadPlayClock
|
| 25 |
+
from setup import DigitTemplateBuilder, DigitTemplateLibrary, PlayClockRegionExtractor
|
| 26 |
+
from .models import DetectionConfig
|
| 27 |
+
|
| 28 |
+
logger = logging.getLogger(__name__)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@lru_cache(maxsize=1)
def get_easyocr_reader() -> easyocr.Reader:
    """Return the process-wide EasyOCR reader, creating it on first use.

    The lru_cache(maxsize=1) decorator makes this a lazy singleton: the
    (expensive) reader construction happens once, and every later call
    returns the same cached instance.
    """
    logger.info("Initializing EasyOCR reader for template building...")
    ocr_reader = easyocr.Reader(["en"], gpu=False, verbose=False)
    logger.info("EasyOCR reader initialized")
    return ocr_reader
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _validate_config(config: DetectionConfig) -> bool:
    """Check that the configuration carries everything Pass 0 needs.

    Three prerequisites are verified in order: a scorebug template path is
    configured, that template exists on disk, and fixed scorebug coordinates
    are present. The first missing prerequisite logs a warning and aborts.

    Returns:
        True when all prerequisites are satisfied, False otherwise.
    """
    if not config.template_path:
        logger.warning("Pass 0: No scorebug template path provided, cannot detect scorebugs")
        return False

    template_path = Path(config.template_path)
    if not template_path.exists():
        logger.warning("Pass 0: Scorebug template not found at %s", template_path)
        return False

    if not config.fixed_scorebug_coords:
        logger.warning("Pass 0: No fixed scorebug coordinates provided")
        return False

    return True
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _process_scorebug_frame(
    frame: np.ndarray[Any, Any],
    scorebug_bbox: Tuple[int, int, int, int],
    current_time: float,
    clock_reader: PlayClockRegionExtractor,
    template_builder: DigitTemplateBuilder,
    reader: easyocr.Reader,
) -> int:
    """
    OCR the play clock region of a scorebug frame and record a template sample.

    Args:
        frame: Video frame
        scorebug_bbox: Scorebug bounding box (x, y, w, h)
        current_time: Current timestamp
        clock_reader: Play clock region extractor
        template_builder: Digit template builder
        reader: EasyOCR reader

    Returns:
        1 if a valid sample was collected, 0 otherwise
    """
    play_clock_region = clock_reader.extract_region(frame, scorebug_bbox)
    if play_clock_region is None:
        return 0

    preprocessed = clock_reader.preprocess_for_ocr(play_clock_region)

    try:
        ocr_hits = reader.readtext(preprocessed, allowlist="0123456789", detail=1)
        if ocr_hits:
            # Keep only the highest-confidence OCR hit
            best_hit = max(ocr_hits, key=lambda hit: hit[2])
            raw_text, conf = best_hit[1].strip(), best_hit[2]

            if raw_text and conf >= 0.5:
                clock_value = None
                try:
                    clock_value = int(raw_text)
                except ValueError:
                    pass  # Non-numeric OCR output, skip
                # Play clock values are 0-40 seconds; reject anything else
                if clock_value is not None and 0 <= clock_value <= 40:
                    template_builder.add_sample(play_clock_region, clock_value, current_time, conf)
                    return 1
    except Exception as exc:  # pylint: disable=broad-except
        logger.debug("Pass 0: OCR error at %.1fs: %s", current_time, exc)

    return 0
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def _scan_video(
    config: DetectionConfig,
    clock_reader: PlayClockRegionExtractor,
    template_builder: DigitTemplateBuilder,
    temp_detector: DetectScoreBug,
    reader: easyocr.Reader,
    min_samples: int,
    max_scan_frames: int,
    target_coverage: float,
) -> Tuple[int, int, int]:
    """
    Scan video frames and collect OCR samples for template building.

    Args:
        config: Detection configuration
        clock_reader: Play clock region extractor
        template_builder: Digit template builder
        temp_detector: Scorebug detector to use
        reader: EasyOCR reader instance
        min_samples: Minimum valid OCR samples to collect
        max_scan_frames: Maximum frames to scan
        target_coverage: Target template coverage (0.0-1.0)

    Returns:
        Tuple of (valid_samples, frames_scanned, frames_with_scorebug)
    """
    # Open video
    cap = cv2.VideoCapture(config.video_path)
    if not cap.isOpened():
        logger.error("Pass 0: Could not open video")
        return 0, 0, 0

    fps = cap.get(cv2.CAP_PROP_FPS)
    # Guard against broken metadata: fps <= 0 would otherwise cause a
    # ZeroDivisionError when computing timestamps below.
    if fps <= 0:
        logger.warning("Pass 0: Invalid FPS (%s) reported by video, assuming 30", fps)
        fps = 30.0
    # Always advance by at least one frame per iteration: when
    # frame_interval * fps < 1, int() truncates to 0 and current_frame
    # would never move, pinning every timestamp at 0.0s.
    frame_skip = max(1, int(config.frame_interval * fps))

    # Initialize counters
    valid_samples = 0
    frames_scanned = 0
    frames_with_scorebug = 0
    current_frame = 0

    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    logger.info(" Scanning from frame 0 (0.0s)...")

    try:
        while frames_scanned < max_scan_frames:
            ret, frame = cap.read()
            if not ret:
                break

            current_time = current_frame / fps
            frames_scanned += 1

            # Detect scorebug using template matching
            scorebug_result = temp_detector.detect(frame)

            if scorebug_result.detected:
                frames_with_scorebug += 1
                assert scorebug_result.bbox is not None  # detected implies bbox is set
                valid_samples += _process_scorebug_frame(frame, scorebug_result.bbox, current_time, clock_reader, template_builder, reader)

            # Progress logging every 200 frames
            if frames_scanned % 200 == 0:
                coverage = template_builder.get_coverage_estimate()
                logger.info(
                    " Pass 0 progress: %d frames scanned, %d with scorebug, %d valid samples, ~%.0f%% coverage",
                    frames_scanned,
                    frames_with_scorebug,
                    valid_samples,
                    coverage * 100,
                )

                # Check completion criteria (coverage is only refreshed here)
                if valid_samples >= min_samples or coverage >= target_coverage:
                    logger.info(" Completion criteria met!")
                    break

            # Skip frames (grab decodes nothing, so it is much cheaper than read)
            for _ in range(frame_skip - 1):
                cap.grab()
            current_frame += frame_skip

    finally:
        cap.release()

    return valid_samples, frames_scanned, frames_with_scorebug
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# pylint: disable=too-many-locals
def run_template_building_pass(
    config: DetectionConfig,
    clock_reader: PlayClockRegionExtractor,
    template_builder: DigitTemplateBuilder,
    min_samples: int = 200,
    max_scan_frames: int = 2000,
    target_coverage: float = 0.70,
) -> Tuple[Optional[DigitTemplateLibrary], Optional[ReadPlayClock], float]:
    """
    Run the template building pass (Pass 0).

    This pass runs BEFORE the main detection loop. It:
    1. Uses the scorebug template (created during user setup) for detection
    2. Scans through video until finding frames with actual scorebugs
    3. Runs OCR on ONLY frames where scorebug is detected
    4. Builds templates from samples with high-confidence OCR results

    This solves the problem of building templates from pre-game content that
    has no scorebug, which causes all subsequent template matching to fail.

    Args:
        config: Detection configuration
        clock_reader: Play clock region extractor
        template_builder: Digit template builder
        min_samples: Minimum valid OCR samples to collect (default: 200)
        max_scan_frames: Maximum frames to scan (default: 2000)
        target_coverage: Target template coverage 0.0-1.0 (default: 0.70)

    Returns:
        Tuple of (template_library, template_reader, build_time).
        template_library and template_reader may be None if building failed.
    """
    logger.info("Pass 0: Building templates using scorebug template detection...")
    logger.info(" Target: %d samples OR %.0f%% template coverage", min_samples, target_coverage * 100)

    t_build_start = time.perf_counter()

    # Validate configuration
    if not _validate_config(config):
        return None, None, time.perf_counter() - t_build_start

    # Assert: _validate_config guarantees fixed_scorebug_coords is set
    assert config.fixed_scorebug_coords is not None

    # Create temporary scorebug detector for Pass 0
    sb_x, sb_y, sb_w, sb_h = config.fixed_scorebug_coords
    logger.info(" Scorebug region: (%d, %d, %d, %d)", sb_x, sb_y, sb_w, sb_h)
    logger.info(" Scorebug template: %s", config.template_path)

    temp_detector = DetectScoreBug(
        template_path=str(config.template_path),
        fixed_region=(sb_x, sb_y, sb_w, sb_h),
        use_split_detection=config.use_split_detection,
    )

    # Get EasyOCR reader for labeling
    reader = get_easyocr_reader()

    # Scan video and collect samples
    valid_samples, frames_scanned, frames_with_scorebug = _scan_video(config, clock_reader, template_builder, temp_detector, reader, min_samples, max_scan_frames, target_coverage)

    # Log scan results
    logger.info(
        "Pass 0 scan complete: %d frames, %d with scorebug (%.1f%%), %d valid samples",
        frames_scanned,
        frames_with_scorebug,
        100 * frames_with_scorebug / max(1, frames_scanned),
        valid_samples,
    )

    # Check if we have enough samples
    if valid_samples < 50:
        logger.warning("Pass 0: Insufficient samples (%d < 50), template building may fail", valid_samples)
        if frames_with_scorebug == 0:
            logger.error("Pass 0: No scorebugs detected! Check that the scorebug template matches the video.")
            return None, None, time.perf_counter() - t_build_start

    # Build templates
    template_library = template_builder.build_templates(min_samples=2)
    coverage = template_library.get_coverage_status()
    logger.info(
        "Pass 0 templates built: %d/%d (%.1f%%) coverage",
        coverage["total_have"],
        coverage["total_needed"],
        # max(1, ...) guards against a zero total_needed (same pattern as the
        # frames_scanned percentage above)
        100 * coverage["total_have"] / max(1, coverage["total_needed"]),
    )

    # Create template reader
    region_w = clock_reader.config.width if clock_reader.config else 50
    region_h = clock_reader.config.height if clock_reader.config else 28
    template_reader = ReadPlayClock(template_library, region_w, region_h)

    build_time = time.perf_counter() - t_build_start
    logger.info("Pass 0 complete: Template building took %.2fs", build_time)

    return template_library, template_reader, build_time
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
# Keep the class for backward compatibility, but it now delegates to the function
class TemplateBuildingPass:  # pylint: disable=too-few-public-methods
    """
    Backward-compatible class wrapper for run_template_building_pass.

    Deprecated: Use run_template_building_pass() function directly instead.
    This class exists only for backward compatibility with existing code.
    """

    def __init__(
        self,
        config: DetectionConfig,
        clock_reader: PlayClockRegionExtractor,
        template_builder: DigitTemplateBuilder,
        min_samples: int = 200,
        max_scan_frames: int = 2000,
        target_coverage: float = 0.70,
    ):
        """Capture the arguments so run() can forward them unchanged."""
        self.config = config
        self.clock_reader = clock_reader
        self.template_builder = template_builder
        self.min_samples = min_samples
        self.max_scan_frames = max_scan_frames
        self.target_coverage = target_coverage

    def run(self) -> Tuple[Optional[DigitTemplateLibrary], Optional[ReadPlayClock], float]:
        """Delegate to run_template_building_pass with the stored arguments."""
        forwarded = (
            self.config,
            self.clock_reader,
            self.template_builder,
            self.min_samples,
            self.max_scan_frames,
            self.target_coverage,
        )
        return run_template_building_pass(*forwarded)
|
src/readers/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Reader modules for extracting values from video frame regions.
|
| 2 |
+
|
| 3 |
+
This package contains components that read values from detected regions:
|
| 4 |
+
- Play clock digit values (via template matching)
|
| 5 |
+
- Future: expanded timeout value reading
|
| 6 |
+
|
| 7 |
+
Note: detect_red_digits and normalize_to_grayscale are now in utils.color
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from .models import (
|
| 11 |
+
PlayClockReading,
|
| 12 |
+
TemplateMatchResult,
|
| 13 |
+
TemplatePlayClockReading,
|
| 14 |
+
)
|
| 15 |
+
from .playclock import (
|
| 16 |
+
ReadPlayClock,
|
| 17 |
+
backfill_missing_readings,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
__all__ = [
|
| 21 |
+
# Models
|
| 22 |
+
"PlayClockReading",
|
| 23 |
+
"TemplateMatchResult",
|
| 24 |
+
"TemplatePlayClockReading",
|
| 25 |
+
# Play clock reading
|
| 26 |
+
"ReadPlayClock",
|
| 27 |
+
"backfill_missing_readings",
|
| 28 |
+
]
|
src/readers/models.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Pydantic models for value reading results.
|
| 3 |
+
|
| 4 |
+
These models represent the outputs of reading values from detected regions:
|
| 5 |
+
play clock readings, template match results, etc.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import Optional
|
| 9 |
+
|
| 10 |
+
from pydantic import BaseModel, Field
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class PlayClockReading(BaseModel):
    """Result from play clock reading (used by both OCR and template-based methods).

    All fields are required (declared with ``Field(...)``); ``value`` is
    ``None`` when no readable clock value was found, and ``raw_text`` carries
    a debug tag identifying the source of the reading.
    """

    detected: bool = Field(..., description="Whether a valid play clock value was detected")
    value: Optional[int] = Field(..., description="Play clock value (0-40 seconds), None if unreadable")
    confidence: float = Field(..., description="Confidence score (0.0 to 1.0)")
    raw_text: str = Field(..., description="Source identifier for debugging (e.g., 'TEMPLATE_40', 'BACKFILLED_25', 'NO_SCOREBUG')")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class TemplateMatchResult(BaseModel):
    """Result from template matching for a single digit.

    ``digit_value`` uses -1 to represent a blank digit position; 0-9 are
    literal digit matches. ``is_valid`` reflects whether ``confidence``
    cleared the matcher's acceptance threshold.
    """

    digit_value: int = Field(..., description="Matched digit value (-1 for blank, 0-9 for digits)")
    confidence: float = Field(..., description="Match confidence (0.0 to 1.0)")
    is_valid: bool = Field(..., description="Whether match confidence exceeds threshold")
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class TemplatePlayClockReading(BaseModel):
    """Result from template-based play clock reading.

    ``tens_match`` and ``ones_match`` carry the per-digit evidence behind the
    overall reading; ``method`` records which matching mode produced it.
    """

    detected: bool = Field(..., description="Whether a valid clock value was read")
    value: Optional[int] = Field(..., description="Clock value (0-40), None if unreadable")
    confidence: float = Field(..., description="Overall confidence score")
    tens_match: Optional[TemplateMatchResult] = Field(..., description="Tens digit match result")
    ones_match: Optional[TemplateMatchResult] = Field(..., description="Ones digit match result")
    method: str = Field(..., description="'template_single', 'template_double', or 'template'")
|
src/readers/playclock.py
ADDED
|
@@ -0,0 +1,543 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Play clock reading via template matching.
|
| 3 |
+
|
| 4 |
+
This module provides:
|
| 5 |
+
- ReadPlayClock: Fast template-based clock value reading
|
| 6 |
+
- backfill_missing_readings for gap interpolation
|
| 7 |
+
|
| 8 |
+
Color normalization utilities (detect_red_digits, normalize_to_grayscale) are
|
| 9 |
+
now shared from utils.color to eliminate code duplication.
|
| 10 |
+
|
| 11 |
+
Performance comparison (from ocr_benchmark.md):
|
| 12 |
+
- EasyOCR: 48.9ms/frame
|
| 13 |
+
- Template Matching: 0.3ms/frame (163x faster)
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import logging
|
| 17 |
+
from typing import Any, List, Tuple
|
| 18 |
+
|
| 19 |
+
import cv2
|
| 20 |
+
import numpy as np
|
| 21 |
+
|
| 22 |
+
from setup import DigitTemplate, DigitTemplateLibrary
|
| 23 |
+
from utils import extract_center_region, extract_far_left_region, extract_left_region, extract_right_region, preprocess_playclock_region
|
| 24 |
+
from .models import PlayClockReading, TemplateMatchResult, TemplatePlayClockReading
|
| 25 |
+
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# =============================================================================
|
| 30 |
+
# Template-Based Play Clock Reader
|
| 31 |
+
# =============================================================================
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class ReadPlayClock:
    """
    Template-matching play clock reader with dual-mode layout handling.

    Digit templates (built once from OCR-labelled samples) give roughly
    0.3ms/frame recognition versus ~49ms/frame for OCR.

    Two display layouts are supported, and both are tried on every read:
    - Single-digit values (0-9): the digit is CENTER-aligned
    - Double-digit values (10-40): tens on the LEFT, ones on the RIGHT

    The interpretation with the stronger match wins. Template matching's
    search window absorbs small translational shifts of the digits.
    """

    # Minimum score for an individual digit match to count as valid.
    MIN_DIGIT_CONFIDENCE = 0.6
    # Minimum combined score for a whole clock reading to be reported.
    MIN_CLOCK_CONFIDENCE = 0.5

    def __init__(self, template_library: DigitTemplateLibrary, region_width: int = 50, region_height: int = 28):
        """
        Initialize the template-based clock reader.

        Args:
            template_library: Pre-built digit template library
            region_width: Play clock region width
            region_height: Play clock region height
        """
        self.library = template_library
        self.region_width = region_width
        self.region_height = region_height
        # Must stay in sync with the scale used during template building.
        self.scale_factor = 4

        logger.info("ReadPlayClock initialized")

    @staticmethod
    def _no_reading(
        method: str,
        confidence: float = 0.0,
        tens_match: Any = None,
        ones_match: Any = None,
    ) -> TemplatePlayClockReading:
        """Build a not-detected TemplatePlayClockReading for a failed attempt."""
        return TemplatePlayClockReading(
            detected=False,
            value=None,
            confidence=confidence,
            tens_match=tens_match,
            ones_match=ones_match,
            method=method,
        )

    def preprocess_region(self, region: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        """
        Preprocess a play clock region for template matching.

        Thin wrapper around the shared utils.regions helper.

        Args:
            region: Play clock region (BGR format)

        Returns:
            Preprocessed binary image (scaled up)
        """
        return preprocess_playclock_region(region, self.scale_factor)

    def match_digit(self, region: np.ndarray[Any, Any], templates: List[DigitTemplate]) -> TemplateMatchResult:
        """
        Match a region against a set of digit templates.

        Normalized cross-correlation over a small search window makes the
        match tolerant to slight translational shifts.

        Args:
            region: Preprocessed digit region
            templates: List of templates to match against

        Returns:
            TemplateMatchResult with the best-scoring digit
        """
        if not templates:
            return TemplateMatchResult(digit_value=-1, confidence=0.0, is_valid=False)

        region_h, region_w = region.shape[:2]
        best_digit, best_score = -1, -1.0

        for candidate in templates:
            patch = candidate.template

            # Shrink the template if it would not fit inside the region
            # (keep a little slack so a search window remains).
            if patch.shape[0] > region_h or patch.shape[1] > region_w:
                shrink = min(region_h / patch.shape[0], region_w / patch.shape[1]) * 0.9
                patch = cv2.resize(patch, (int(patch.shape[1] * shrink), int(patch.shape[0] * shrink)))

            scores = cv2.matchTemplate(region, patch, cv2.TM_CCOEFF_NORMED)
            _, peak, _, _ = cv2.minMaxLoc(scores)

            if peak > best_score:
                best_score = peak
                best_digit = candidate.digit_value

        return TemplateMatchResult(
            digit_value=best_digit,
            confidence=best_score,
            is_valid=best_score >= self.MIN_DIGIT_CONFIDENCE,
        )

    def _try_double_digit(self, preprocessed: np.ndarray[Any, Any]) -> TemplatePlayClockReading:
        """
        Interpret the region as a double-digit display (10-40).

        Tens digit is matched on the left half, ones on the right half.

        Args:
            preprocessed: Preprocessed play clock region

        Returns:
            TemplatePlayClockReading for the double-digit interpretation
        """
        # Exclude the blank (-1) template from tens candidates: it was built
        # from the far-left region and has a different size.
        tens_candidates = [t for t in self.library.get_all_templates(is_tens=True, position="left") if t.digit_value != -1]
        ones_candidates = self.library.get_all_templates(is_tens=False, position="right")

        if not ones_candidates or not tens_candidates:
            return self._no_reading("template_double")

        # Match each half independently.
        tens_match = self.match_digit(extract_left_region(preprocessed), tens_candidates)
        ones_match = self.match_digit(extract_right_region(preprocessed), ones_candidates)

        # Both digits must clear the per-digit threshold.
        if not (tens_match.is_valid and ones_match.is_valid):
            return self._no_reading(
                "template_double",
                confidence=min(tens_match.confidence, ones_match.confidence),
                tens_match=tens_match,
                ones_match=ones_match,
            )

        clock_value = tens_match.digit_value * 10 + ones_match.digit_value

        # Play clocks only show 10-40 in double-digit mode.
        if not 10 <= clock_value <= 40:
            return self._no_reading("template_double", tens_match=tens_match, ones_match=ones_match)

        overall = (tens_match.confidence + ones_match.confidence) / 2
        accepted = overall >= self.MIN_CLOCK_CONFIDENCE

        return TemplatePlayClockReading(
            detected=accepted,
            value=clock_value if accepted else None,
            confidence=overall,
            tens_match=tens_match,
            ones_match=ones_match,
            method="template_double",
        )

    def _try_single_digit(self, preprocessed: np.ndarray[Any, Any]) -> TemplatePlayClockReading:
        """
        Interpret the region as a single-digit display (0-9), digit centered.

        Args:
            preprocessed: Preprocessed play clock region

        Returns:
            TemplatePlayClockReading for the single-digit interpretation
        """
        ones_candidates = self.library.get_all_templates(is_tens=False, position="center")
        blank_candidates = [t for t in self.library.get_all_templates(is_tens=True, position="left") if t.digit_value == -1]

        if not ones_candidates:
            return self._no_reading("template_single")

        # Match the centered digit; the far-left blank match is informational
        # only (carried through in tens_match, never gates the decision).
        ones_match = self.match_digit(extract_center_region(preprocessed), ones_candidates)
        blank_match = self.match_digit(extract_far_left_region(preprocessed), blank_candidates) if blank_candidates else None

        if not ones_match.is_valid:
            return self._no_reading(
                "template_single",
                confidence=ones_match.confidence,
                tens_match=blank_match,
                ones_match=ones_match,
            )

        clock_value = ones_match.digit_value

        # Single-digit mode only covers 0-9.
        if not 0 <= clock_value <= 9:
            return self._no_reading("template_single", tens_match=blank_match, ones_match=ones_match)

        overall = ones_match.confidence
        accepted = overall >= self.MIN_CLOCK_CONFIDENCE

        return TemplatePlayClockReading(
            detected=accepted,
            value=clock_value if accepted else None,
            confidence=overall,
            tens_match=blank_match,
            ones_match=ones_match,
            method="template_single",
        )

    def read(self, region: np.ndarray[Any, Any]) -> TemplatePlayClockReading:
        """
        Read the play clock value using dual-mode template matching.

        Both the single-digit (centered) and double-digit (left/right)
        interpretations are evaluated; the stronger one is returned.

        Args:
            region: Play clock region (BGR format, original size ~50x28)

        Returns:
            TemplatePlayClockReading with detected value or error state
        """
        preprocessed = self.preprocess_region(region)

        double_result = self._try_double_digit(preprocessed)
        single_result = self._try_single_digit(preprocessed)

        # A detected reading always beats an undetected one; when both (or
        # neither) detected, the higher-confidence interpretation wins, with
        # ties going to the double-digit reading. The undetected result is
        # still returned (for debugging) when nothing was detected.
        if double_result.detected != single_result.detected:
            return double_result if double_result.detected else single_result
        return single_result if single_result.confidence > double_result.confidence else double_result

    def read_from_frame(
        self,
        frame: np.ndarray[Any, Any],
        scorebug_bbox: Tuple[int, int, int, int],
        clock_region_offset: Tuple[int, int, int, int],
    ) -> TemplatePlayClockReading:
        """
        Read the play clock from a full frame, located relative to the scorebug.

        Args:
            frame: Full video frame (BGR)
            scorebug_bbox: Scorebug bounding box (x, y, w, h)
            clock_region_offset: Play clock region offset from scorebug (x_off, y_off, w, h)

        Returns:
            TemplatePlayClockReading with detected value
        """
        sb_x, sb_y, _, _ = scorebug_bbox
        off_x, off_y, clock_w, clock_h = clock_region_offset

        # Resolve to absolute frame coordinates, then reuse the fixed-location
        # path (identical bounds checking and extraction).
        return self.read_from_fixed_location(frame, (sb_x + off_x, sb_y + off_y, clock_w, clock_h))

    def read_from_fixed_location(
        self,
        frame: np.ndarray[Any, Any],
        absolute_coords: Tuple[int, int, int, int],
    ) -> TemplatePlayClockReading:
        """
        Read the play clock from a fixed absolute location in the frame.

        Bypasses scorebug detection entirely - useful once templates exist
        and the play clock's position is already known.

        Args:
            frame: Full video frame (BGR)
            absolute_coords: Absolute play clock location (x, y, w, h)

        Returns:
            TemplatePlayClockReading with detected value
        """
        x, y, w, h = absolute_coords
        frame_h, frame_w = frame.shape[:2]

        # Reject regions that fall (even partially) outside the frame.
        if x < 0 or y < 0 or x + w > frame_w or y + h > frame_h:
            return self._no_reading("template")

        return self.read(frame[y : y + h, x : x + w].copy())
| 347 |
+
|
| 348 |
+
def read_from_fixed_location(
|
| 349 |
+
self,
|
| 350 |
+
frame: np.ndarray[Any, Any],
|
| 351 |
+
absolute_coords: Tuple[int, int, int, int],
|
| 352 |
+
) -> TemplatePlayClockReading:
|
| 353 |
+
"""
|
| 354 |
+
Read play clock from a fixed absolute location in the frame.
|
| 355 |
+
|
| 356 |
+
This bypasses scorebug detection entirely - useful when templates
|
| 357 |
+
are built and we know exactly where the play clock should be.
|
| 358 |
+
|
| 359 |
+
Args:
|
| 360 |
+
frame: Full video frame (BGR)
|
| 361 |
+
absolute_coords: Absolute play clock location (x, y, w, h)
|
| 362 |
+
|
| 363 |
+
Returns:
|
| 364 |
+
TemplatePlayClockReading with detected value
|
| 365 |
+
"""
|
| 366 |
+
x, y, w, h = absolute_coords
|
| 367 |
+
|
| 368 |
+
# Validate bounds
|
| 369 |
+
frame_h, frame_w = frame.shape[:2]
|
| 370 |
+
if x < 0 or y < 0 or x + w > frame_w or y + h > frame_h:
|
| 371 |
+
return TemplatePlayClockReading(
|
| 372 |
+
detected=False,
|
| 373 |
+
value=None,
|
| 374 |
+
confidence=0.0,
|
| 375 |
+
tens_match=None,
|
| 376 |
+
ones_match=None,
|
| 377 |
+
method="template",
|
| 378 |
+
)
|
| 379 |
+
|
| 380 |
+
# Extract region
|
| 381 |
+
region = frame[y : y + h, x : x + w].copy()
|
| 382 |
+
|
| 383 |
+
return self.read(region)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
# =============================================================================
|
| 387 |
+
# Gap Interpolation
|
| 388 |
+
# =============================================================================
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def _find_gap_extent(readings: List[PlayClockReading], start_idx: int) -> int:
    """Return the index one past the last missing reading in the gap starting at start_idx."""
    idx = start_idx
    total = len(readings)
    while idx < total:
        reading = readings[idx]
        # A gap member is anything undetected or without a value.
        if reading.detected and reading.value is not None:
            break
        idx += 1
    return idx
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def _validate_gap_boundaries(readings: List[PlayClockReading], gap_start: int, gap_end: int, max_gap_seconds: int) -> Tuple[bool, int, int, int]:
    """
    Decide whether the gap [gap_start, gap_end) can be backfilled.

    Returns:
        Tuple of (is_valid, left_value, right_value, seconds_gap).
        is_valid is False if gap cannot be backfilled.
    """
    invalid = (False, 0, 0, 0)

    # A backfillable gap needs a valid reading on each side.
    if gap_start == 0:
        logger.debug("Gap at start of sequence (index 0), no left side for backfill")
        return invalid

    if gap_end >= len(readings):
        logger.debug("Gap at end of sequence (index %d), no right side for backfill", gap_start)
        return invalid

    left_value = readings[gap_start - 1].value
    right_value = readings[gap_end].value

    if left_value is None or right_value is None:
        logger.debug("Adjacent values are None, cannot backfill")
        return invalid

    # The clock counts down, so the left side must exceed the right.
    if left_value <= right_value:
        logger.debug("Invalid countdown: left=%d not greater than right=%d", left_value, right_value)
        return invalid

    seconds_gap = left_value - right_value - 1

    # Refuse to invent more than max_gap_seconds worth of missing values.
    if seconds_gap > max_gap_seconds:
        logger.debug(
            "Gap of %d seconds (left=%d, right=%d) exceeds max_gap_seconds=%d, skipping backfill",
            seconds_gap,
            left_value,
            right_value,
            max_gap_seconds,
        )
        return invalid

    return True, left_value, right_value, seconds_gap
|
| 443 |
+
|
| 444 |
+
|
| 445 |
+
def _backfill_interpolate(result: List[PlayClockReading], gap_start: int, gap_frame_count: int, left_value: int, right_value: int) -> None:
    """Backfill a gap using nearest value interpolation (no missing clock values)."""
    logger.debug("No missing clock values between %d and %d, using nearest value interpolation", left_value, right_value)
    midpoint = gap_frame_count / 2
    for offset in range(gap_frame_count):
        # First half of the gap inherits the left value, second half the right.
        fill_value = left_value if offset < midpoint else right_value
        result[gap_start + offset] = PlayClockReading(
            detected=True,
            value=fill_value,
            confidence=0.0,
            raw_text=f"BACKFILLED_{fill_value}",
        )
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
def _backfill_with_missing_values(result: List[PlayClockReading], gap_start: int, gap_end: int, left_value: int, right_value: int) -> None:
    """Backfill a gap by distributing missing clock values across frames."""
    frame_total = gap_end - gap_start
    # Countdown values strictly between left and right, descending.
    missing_values = list(range(left_value - 1, right_value, -1))

    logger.info(
        "Backfilling gap: frames %d-%d, clock values %d to %d, missing values: %s",
        gap_start,
        gap_end - 1,
        left_value,
        right_value,
        missing_values,
    )

    for offset in range(frame_total):
        # Map this frame's relative position onto the missing value list.
        fraction = offset / frame_total
        slot = min(int(fraction * len(missing_values)), len(missing_values) - 1)
        fill_value = missing_values[slot]

        result[gap_start + offset] = PlayClockReading(
            detected=True,
            value=fill_value,
            confidence=0.0,
            raw_text=f"BACKFILLED_{fill_value}",
        )
        logger.debug(" Backfilled index %d with value %d", gap_start + offset, fill_value)
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
def backfill_missing_readings(readings: List[PlayClockReading], max_gap_seconds: int = 3) -> List[PlayClockReading]:
    """
    Backfill missing play clock readings when there's a clear countdown sequence with gaps.

    A gap is filled only when all of the following hold:
    1. The gap in clock VALUES is 1-3 seconds (configurable via max_gap_seconds)
    2. There's at least one valid reading on EACH side of the gap
    3. The readings form a valid countdown sequence

    Because sampling may exceed 1fps, each clock value can repeat across
    frames; the logic therefore works on clock values, not frame indices.

    Examples (showing clock values, with ? for undetected):
    - [6, 6, 5, 5, ?, ?, 3, 3] → missing value is 4, backfill the gaps
    - [10, 9, ?, ?, ?] → no backfill (no right side continuation)
    - [6, 6, ?, ?, ?, ?, ?, 0, 0] → no backfill if gap > max_gap_seconds

    Args:
        readings: List of PlayClockReading objects (in chronological order)
        max_gap_seconds: Maximum number of missing SECONDS to backfill (default: 3)

    Returns:
        New list with backfilled readings (original list is not modified)
    """
    if not readings:
        return readings

    # Work on a copy so the caller's list is untouched.
    result = list(readings)
    index = 0

    while index < len(result):
        current = result[index]

        # Valid readings are left alone.
        if current.detected and current.value is not None:
            index += 1
            continue

        # A missing reading starts a gap; measure its extent.
        gap_start = index
        gap_end = _find_gap_extent(result, index)

        is_valid, left_value, right_value, seconds_gap = _validate_gap_boundaries(result, gap_start, gap_end, max_gap_seconds)
        if is_valid:
            if seconds_gap <= 0:
                # Nothing missing value-wise: nearest-neighbor fill.
                _backfill_interpolate(result, gap_start, gap_end - gap_start, left_value, right_value)
            else:
                # Spread the missing countdown values over the gap frames.
                _backfill_with_missing_values(result, gap_start, gap_end, left_value, right_value)

        index = gap_end

    return result
|
src/setup/__init__.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Setup modules for one-time preparation before video processing.
|
| 2 |
+
|
| 3 |
+
This package contains components for:
|
| 4 |
+
- Building digit templates from OCR-labeled samples
|
| 5 |
+
- Extracting and preprocessing play clock regions for OCR
|
| 6 |
+
|
| 7 |
+
Templates are built once during an initial collection phase, then used for fast
|
| 8 |
+
digit recognition during video processing.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from .models import DigitSample, DigitTemplate, PlayClockRegionConfig
|
| 12 |
+
from .template_library import DigitTemplateLibrary
|
| 13 |
+
from .template_builder import DigitTemplateBuilder
|
| 14 |
+
from .playclock_region import PlayClockRegionExtractor
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
# Models
|
| 18 |
+
"DigitSample",
|
| 19 |
+
"DigitTemplate",
|
| 20 |
+
"PlayClockRegionConfig",
|
| 21 |
+
# Classes
|
| 22 |
+
"DigitTemplateLibrary",
|
| 23 |
+
"DigitTemplateBuilder",
|
| 24 |
+
"PlayClockRegionExtractor",
|
| 25 |
+
]
|
src/setup/coverage.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Shared coverage calculation utilities for digit templates.
|
| 3 |
+
|
| 4 |
+
This module provides utilities for calculating template coverage status,
|
| 5 |
+
shared between DigitTemplateBuilder and DigitTemplateLibrary.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import Any, Dict, Iterable, Set, Tuple
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Standard digit sets for play clock displays.
# Ones place can show any digit; the tens place only ever shows 1-4
# (play clock values run 0-40).
ONES_DIGITS = set(range(10))  # 0-9
TENS_DIGITS = {1, 2, 3, 4}  # 10, 20, 30, 40
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def categorize_template_keys(keys: Iterable[Tuple[bool, int, str]]) -> Tuple[Set[int], Set[int], Set[int], bool]:
    """
    Categorize template keys into digit sets.

    Args:
        keys: Iterable of (is_tens, digit, position) tuples

    Returns:
        Tuple of (ones_center_have, ones_right_have, tens_have, has_blank)
    """
    ones_center_have: Set[int] = set()
    ones_right_have: Set[int] = set()
    tens_have: Set[int] = set()
    has_blank = False

    for is_tens, digit, position in keys:
        if not is_tens:
            # Ones digits are bucketed by position; other positions ignored.
            if position == "center":
                ones_center_have.add(digit)
            elif position == "right":
                ones_right_have.add(digit)
        elif digit == -1:
            # The blank template is stored as a tens key with digit -1.
            has_blank = True
        else:
            tens_have.add(digit)

    return ones_center_have, ones_right_have, tens_have, has_blank
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def calculate_coverage_status(
    ones_center_have: Set[int],
    ones_right_have: Set[int],
    tens_have: Set[int],
    has_blank: bool,
    total_items: int = 0,
) -> Dict[str, Any]:
    """
    Calculate coverage status from digit sets.

    Args:
        ones_center_have: Set of ones digits that have center templates
        ones_right_have: Set of ones digits that have right templates
        tens_have: Set of tens digits that have templates
        has_blank: Whether blank template exists
        total_items: Total number of items (templates or samples)

    Returns:
        Dictionary with coverage information
    """
    # Full coverage: 10 ones_center + 10 ones_right + 4 tens + 1 blank = 25.
    total_needed = 25

    return {
        "total_needed": total_needed,
        "total_have": total_items,
        "is_complete": total_items >= total_needed,
        "ones_center_have": sorted(ones_center_have),
        "ones_center_missing": sorted(ONES_DIGITS - ones_center_have),
        "ones_right_have": sorted(ones_right_have),
        "ones_right_missing": sorted(ONES_DIGITS - ones_right_have),
        "tens_have": sorted(tens_have),
        "tens_missing": sorted(TENS_DIGITS - tens_have),
        "has_blank": has_blank,
    }
|
src/setup/models.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Pydantic models for the setup/template building phase.
|
| 3 |
+
|
| 4 |
+
These models are used during the initial template collection phase where
|
| 5 |
+
OCR-labeled samples are collected and averaged into digit templates.
|
| 6 |
+
Also includes region configuration models for setup.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from typing import Any
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
from pydantic import BaseModel, ConfigDict, Field
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class DigitSample(BaseModel):
    """A single digit sample extracted from a play clock region.

    Samples are collected during the OCR-labelled template-building phase
    and later averaged into DigitTemplate instances.
    """

    # np.ndarray fields require arbitrary types to be allowed in pydantic.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    digit_value: int = Field(..., description="0-9 for ones digit, 0-4 for tens digit, -1 for blank")
    is_tens_digit: bool = Field(..., description="True if this is the tens place digit")
    position: str = Field(..., description="'left', 'center', or 'right' - where digit appears in region")
    image: np.ndarray[Any, Any] = Field(..., description="The digit image (grayscale, preprocessed)")
    source_clock_value: int = Field(..., description="The full clock value this was extracted from")
    timestamp: float = Field(..., description="Video timestamp where this was captured")
    confidence: float = Field(..., description="OCR confidence for this sample")
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class DigitTemplate(BaseModel):
    """A template for matching a specific digit.

    Built by aggregating multiple DigitSample images; used at runtime for
    fast template matching against play clock regions.
    """

    # np.ndarray fields require arbitrary types to be allowed in pydantic.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    digit_value: int = Field(..., description="0-9 for ones, 0-4 for tens, -1 for blank")
    is_tens_digit: bool = Field(..., description="True if this is a tens place template")
    position: str = Field(..., description="'left', 'center', or 'right' - where digit appears in region")
    template: np.ndarray[Any, Any] = Field(..., description="The template image (grayscale)")
    sample_count: int = Field(..., description="Number of samples used to build this template")
    avg_confidence: float = Field(..., description="Average OCR confidence of source samples")
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class PlayClockRegionConfig(BaseModel):
    """Configuration for the play clock region relative to the scorebug bounding box.

    Offsets are measured from the scorebug's top-left corner; provenance
    fields record how the region was identified.
    """

    x_offset: int = Field(..., description="X offset from scorebug left edge")
    y_offset: int = Field(..., description="Y offset from scorebug top edge")
    width: int = Field(..., description="Width of play clock region")
    height: int = Field(..., description="Height of play clock region")
    source_video: str = Field(..., description="Video used to identify region")
    scorebug_template: str = Field(..., description="Template used for scorebug detection")
    samples_used: int = Field(..., description="Number of frames used to verify region")
|
src/{detectors/play_clock_reader.py → setup/playclock_region.py}
RENAMED
|
@@ -1,43 +1,49 @@
|
|
| 1 |
"""
|
| 2 |
-
Play clock
|
| 3 |
|
| 4 |
-
This module provides
|
| 5 |
-
|
| 6 |
-
|
| 7 |
|
| 8 |
-
|
| 9 |
-
|
|
|
|
|
|
|
| 10 |
"""
|
| 11 |
|
| 12 |
import json
|
| 13 |
import logging
|
| 14 |
from pathlib import Path
|
| 15 |
-
from typing import Optional, Tuple
|
| 16 |
|
| 17 |
import cv2
|
| 18 |
import numpy as np
|
| 19 |
|
| 20 |
-
from
|
| 21 |
-
from .models import
|
| 22 |
|
| 23 |
logger = logging.getLogger(__name__)
|
| 24 |
|
| 25 |
|
| 26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
"""
|
| 28 |
Extracts and preprocesses play clock regions from video frames.
|
| 29 |
|
| 30 |
-
The
|
| 31 |
-
|
| 32 |
-
|
| 33 |
|
| 34 |
-
Note:
|
| 35 |
-
in digit_template_reader.py for the actual clock value reading.
|
| 36 |
"""
|
| 37 |
|
| 38 |
def __init__(self, region_config_path: Optional[str] = None, region_config: Optional[PlayClockRegionConfig] = None):
|
| 39 |
"""
|
| 40 |
-
Initialize the play clock
|
| 41 |
|
| 42 |
Args:
|
| 43 |
region_config_path: Path to JSON config file with play clock region coordinates
|
|
@@ -47,11 +53,11 @@ class PlayClockReader:
|
|
| 47 |
|
| 48 |
if region_config:
|
| 49 |
self.config = region_config
|
| 50 |
-
logger.info("
|
| 51 |
elif region_config_path:
|
| 52 |
self.load_config(region_config_path)
|
| 53 |
else:
|
| 54 |
-
logger.warning("
|
| 55 |
|
| 56 |
def load_config(self, config_path: str) -> None:
|
| 57 |
"""
|
|
@@ -84,7 +90,7 @@ class PlayClockReader:
|
|
| 84 |
self.config.height,
|
| 85 |
)
|
| 86 |
|
| 87 |
-
def
|
| 88 |
"""
|
| 89 |
Extract the play clock region from the frame.
|
| 90 |
|
|
@@ -125,21 +131,7 @@ class PlayClockReader:
|
|
| 125 |
region = frame[pc_y : pc_y + pc_h, pc_x : pc_x + pc_w].copy()
|
| 126 |
return region
|
| 127 |
|
| 128 |
-
def
|
| 129 |
-
"""
|
| 130 |
-
Detect if the play clock digits are red (displayed when clock is at 5 seconds or less).
|
| 131 |
-
|
| 132 |
-
Delegates to shared detect_red_digits function from digit_template_reader.
|
| 133 |
-
|
| 134 |
-
Args:
|
| 135 |
-
region: Play clock region (BGR format)
|
| 136 |
-
|
| 137 |
-
Returns:
|
| 138 |
-
True if red digits detected, False otherwise
|
| 139 |
-
"""
|
| 140 |
-
return _detect_red_shared(region)
|
| 141 |
-
|
| 142 |
-
def _preprocess_for_ocr(self, region: np.ndarray) -> np.ndarray:
|
| 143 |
"""
|
| 144 |
Preprocess the play clock region for OCR (used during template building).
|
| 145 |
|
|
@@ -158,7 +150,7 @@ class PlayClockReader:
|
|
| 158 |
Preprocessed image ready for OCR
|
| 159 |
"""
|
| 160 |
# Check if digits are red (play clock at 5 seconds or less)
|
| 161 |
-
is_red =
|
| 162 |
|
| 163 |
if is_red:
|
| 164 |
# For red digits, use the red channel directly
|
|
@@ -175,7 +167,7 @@ class PlayClockReader:
|
|
| 175 |
|
| 176 |
if is_red:
|
| 177 |
# For red digits, use percentile-based threshold on the red channel
|
| 178 |
-
threshold_value = np.percentile(scaled, 90)
|
| 179 |
_, binary = cv2.threshold(scaled, threshold_value, 255, cv2.THRESH_BINARY)
|
| 180 |
logger.debug("Red digit threshold (90th percentile): %.1f", threshold_value)
|
| 181 |
|
|
@@ -190,7 +182,7 @@ class PlayClockReader:
|
|
| 190 |
_, binary = cv2.threshold(scaled, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
| 191 |
|
| 192 |
# Determine if we need to invert
|
| 193 |
-
mean_intensity = np.mean(binary)
|
| 194 |
if mean_intensity < 128:
|
| 195 |
binary = cv2.bitwise_not(binary)
|
| 196 |
|
|
@@ -205,142 +197,23 @@ class PlayClockReader:
|
|
| 205 |
|
| 206 |
return binary
|
| 207 |
|
|
|
|
|
|
|
|
|
|
| 208 |
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
"""
|
| 212 |
-
Backfill missing play clock readings when there's a clear countdown sequence with gaps.
|
| 213 |
-
|
| 214 |
-
This function fills in missing readings when:
|
| 215 |
-
1. The gap in clock VALUES is 1-3 seconds (configurable via max_gap_seconds)
|
| 216 |
-
2. There's at least one valid reading on EACH side of the gap
|
| 217 |
-
3. The readings form a valid countdown sequence
|
| 218 |
-
|
| 219 |
-
Since the video may be sampled at higher than 1fps, each clock value may appear
|
| 220 |
-
multiple times. This function looks at the actual clock values, not frame indices.
|
| 221 |
-
|
| 222 |
-
Examples (showing clock values, with ? for undetected):
|
| 223 |
-
- [6, 6, 5, 5, ?, ?, 3, 3] → missing value is 4, backfill the gaps
|
| 224 |
-
- [10, 9, ?, ?, ?] → no backfill (no right side continuation)
|
| 225 |
-
- [6, 6, ?, ?, ?, ?, ?, 0, 0] → no backfill if gap > max_gap_seconds
|
| 226 |
-
|
| 227 |
-
Args:
|
| 228 |
-
readings: List of PlayClockReading objects (in chronological order)
|
| 229 |
-
max_gap_seconds: Maximum number of missing SECONDS to backfill (default: 3)
|
| 230 |
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
# Create a copy to avoid modifying the original
|
| 238 |
-
result = list(readings)
|
| 239 |
-
|
| 240 |
-
# Find gaps and try to backfill
|
| 241 |
-
i = 0
|
| 242 |
-
while i < len(result):
|
| 243 |
-
# Skip if this reading is valid
|
| 244 |
-
if result[i].detected and result[i].value is not None:
|
| 245 |
-
i += 1
|
| 246 |
-
continue
|
| 247 |
-
|
| 248 |
-
# Found a missing reading - find the extent of the gap (in frames)
|
| 249 |
-
gap_start = i
|
| 250 |
-
gap_end = i
|
| 251 |
-
while gap_end < len(result) and (not result[gap_end].detected or result[gap_end].value is None):
|
| 252 |
-
gap_end += 1
|
| 253 |
-
|
| 254 |
-
gap_frame_count = gap_end - gap_start
|
| 255 |
-
|
| 256 |
-
# Check if we have valid readings on both sides
|
| 257 |
-
if gap_start == 0:
|
| 258 |
-
logger.debug("Gap at start of sequence (index 0), no left side for backfill")
|
| 259 |
-
i = gap_end
|
| 260 |
-
continue
|
| 261 |
-
|
| 262 |
-
if gap_end >= len(result):
|
| 263 |
-
logger.debug("Gap at end of sequence (index %d), no right side for backfill", gap_start)
|
| 264 |
-
i = gap_end
|
| 265 |
-
continue
|
| 266 |
-
|
| 267 |
-
# Get the clock values on either side
|
| 268 |
-
left_value = result[gap_start - 1].value
|
| 269 |
-
right_value = result[gap_end].value
|
| 270 |
-
|
| 271 |
-
if left_value is None or right_value is None:
|
| 272 |
-
logger.debug("Adjacent values are None, cannot backfill")
|
| 273 |
-
i = gap_end
|
| 274 |
-
continue
|
| 275 |
-
|
| 276 |
-
# Calculate the gap in seconds (clock values)
|
| 277 |
-
# Left should be higher than right in a countdown
|
| 278 |
-
if left_value <= right_value:
|
| 279 |
-
logger.debug("Invalid countdown: left=%d not greater than right=%d", left_value, right_value)
|
| 280 |
-
i = gap_end
|
| 281 |
-
continue
|
| 282 |
-
|
| 283 |
-
seconds_gap = left_value - right_value - 1 # Number of missing clock values
|
| 284 |
-
|
| 285 |
-
# Check if gap in seconds is within our limit
|
| 286 |
-
if seconds_gap > max_gap_seconds:
|
| 287 |
-
logger.debug(
|
| 288 |
-
"Gap of %d seconds (left=%d, right=%d) exceeds max_gap_seconds=%d, skipping backfill",
|
| 289 |
-
seconds_gap,
|
| 290 |
-
left_value,
|
| 291 |
-
right_value,
|
| 292 |
-
max_gap_seconds,
|
| 293 |
-
)
|
| 294 |
-
i = gap_end
|
| 295 |
-
continue
|
| 296 |
-
|
| 297 |
-
if seconds_gap <= 0:
|
| 298 |
-
# No missing values (e.g., left=5, right=4 means we just missed some frames of 5 or 4)
|
| 299 |
-
# Still backfill with a reasonable value (closer to left or right based on position)
|
| 300 |
-
logger.debug("No missing clock values between %d and %d, using nearest value interpolation", left_value, right_value)
|
| 301 |
-
for j in range(gap_frame_count):
|
| 302 |
-
# Use left value for first half, right value for second half
|
| 303 |
-
if j < gap_frame_count / 2:
|
| 304 |
-
backfill_value = left_value
|
| 305 |
-
else:
|
| 306 |
-
backfill_value = right_value
|
| 307 |
-
result[gap_start + j] = PlayClockReading(
|
| 308 |
-
detected=True,
|
| 309 |
-
value=backfill_value,
|
| 310 |
-
confidence=0.0,
|
| 311 |
-
raw_text=f"BACKFILLED_{backfill_value}",
|
| 312 |
-
)
|
| 313 |
-
i = gap_end
|
| 314 |
-
continue
|
| 315 |
-
|
| 316 |
-
# Calculate the missing clock values
|
| 317 |
-
missing_values = list(range(left_value - 1, right_value, -1))
|
| 318 |
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
missing_values,
|
| 326 |
)
|
| 327 |
-
|
| 328 |
-
# Distribute missing values across the gap frames
|
| 329 |
-
for j in range(gap_frame_count):
|
| 330 |
-
# Calculate which missing value this frame should have
|
| 331 |
-
position = j / gap_frame_count
|
| 332 |
-
value_index = int(position * len(missing_values))
|
| 333 |
-
value_index = min(value_index, len(missing_values) - 1)
|
| 334 |
-
backfill_value = missing_values[value_index]
|
| 335 |
-
|
| 336 |
-
result[gap_start + j] = PlayClockReading(
|
| 337 |
-
detected=True,
|
| 338 |
-
value=backfill_value,
|
| 339 |
-
confidence=0.0,
|
| 340 |
-
raw_text=f"BACKFILLED_{backfill_value}",
|
| 341 |
-
)
|
| 342 |
-
logger.debug(" Backfilled index %d with value %d", gap_start + j, backfill_value)
|
| 343 |
-
|
| 344 |
-
i = gap_end
|
| 345 |
-
|
| 346 |
-
return result
|
|
|
|
| 1 |
"""
|
| 2 |
+
Play clock region extraction and OCR preprocessing for template building.
|
| 3 |
|
| 4 |
+
This module provides:
|
| 5 |
+
- PlayClockRegionExtractor: Extracts and preprocesses play clock regions
|
| 6 |
+
- OCR preprocessing for initial digit labeling during template building
|
| 7 |
|
| 8 |
+
The region extraction logic determines WHERE to look in the frame,
|
| 9 |
+
while the OCR preprocessing prepares images for EasyOCR labeling.
|
| 10 |
+
|
| 11 |
+
Color detection utilities are shared from utils.color to eliminate code duplication.
|
| 12 |
"""
|
| 13 |
|
| 14 |
import json
|
| 15 |
import logging
|
| 16 |
from pathlib import Path
|
| 17 |
+
from typing import Any, Optional, Tuple
|
| 18 |
|
| 19 |
import cv2
|
| 20 |
import numpy as np
|
| 21 |
|
| 22 |
+
from utils import detect_red_digits
|
| 23 |
+
from .models import PlayClockRegionConfig
|
| 24 |
|
| 25 |
logger = logging.getLogger(__name__)
|
| 26 |
|
| 27 |
|
| 28 |
+
# =============================================================================
|
| 29 |
+
# Play Clock Region Extractor
|
| 30 |
+
# =============================================================================
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class PlayClockRegionExtractor:
|
| 34 |
"""
|
| 35 |
Extracts and preprocesses play clock regions from video frames.
|
| 36 |
|
| 37 |
+
The extractor locates the play clock sub-region within the scorebug
|
| 38 |
+
and preprocesses it for OCR during template building. This class
|
| 39 |
+
handles the geometry of WHERE to look in the frame.
|
| 40 |
|
| 41 |
+
Note: For reading actual clock values, use ReadPlayClock from readers.playclock.
|
|
|
|
| 42 |
"""
|
| 43 |
|
| 44 |
def __init__(self, region_config_path: Optional[str] = None, region_config: Optional[PlayClockRegionConfig] = None):
|
| 45 |
"""
|
| 46 |
+
Initialize the play clock region extractor.
|
| 47 |
|
| 48 |
Args:
|
| 49 |
region_config_path: Path to JSON config file with play clock region coordinates
|
|
|
|
| 53 |
|
| 54 |
if region_config:
|
| 55 |
self.config = region_config
|
| 56 |
+
logger.info("PlayClockRegionExtractor initialized with direct config")
|
| 57 |
elif region_config_path:
|
| 58 |
self.load_config(region_config_path)
|
| 59 |
else:
|
| 60 |
+
logger.warning("PlayClockRegionExtractor initialized without region config - call load_config() before use")
|
| 61 |
|
| 62 |
def load_config(self, config_path: str) -> None:
|
| 63 |
"""
|
|
|
|
| 90 |
self.config.height,
|
| 91 |
)
|
| 92 |
|
| 93 |
+
def extract_region(self, frame: np.ndarray[Any, Any], scorebug_bbox: Tuple[int, int, int, int]) -> Optional[np.ndarray[Any, Any]]:
|
| 94 |
"""
|
| 95 |
Extract the play clock region from the frame.
|
| 96 |
|
|
|
|
| 131 |
region = frame[pc_y : pc_y + pc_h, pc_x : pc_x + pc_w].copy()
|
| 132 |
return region
|
| 133 |
|
| 134 |
+
def preprocess_for_ocr(self, region: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 135 |
"""
|
| 136 |
Preprocess the play clock region for OCR (used during template building).
|
| 137 |
|
|
|
|
| 150 |
Preprocessed image ready for OCR
|
| 151 |
"""
|
| 152 |
# Check if digits are red (play clock at 5 seconds or less)
|
| 153 |
+
is_red = detect_red_digits(region)
|
| 154 |
|
| 155 |
if is_red:
|
| 156 |
# For red digits, use the red channel directly
|
|
|
|
| 167 |
|
| 168 |
if is_red:
|
| 169 |
# For red digits, use percentile-based threshold on the red channel
|
| 170 |
+
threshold_value = float(np.percentile(np.asarray(scaled), 90))
|
| 171 |
_, binary = cv2.threshold(scaled, threshold_value, 255, cv2.THRESH_BINARY)
|
| 172 |
logger.debug("Red digit threshold (90th percentile): %.1f", threshold_value)
|
| 173 |
|
|
|
|
| 182 |
_, binary = cv2.threshold(scaled, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
| 183 |
|
| 184 |
# Determine if we need to invert
|
| 185 |
+
mean_intensity = np.mean(np.asarray(binary))
|
| 186 |
if mean_intensity < 128:
|
| 187 |
binary = cv2.bitwise_not(binary)
|
| 188 |
|
|
|
|
| 197 |
|
| 198 |
return binary
|
| 199 |
|
| 200 |
+
def get_absolute_coords(self, scorebug_bbox: Tuple[int, int, int, int]) -> Optional[Tuple[int, int, int, int]]:
    """
    Compute the play clock's absolute frame coordinates from the scorebug position.

    Args:
        scorebug_bbox: Scorebug bounding box (x, y, w, h)

    Returns:
        Play clock absolute coordinates (x, y, w, h), or None when no
        region config has been loaded.
    """
    cfg = self.config
    if cfg is None:
        return None

    sb_x, sb_y, _, _ = scorebug_bbox
    abs_x = sb_x + cfg.x_offset
    abs_y = sb_y + cfg.y_offset
    return (abs_x, abs_y, cfg.width, cfg.height)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/setup/template_builder.py
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Digit template builder for creating play clock digit templates from OCR samples.
|
| 3 |
+
|
| 4 |
+
This module provides the DigitTemplateBuilder class which collects samples from
|
| 5 |
+
the play clock region, extracts individual digits, and builds averaged templates
|
| 6 |
+
for each unique digit value.
|
| 7 |
+
|
| 8 |
+
Region extraction and preprocessing utilities are shared from utils to eliminate code duplication.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 13 |
+
|
| 14 |
+
import cv2
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
from utils import (
|
| 18 |
+
extract_center_region,
|
| 19 |
+
extract_far_left_region,
|
| 20 |
+
extract_left_region,
|
| 21 |
+
extract_right_region,
|
| 22 |
+
preprocess_playclock_region,
|
| 23 |
+
)
|
| 24 |
+
from .coverage import ONES_DIGITS, categorize_template_keys
|
| 25 |
+
from .models import DigitSample, DigitTemplate
|
| 26 |
+
from .template_library import DigitTemplateLibrary
|
| 27 |
+
|
| 28 |
+
logger = logging.getLogger(__name__)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class DigitTemplateBuilder:
    """
    Builds digit templates from OCR-labeled play clock samples.

    Collects samples from the play clock region, extracts individual digits,
    and builds averaged templates for each unique digit value.

    Uses color normalization so red and white digits produce the same template.
    """

    # Play clock region dimensions (from config)
    DEFAULT_REGION_WIDTH = 50
    DEFAULT_REGION_HEIGHT = 28

    def __init__(self, region_width: int = DEFAULT_REGION_WIDTH, region_height: int = DEFAULT_REGION_HEIGHT):
        """
        Initialize the template builder.

        Args:
            region_width: Width of play clock region in pixels
            region_height: Height of play clock region in pixels
        """
        self.region_width = region_width
        self.region_height = region_height

        # Collected samples: {(is_tens, digit_value, position): [DigitSample, ...]}
        self.samples: Dict[Tuple[bool, int, str], List[DigitSample]] = {}

        # Raw clock region images kept for potential reprocessing:
        # each entry is (timestamp, clock_value, region)
        self.raw_regions: List[Tuple[float, int, np.ndarray[Any, Any]]] = []

        logger.info("DigitTemplateBuilder initialized (region: %dx%d)", region_width, region_height)

    def preprocess_region(self, region: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        """
        Preprocess play clock region for template extraction.

        Delegates to shared utility function in utils.regions.

        Args:
            region: Play clock region (BGR format)

        Returns:
            Preprocessed binary image (white digits on black background)
        """
        return preprocess_playclock_region(region, scale_factor=4)

    def extract_digits(
        self, preprocessed: np.ndarray[Any, Any], clock_value: int
    ) -> Tuple[
        Optional[np.ndarray[Any, Any]],
        Optional[np.ndarray[Any, Any]],
        Optional[np.ndarray[Any, Any]],
        Optional[np.ndarray[Any, Any]],
    ]:
        """
        Extract individual digit images from preprocessed play clock region.

        For double-digit values (10-40): extracts left (tens) and right (ones).
        For single-digit values (0-9): extracts far-left (blank) and center (ones).

        Args:
            preprocessed: Preprocessed play clock image (scaled 4x)
            clock_value: The known clock value (0-40)

        Returns:
            Tuple of (tens_digit_image, ones_right_image, ones_center_image, blank_image)
            - For double-digit: tens=left, ones_right=right, ones_center=None, blank=None
            - For single-digit: tens=None, ones_right=None, ones_center=center, blank=far_left
        """
        if clock_value >= 10:
            # Double-digit: standard left/right split
            return extract_left_region(preprocessed), extract_right_region(preprocessed), None, None
        # Single-digit: far-left is blank (truly empty), ones is centered
        return None, None, extract_center_region(preprocessed), extract_far_left_region(preprocessed)

    def _store_sample(
        self,
        digit_value: int,
        is_tens: bool,
        position: str,
        image: np.ndarray[Any, Any],
        clock_value: int,
        timestamp: float,
        confidence: float,
    ) -> None:
        """Create a DigitSample and file it under its (is_tens, digit, position) key."""
        sample = DigitSample(
            digit_value=digit_value,
            is_tens_digit=is_tens,
            position=position,
            image=image,
            source_clock_value=clock_value,
            timestamp=timestamp,
            confidence=confidence,
        )
        self.samples.setdefault((is_tens, digit_value, position), []).append(sample)

    def add_sample(self, region: np.ndarray[Any, Any], clock_value: int, timestamp: float, confidence: float = 1.0) -> None:
        """
        Add a play clock sample for template building.

        Routes samples based on display layout:
        - Single-digit (0-9): Digit is CENTER-aligned, tens position is blank
        - Double-digit (10-40): Tens on LEFT, ones on RIGHT

        Args:
            region: Play clock region (BGR format, original size)
            clock_value: OCR-determined clock value (0-40)
            timestamp: Video timestamp
            confidence: OCR confidence score
        """
        if clock_value < 0 or clock_value > 40:
            logger.warning("Invalid clock value %d, skipping sample", clock_value)
            return

        # Store raw region for potential reprocessing
        self.raw_regions.append((timestamp, clock_value, region.copy()))

        # Preprocess (handles red-to-white conversion automatically)
        preprocessed = self.preprocess_region(region)

        # Extract digits based on single vs double digit display
        tens_img, ones_right_img, ones_center_img, blank_img = self.extract_digits(preprocessed, clock_value)

        ones_digit = clock_value % 10

        if clock_value >= 10:
            # Double-digit display (10-40): tens on left, ones on right
            tens_digit = clock_value // 10
            assert tens_img is not None and ones_right_img is not None  # guaranteed by extract_digits
            self._store_sample(tens_digit, True, "left", tens_img, clock_value, timestamp, confidence)
            self._store_sample(ones_digit, False, "right", ones_right_img, clock_value, timestamp, confidence)
            logger.debug(
                "Added double-digit sample: clock=%d, tens=%d (left), ones=%d (right), t=%.1f",
                clock_value,
                tens_digit,
                ones_digit,
                timestamp,
            )
        else:
            # Single-digit display (0-9): digit is centered, tens position is blank.
            # The blank sample keeps "left" as its position key for compatibility.
            assert blank_img is not None and ones_center_img is not None  # guaranteed by extract_digits
            self._store_sample(-1, True, "left", blank_img, clock_value, timestamp, confidence)
            self._store_sample(ones_digit, False, "center", ones_center_img, clock_value, timestamp, confidence)
            logger.debug(
                "Added single-digit sample: clock=%d, ones=%d (center), blank (far-left), t=%.1f",
                clock_value,
                ones_digit,
                timestamp,
            )

    def get_sample_count(self) -> Dict[str, int]:
        """Get count of samples collected for each digit and position."""
        counts: Dict[str, int] = {}
        for (is_tens, digit, position), samples in self.samples.items():
            type_str = "tens" if is_tens else "ones"
            digit_str = "blank" if digit == -1 else str(digit)
            counts[f"{type_str}_{digit_str}_{position}"] = len(samples)
        return counts

    def build_templates(self, min_samples: int = 3) -> DigitTemplateLibrary:
        """
        Build templates from collected samples.

        For each digit/position combination, averages multiple samples
        to create a robust template.

        Args:
            min_samples: Minimum samples required to build a template (default: 3)

        Returns:
            DigitTemplateLibrary with built templates
        """
        library = DigitTemplateLibrary()

        for (is_tens, digit, position), samples in self.samples.items():
            digit_display = "blank" if digit == -1 else str(digit)
            if len(samples) < min_samples:
                logger.warning(
                    "Insufficient samples for %s digit %s (%s): %d < %d",
                    "tens" if is_tens else "ones",
                    digit_display,
                    position,
                    len(samples),
                    min_samples,
                )
                continue

            # Average all samples, resizing each to the first sample's shape
            target_shape = samples[0].image.shape
            sum_image = np.zeros(target_shape, dtype=np.float32)
            valid_count = 0
            total_confidence = 0.0

            for sample in samples:
                img = sample.image
                if img.shape != target_shape:
                    img = cv2.resize(img, (target_shape[1], target_shape[0]))
                sum_image += img.astype(np.float32)
                valid_count += 1
                total_confidence += sample.confidence

            if valid_count > 0:
                avg_image = (sum_image / valid_count).astype(np.uint8)

                # Threshold the averaged image to clean it up
                _, template_img = cv2.threshold(avg_image, 127, 255, cv2.THRESH_BINARY)

                library.add_template(
                    DigitTemplate(
                        digit_value=digit,
                        is_tens_digit=is_tens,
                        position=position,
                        template=template_img,
                        sample_count=valid_count,
                        avg_confidence=total_confidence / valid_count,
                    )
                )
                logger.info(
                    "Built template: %s digit %s (%s) from %d samples",
                    "tens" if is_tens else "ones",
                    digit_display,
                    position,
                    valid_count,
                )

        # Log coverage status; guard the percentage against a zero requirement count
        coverage = library.get_coverage_status()
        if coverage["total_needed"]:
            logger.info(
                "Template coverage: %d/%d (%.1f%%)",
                coverage["total_have"],
                coverage["total_needed"],
                100 * coverage["total_have"] / coverage["total_needed"],
            )

        return library

    def get_coverage_status(self) -> Dict[str, Any]:
        """Get current sample coverage status."""
        # Keys for samples that have at least one entry
        keys_with_samples = [key for key, samples in self.samples.items() if samples]

        # Use shared utility to categorize
        ones_center_have, ones_right_have, tens_have, has_blank = categorize_template_keys(keys_with_samples)

        return {
            "ones_center": sorted(ones_center_have),
            "ones_right": sorted(ones_right_have),
            "tens": sorted(tens_have),
            "has_blank": has_blank,
            "ones_center_missing": sorted(ONES_DIGITS - ones_center_have),
            "ones_right_missing": sorted(ONES_DIGITS - ones_right_have),
            "tens_missing": sorted({1, 2, 3, 4} - tens_have),
        }

    def get_coverage_estimate(self) -> float:
        """
        Get a simple coverage estimate as a float (0.0-1.0).

        Returns:
            Coverage estimate where 1.0 = all templates have samples
        """
        status = self.get_coverage_status()

        # Count what we have (with at least 1 sample each)
        total_have = len(status["ones_center"]) + len(status["ones_right"]) + len(status["tens"])
        if status["has_blank"]:
            total_have += 1

        # Total needed: 10 ones_center + 10 ones_right + 4 tens + 1 blank = 25
        return total_have / 25
|
src/setup/template_library.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Digit template library for storing and managing play clock digit templates.
|
| 3 |
+
|
| 4 |
+
This module provides the DigitTemplateLibrary class for saving, loading, and
|
| 5 |
+
managing digit templates used for play clock reading.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
import logging
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 12 |
+
|
| 13 |
+
import cv2
|
| 14 |
+
|
| 15 |
+
from .coverage import ONES_DIGITS, calculate_coverage_status, categorize_template_keys
|
| 16 |
+
from .models import DigitTemplate
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class DigitTemplateLibrary:
    """
    Stores and manages digit templates for play clock reading.

    Uses color normalization to handle both red and white digits with a single
    template set. Supports position-aware templates to handle both single-digit
    (centered) and double-digit (left/right split) layouts:
    - Ones digits (center): 0-9 from single-digit displays (10 templates)
    - Ones digits (right): 0-9 from double-digit displays (10 templates)
    - Tens digits (left): 1, 2, 3, 4 from double-digit displays (4 templates)
    - Blank (left): Empty tens position from single-digit displays (1 template)
    Total: 25 templates needed for full coverage
    """

    # Template coverage requirements (documentation of the required set).
    # NOTE(review): inside method bodies the bare name ONES_DIGITS resolves to
    # the module-level import from .coverage, NOT these class attributes —
    # Python methods do not see class scope. These attributes are kept for
    # backward compatibility with any external readers.
    ONES_DIGITS = list(range(10))  # 0-9
    TENS_DIGITS = [-1, 1, 2, 3, 4]  # -1 = blank, 1-4 for 10-40
    POSITIONS = ["left", "center", "right"]

    def __init__(self) -> None:
        """Initialize empty template library."""
        # Templates keyed by (is_tens_digit, digit_value, position).
        self.templates: Dict[Tuple[bool, int, str], DigitTemplate] = {}
        logger.info("DigitTemplateLibrary initialized (empty)")

    def add_template(self, template: DigitTemplate) -> None:
        """
        Add a template to the library.

        Replaces any existing template with the same
        (is_tens_digit, digit_value, position) key.

        Args:
            template: DigitTemplate to add
        """
        key = (template.is_tens_digit, template.digit_value, template.position)
        self.templates[key] = template
        digit_display = "blank" if template.digit_value == -1 else str(template.digit_value)
        logger.debug(
            "Added template: tens=%s, digit=%s, position=%s",
            template.is_tens_digit,
            digit_display,
            template.position,
        )

    def get_template(self, is_tens: bool, digit_value: int, position: str) -> Optional[DigitTemplate]:
        """
        Get a template from the library.

        Args:
            is_tens: Whether this is a tens digit
            digit_value: The digit value (-1 for blank, 0-9 for digits)
            position: Template position ("left", "center", or "right")

        Returns:
            DigitTemplate if found, None otherwise
        """
        key = (is_tens, digit_value, position)
        return self.templates.get(key)

    def get_all_templates(self, is_tens: bool, position: Optional[str] = None) -> List[DigitTemplate]:
        """
        Get all templates for a specific digit position.

        Args:
            is_tens: Whether to get tens digit templates
            position: Optional position filter ("left", "center", or "right")

        Returns:
            List of matching DigitTemplate objects
        """
        # Comprehension instead of nested-if append loop; same filter semantics.
        return [
            template
            for (tens, _, pos), template in self.templates.items()
            if tens == is_tens and (position is None or pos == position)
        ]

    def get_coverage_status(self) -> Dict[str, Any]:
        """
        Get the current template coverage status.

        Returns:
            Dictionary with coverage information
        """
        # Use shared utility to categorize template keys
        ones_center_have, ones_right_have, tens_have, has_blank = categorize_template_keys(self.templates.keys())

        # Get base coverage status from shared utility
        status = calculate_coverage_status(ones_center_have, ones_right_have, tens_have, has_blank, total_items=len(self.templates))

        # Convert -1 to "blank" for display; sort ints before the "blank" string.
        def format_tens(digits: set[int]) -> list[str | int]:
            return sorted(["blank" if d == -1 else d for d in digits], key=lambda x: (isinstance(x, str), x))

        # Add legacy fields for backward compatibility.
        # ONES_DIGITS here is the module-level set imported from .coverage
        # (set arithmetic below requires a set, not the class-attribute list).
        status["ones_have"] = sorted(ones_center_have | ones_right_have)
        status["ones_missing"] = sorted((ONES_DIGITS - ones_center_have) & (ONES_DIGITS - ones_right_have))
        status["tens_have_formatted"] = format_tens(tens_have | ({-1} if has_blank else set()))
        status["tens_missing_formatted"] = format_tens(set(status["tens_missing"]) | (set() if has_blank else {-1}))

        return status

    def is_complete(self) -> bool:
        """Check if all required templates are present."""
        return bool(self.get_coverage_status()["is_complete"])

    def save(self, output_path: str) -> None:
        """
        Save templates to disk.

        Writes one grayscale PNG per template plus a templates_metadata.json
        index (version 2 format, which records the template position).

        Args:
            output_path: Path to save directory
        """
        output_dir = Path(output_path)
        output_dir.mkdir(parents=True, exist_ok=True)

        templates_list: list[dict[str, object]] = []

        for (is_tens, digit, position), template in self.templates.items():
            # Use "blank" instead of -1 for the empty tens digit in filenames
            digit_str = "blank" if digit == -1 else str(digit)
            # Tens digits at their default "left" position keep legacy names
            # (e.g. "tens_1.png"); all other templates get a position suffix.
            position_suffix = f"_{position}" if position != "left" or not is_tens else ""
            filename = f"{'tens' if is_tens else 'ones'}_{digit_str}{position_suffix}.png"
            cv2.imwrite(str(output_dir / filename), template.template)
            templates_list.append(
                {
                    "filename": filename,
                    "digit_value": digit_str,  # Use "blank" for display
                    "is_tens_digit": template.is_tens_digit,
                    "position": position,
                    "sample_count": template.sample_count,
                    "avg_confidence": template.avg_confidence,
                }
            )

        metadata = {"templates": templates_list, "version": 2}  # Version 2 includes position

        with open(output_dir / "templates_metadata.json", "w", encoding="utf-8") as f:
            json.dump(metadata, f, indent=2)

        logger.info("Saved %d templates to %s", len(self.templates), output_path)

    def load(self, input_path: str) -> bool:
        """
        Load templates from disk.

        Handles both v1 (no position field) and v2 metadata formats. Entries
        whose image file is missing or unreadable are skipped with a warning.

        Args:
            input_path: Path to templates directory

        Returns:
            True if loaded successfully, False otherwise
        """
        input_dir = Path(input_path)
        metadata_path = input_dir / "templates_metadata.json"

        if not metadata_path.exists():
            logger.warning("No templates metadata found at %s", metadata_path)
            return False

        with open(metadata_path, "r", encoding="utf-8") as f:
            metadata = json.load(f)

        version = metadata.get("version", 1)

        for entry in metadata.get("templates", []):
            img_path = input_dir / entry["filename"]
            # Guard clauses: skip (with a warning) rather than silently drop
            # templates whose backing image is missing or fails to decode.
            if not img_path.exists():
                logger.warning("Template image missing, skipping: %s", img_path)
                continue
            template_img = cv2.imread(str(img_path), cv2.IMREAD_GRAYSCALE)
            if template_img is None:
                logger.warning("Template image unreadable, skipping: %s", img_path)
                continue

            # Convert "blank" back to -1 for internal use
            digit_value = entry["digit_value"]
            if digit_value == "blank":
                digit_value = -1
            elif isinstance(digit_value, str):
                digit_value = int(digit_value)

            # Handle position (v2) or infer from old format (v1)
            is_tens = entry["is_tens_digit"]
            if version >= 2:
                position = entry.get("position", "left" if is_tens else "right")
            else:
                # V1 format: tens → left, ones → right (old behavior)
                position = "left" if is_tens else "right"

            template = DigitTemplate(
                digit_value=digit_value,
                is_tens_digit=is_tens,
                position=position,
                template=template_img,
                sample_count=entry.get("sample_count", 1),
                avg_confidence=entry.get("avg_confidence", 1.0),
            )
            self.add_template(template)

        logger.info("Loaded %d templates from %s (v%d format)", len(self.templates), input_path, version)
        return True
|
src/tracking/__init__.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tracking modules for cross-frame state management.

This package contains components that track state across multiple frames
to identify play boundaries and other temporal events.

Public API:
- TrackPlayState: Main state machine for play tracking (facade over internal modules)
- PlayMerger: Merges overlapping plays
- ClockResetIdentifier: Post-hoc analysis of 40→25 clock resets
- Models: PlayEvent, PlayState, PlayTrackingState, etc.

Internal modules (not exported):
- play_lifecycle: Play start/end/reset operations
- play_identification_checks: Clock analysis for play boundary identification
- state_handlers: State machine transition logic
"""

# Import order matters in a package __init__: models first (no intra-package
# dependencies), then the modules that build on them.
from .models import PlayEvent, PlayState, PlayTrackingState, TrackPlayStateConfig, TimeoutInfo, ClockResetStats
from .play_state import TrackPlayState
from .play_merger import PlayMerger
from .clock_reset_identifier import ClockResetIdentifier

# Explicit public API: controls `from tracking import *` and documents
# which names are supported for external use.
__all__ = [
    # Models
    "PlayEvent",
    "PlayState",
    "PlayTrackingState",
    "TrackPlayStateConfig",
    "TimeoutInfo",
    "ClockResetStats",
    # State machine
    "TrackPlayState",
    # Merger
    "PlayMerger",
    # Clock reset identification
    "ClockResetIdentifier",
]
|