andytaylor-smg committed on
Commit
d12d00d
·
1 Parent(s): 21c8945

move to basemodel

Browse files
src/detection/models.py CHANGED
@@ -1,32 +1,31 @@
1
  """
2
- Dataclass models for per-frame detection.
3
 
4
  These models represent the results of detecting various elements in video frames:
5
  scorebug presence and timeout indicators.
6
  """
7
 
8
- from dataclasses import dataclass
9
  from typing import Optional, Tuple, List
10
 
 
11
 
12
- @dataclass
13
- class ScorebugDetection:
14
  """Results from scorebug detection."""
15
 
16
- detected: bool # Whether scorebug was detected
17
- confidence: float # Confidence score (0.0 to 1.0)
18
- bbox: Optional[Tuple[int, int, int, int]] = None # Bounding box (x, y, width, height)
19
- method: str = "unknown" # Detection method used
20
- left_confidence: Optional[float] = None # Left half confidence (when split detection enabled)
21
- right_confidence: Optional[float] = None # Right half confidence (when split detection enabled)
22
 
23
 
24
- @dataclass
25
- class TimeoutRegionConfig:
26
  """Configuration for a team's timeout indicator region."""
27
 
28
- team_name: str # "home" or "away"
29
- bbox: Tuple[int, int, int, int] # x, y, width, height for the 3-oval group
30
 
31
  def to_dict(self) -> dict:
32
  """Convert to dictionary for JSON serialization."""
@@ -42,12 +41,11 @@ class TimeoutRegionConfig:
42
  return cls(team_name=data["team_name"], bbox=bbox)
43
 
44
 
45
- @dataclass
46
- class TimeoutReading:
47
  """Results from timeout indicator reading."""
48
 
49
- home_timeouts: int # 0-3 timeouts remaining
50
- away_timeouts: int # 0-3 timeouts remaining
51
- confidence: float # Overall confidence in reading
52
- home_oval_states: Optional[List[bool]] = None # True = white (available), False = dark (used)
53
- away_oval_states: Optional[List[bool]] = None # True = white (available), False = dark (used)
 
1
  """
2
+ Pydantic models for per-frame detection.
3
 
4
  These models represent the results of detecting various elements in video frames:
5
  scorebug presence and timeout indicators.
6
  """
7
 
 
8
  from typing import Optional, Tuple, List
9
 
10
+ from pydantic import BaseModel, Field
11
 
12
+
13
class ScorebugDetection(BaseModel):
    """Results from scorebug detection."""

    # Core outcome of the detection pass.
    detected: bool = Field(description="Whether scorebug was detected")
    confidence: float = Field(description="Confidence score (0.0 to 1.0)")

    # Extras that are only populated by certain detection modes.
    bbox: Optional[Tuple[int, int, int, int]] = Field(
        default=None, description="Bounding box (x, y, width, height)"
    )
    method: str = Field(default="unknown", description="Detection method used")
    left_confidence: Optional[float] = Field(
        default=None, description="Left half confidence (when split detection enabled)"
    )
    right_confidence: Optional[float] = Field(
        default=None, description="Right half confidence (when split detection enabled)"
    )
22
 
23
 
24
+ class TimeoutRegionConfig(BaseModel):
 
25
  """Configuration for a team's timeout indicator region."""
26
 
27
+ team_name: str = Field(..., description="'home' or 'away'")
28
+ bbox: Tuple[int, int, int, int] = Field(..., description="x, y, width, height for the 3-oval group")
29
 
30
  def to_dict(self) -> dict:
31
  """Convert to dictionary for JSON serialization."""
 
41
  return cls(team_name=data["team_name"], bbox=bbox)
42
 
43
 
44
class TimeoutReading(BaseModel):
    """Results from timeout indicator reading."""

    # Per-team counts plus an overall confidence for the reading.
    home_timeouts: int = Field(description="0-3 timeouts remaining")
    away_timeouts: int = Field(description="0-3 timeouts remaining")
    confidence: float = Field(description="Overall confidence in reading")

    # Raw per-oval states, when available, for debugging the counts above.
    home_oval_states: Optional[List[bool]] = Field(
        default=None, description="True = white (available), False = dark (used)"
    )
    away_oval_states: Optional[List[bool]] = Field(
        default=None, description="True = white (available), False = dark (used)"
    )
src/pipeline/models.py CHANGED
@@ -1,5 +1,5 @@
1
  """
2
- Dataclass models for the pipeline module.
3
 
4
  This module contains all the data structures used by the pipeline components
5
  for configuration, intermediate results, and final output.
@@ -9,10 +9,9 @@ Streaming processing is used for optimal performance (read frame -> process imme
9
  See docs/ocr_to_template_migration.md for details.
10
  """
11
 
12
- from dataclasses import dataclass, field
13
  from typing import Optional, List, Dict, Any, Tuple
14
 
15
- from pydantic import BaseModel
16
 
17
 
18
  # =============================================================================
@@ -20,8 +19,7 @@ from pydantic import BaseModel
20
  # =============================================================================
21
 
22
 
23
- @dataclass
24
- class DetectionConfig:
25
  """Configuration for play detection pipeline.
26
 
27
  Uses template matching for play clock reading (~34x faster than OCR).
@@ -29,19 +27,19 @@ class DetectionConfig:
29
  then streaming detection processes each frame immediately via template matching.
30
  """
31
 
32
- video_path: str # Path to video file
33
- template_path: str # Path to scorebug template
34
- clock_region_config_path: str # Path to play clock region config
35
- start_time: float = 0.0 # Start time in seconds
36
- end_time: Optional[float] = None # End time in seconds (None = full video)
37
- frame_interval: float = 0.5 # Interval between frame samples (seconds)
38
- use_split_detection: bool = True # Enable split-half scorebug detection for robustness to partial overlays
39
  # Template matching configuration
40
- template_collection_frames: int = 400 # Number of frames to use for building digit templates via OCR
41
- digit_template_path: Optional[str] = None # Path to pre-built digit templates (skip collection phase if provided)
42
  # Fixed coordinates mode - skip scorebug detection entirely for maximum speed
43
- fixed_playclock_coords: Optional[Tuple[int, int, int, int]] = None # (x, y, w, h) absolute play clock coords
44
- fixed_scorebug_coords: Optional[Tuple[int, int, int, int]] = None # (x, y, w, h) scorebug region (for metadata)
45
 
46
 
47
  # =============================================================================
@@ -49,19 +47,20 @@ class DetectionConfig:
49
  # =============================================================================
50
 
51
 
52
- @dataclass
53
- class VideoContext:
54
  """Container for video properties and processing state."""
55
 
56
- cap: Any # cv2.VideoCapture (using Any to avoid cv2 typing issues)
57
- fps: float # Frames per second
58
- total_frames: int # Total frame count
59
- duration: float # Video duration in seconds
60
- start_time: float # Segment start time
61
- end_time: float # Segment end time
62
- frame_skip: int # Frames to skip between samples
63
- start_frame: int # First frame to process
64
- end_frame: int # Last frame to process
 
 
65
 
66
 
67
  # =============================================================================
@@ -69,20 +68,19 @@ class VideoContext:
69
  # =============================================================================
70
 
71
 
72
- @dataclass
73
- class DetectionResult:
74
  """Results from play detection pipeline."""
75
 
76
- video: str # Video filename
77
- segment_start: float # Segment start time
78
- segment_end: float # Segment end time
79
- total_frames_processed: int # Number of frames analyzed
80
- frames_with_scorebug: int # Frames where scorebug was detected
81
- frames_with_clock: int # Frames where clock was read successfully
82
- plays: List[Dict[str, Any]] = field(default_factory=list) # Detected plays as dicts
83
- stats: Dict[str, Any] = field(default_factory=dict) # Summary statistics
84
- timing: Dict[str, float] = field(default_factory=dict) # Timing breakdown by section
85
- config: Dict[str, Any] = field(default_factory=dict) # Configuration used for this run (regions, thresholds, etc.)
86
 
87
 
88
  # =============================================================================
@@ -97,12 +95,12 @@ class ChunkResult(BaseModel):
97
  when passing data between worker processes.
98
  """
99
 
100
- chunk_id: int # Identifier for this chunk
101
- start_time: float # Chunk start time in seconds
102
- end_time: float # Chunk end time in seconds
103
- frames_processed: int # Total frames processed in this chunk
104
- frames_with_scorebug: int # Frames where scorebug was detected
105
- frames_with_clock: int # Frames where clock was successfully read
106
- frame_data: List[Dict[str, Any]] # Per-frame detection results
107
- io_time: float # Time spent on video I/O operations
108
- processing_time: float # Total processing time for this chunk
 
1
  """
2
+ Pydantic models for the pipeline module.
3
 
4
  This module contains all the data structures used by the pipeline components
5
  for configuration, intermediate results, and final output.
 
9
  See docs/ocr_to_template_migration.md for details.
10
  """
11
 
 
12
  from typing import Optional, List, Dict, Any, Tuple
13
 
14
+ from pydantic import BaseModel, ConfigDict, Field
15
 
16
 
17
  # =============================================================================
 
19
  # =============================================================================
20
 
21
 
22
+ class DetectionConfig(BaseModel):
 
23
  """Configuration for play detection pipeline.
24
 
25
  Uses template matching for play clock reading (~34x faster than OCR).
 
27
  then streaming detection processes each frame immediately via template matching.
28
  """
29
 
30
+ video_path: str = Field(..., description="Path to video file")
31
+ template_path: str = Field(..., description="Path to scorebug template")
32
+ clock_region_config_path: str = Field(..., description="Path to play clock region config")
33
+ start_time: float = Field(0.0, description="Start time in seconds")
34
+ end_time: Optional[float] = Field(None, description="End time in seconds (None = full video)")
35
+ frame_interval: float = Field(0.5, description="Interval between frame samples (seconds)")
36
+ use_split_detection: bool = Field(True, description="Enable split-half scorebug detection for robustness to partial overlays")
37
  # Template matching configuration
38
+ template_collection_frames: int = Field(400, description="Number of frames to use for building digit templates via OCR")
39
+ digit_template_path: Optional[str] = Field(None, description="Path to pre-built digit templates (skip collection phase if provided)")
40
  # Fixed coordinates mode - skip scorebug detection entirely for maximum speed
41
+ fixed_playclock_coords: Optional[Tuple[int, int, int, int]] = Field(None, description="(x, y, w, h) absolute play clock coords")
42
+ fixed_scorebug_coords: Optional[Tuple[int, int, int, int]] = Field(None, description="(x, y, w, h) scorebug region (for metadata)")
43
 
44
 
45
  # =============================================================================
 
47
  # =============================================================================
48
 
49
 
50
class VideoContext(BaseModel):
    """Container for video properties and processing state."""

    # cv2.VideoCapture is not a pydantic-friendly type, so allow it as-is.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Handle to the open capture plus the source video's intrinsic properties.
    cap: Any = Field(description="cv2.VideoCapture (using Any to avoid cv2 typing issues)")
    fps: float = Field(description="Frames per second")
    total_frames: int = Field(description="Total frame count")
    duration: float = Field(description="Video duration in seconds")

    # The segment of the video being processed and the sampling stride.
    start_time: float = Field(description="Segment start time")
    end_time: float = Field(description="Segment end time")
    frame_skip: int = Field(description="Frames to skip between samples")
    start_frame: int = Field(description="First frame to process")
    end_frame: int = Field(description="Last frame to process")
64
 
65
 
66
  # =============================================================================
 
68
  # =============================================================================
69
 
70
 
71
class DetectionResult(BaseModel):
    """Results from play detection pipeline."""

    # Identification of the run: source video and the segment analyzed.
    video: str = Field(description="Video filename")
    segment_start: float = Field(description="Segment start time")
    segment_end: float = Field(description="Segment end time")

    # Frame-level counters from the detection pass.
    total_frames_processed: int = Field(description="Number of frames analyzed")
    frames_with_scorebug: int = Field(description="Frames where scorebug was detected")
    frames_with_clock: int = Field(description="Frames where clock was read successfully")

    # Aggregated outputs; each defaults to an empty container per instance.
    plays: List[Dict[str, Any]] = Field(
        default_factory=list, description="Detected plays as dicts"
    )
    stats: Dict[str, Any] = Field(
        default_factory=dict, description="Summary statistics"
    )
    timing: Dict[str, float] = Field(
        default_factory=dict, description="Timing breakdown by section"
    )
    config: Dict[str, Any] = Field(
        default_factory=dict,
        description="Configuration used for this run (regions, thresholds, etc.)",
    )
84
 
85
 
86
  # =============================================================================
 
95
  when passing data between worker processes.
96
  """
97
 
98
+ chunk_id: int = Field(..., description="Identifier for this chunk")
99
+ start_time: float = Field(..., description="Chunk start time in seconds")
100
+ end_time: float = Field(..., description="Chunk end time in seconds")
101
+ frames_processed: int = Field(..., description="Total frames processed in this chunk")
102
+ frames_with_scorebug: int = Field(..., description="Frames where scorebug was detected")
103
+ frames_with_clock: int = Field(..., description="Frames where clock was successfully read")
104
+ frame_data: List[Dict[str, Any]] = Field(..., description="Per-frame detection results")
105
+ io_time: float = Field(..., description="Time spent on video I/O operations")
106
+ processing_time: float = Field(..., description="Total processing time for this chunk")
src/readers/models.py CHANGED
@@ -1,40 +1,38 @@
1
  """
2
- Dataclass models for value reading results.
3
 
4
  These models represent the outputs of reading values from detected regions:
5
  play clock readings, template match results, etc.
6
  """
7
 
8
- from dataclasses import dataclass
9
  from typing import Optional
10
 
 
11
 
12
- @dataclass
13
- class PlayClockReading:
14
  """Result from play clock OCR reading."""
15
 
16
- detected: bool # Whether a valid play clock value was detected
17
- value: Optional[int] # Play clock value (0-40 seconds), None if unreadable
18
- confidence: float # Confidence score (0.0 to 1.0)
19
- raw_text: str # Raw OCR output for debugging
20
 
21
 
22
- @dataclass
23
- class TemplateMatchResult:
24
  """Result from template matching for a single digit."""
25
 
26
- digit_value: int # Matched digit value (-1 for blank, 0-9 for digits)
27
- confidence: float # Match confidence (0.0 to 1.0)
28
- is_valid: bool # Whether match confidence exceeds threshold
29
 
30
 
31
- @dataclass
32
- class TemplatePlayClockReading:
33
  """Result from template-based play clock reading."""
34
 
35
- detected: bool # Whether a valid clock value was read
36
- value: Optional[int] # Clock value (0-40), None if unreadable
37
- confidence: float # Overall confidence score
38
- tens_match: Optional[TemplateMatchResult] # Tens digit match result
39
- ones_match: Optional[TemplateMatchResult] # Ones digit match result
40
- method: str # "template_single", "template_double", or "template"
 
1
  """
2
+ Pydantic models for value reading results.
3
 
4
  These models represent the outputs of reading values from detected regions:
5
  play clock readings, template match results, etc.
6
  """
7
 
 
8
  from typing import Optional
9
 
10
+ from pydantic import BaseModel, Field
11
 
12
+
13
class PlayClockReading(BaseModel):
    """Result from play clock OCR reading."""

    # All fields are required; `value` may still be None when OCR fails.
    detected: bool = Field(description="Whether a valid play clock value was detected")
    value: Optional[int] = Field(description="Play clock value (0-40 seconds), None if unreadable")
    confidence: float = Field(description="Confidence score (0.0 to 1.0)")
    raw_text: str = Field(description="Raw OCR output for debugging")
20
 
21
 
22
class TemplateMatchResult(BaseModel):
    """Result from template matching for a single digit."""

    # Best-matching digit, its similarity score, and the threshold verdict.
    digit_value: int = Field(description="Matched digit value (-1 for blank, 0-9 for digits)")
    confidence: float = Field(description="Match confidence (0.0 to 1.0)")
    is_valid: bool = Field(description="Whether match confidence exceeds threshold")
28
 
29
 
30
class TemplatePlayClockReading(BaseModel):
    """Result from template-based play clock reading."""

    # Combined reading assembled from the per-digit matches below.
    detected: bool = Field(description="Whether a valid clock value was read")
    value: Optional[int] = Field(description="Clock value (0-40), None if unreadable")
    confidence: float = Field(description="Overall confidence score")

    # Per-digit match details (required fields, but either may be None).
    tens_match: Optional[TemplateMatchResult] = Field(description="Tens digit match result")
    ones_match: Optional[TemplateMatchResult] = Field(description="Ones digit match result")
    method: str = Field(description="'template_single', 'template_double', or 'template'")
src/setup/models.py CHANGED
@@ -1,49 +1,49 @@
1
  """
2
- Dataclass models for the setup/template building phase.
3
 
4
  These models are used during the initial template collection phase where
5
  OCR-labeled samples are collected and averaged into digit templates.
6
  Also includes region configuration models for setup.
7
  """
8
 
9
- from dataclasses import dataclass
10
-
11
  import numpy as np
 
12
 
13
 
14
- @dataclass
15
- class DigitSample:
16
  """A single digit sample extracted from a play clock region."""
17
 
18
- digit_value: int # 0-9 for ones digit, 0-4 for tens digit, -1 for blank
19
- is_tens_digit: bool # True if this is the tens place digit
20
- position: str # "left", "center", or "right" - where digit appears in region
21
- image: np.ndarray # The digit image (grayscale, preprocessed)
22
- source_clock_value: int # The full clock value this was extracted from
23
- timestamp: float # Video timestamp where this was captured
24
- confidence: float # OCR confidence for this sample
 
 
25
 
26
 
27
- @dataclass
28
- class DigitTemplate:
29
  """A template for matching a specific digit."""
30
 
31
- digit_value: int # 0-9 for ones, 0-4 for tens, -1 for blank
32
- is_tens_digit: bool # True if this is a tens place template
33
- position: str # "left", "center", or "right" - where digit appears in region
34
- template: np.ndarray # The template image (grayscale)
35
- sample_count: int # Number of samples used to build this template
36
- avg_confidence: float # Average OCR confidence of source samples
 
 
37
 
38
 
39
- @dataclass
40
- class PlayClockRegionConfig:
41
  """Configuration for the play clock region relative to the scorebug bounding box."""
42
 
43
- x_offset: int # X offset from scorebug left edge
44
- y_offset: int # Y offset from scorebug top edge
45
- width: int # Width of play clock region
46
- height: int # Height of play clock region
47
- source_video: str # Video used to identify region
48
- scorebug_template: str # Template used for scorebug detection
49
- samples_used: int # Number of frames used to verify region
 
1
  """
2
+ Pydantic models for the setup/template building phase.
3
 
4
  These models are used during the initial template collection phase where
5
  OCR-labeled samples are collected and averaged into digit templates.
6
  Also includes region configuration models for setup.
7
  """
8
 
 
 
9
  import numpy as np
10
+ from pydantic import BaseModel, ConfigDict, Field
11
 
12
 
13
class DigitSample(BaseModel):
    """A single digit sample extracted from a play clock region."""

    # np.ndarray is not a native pydantic type; permit it via model config.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # What the digit is and where it sits within the clock region.
    digit_value: int = Field(description="0-9 for ones digit, 0-4 for tens digit, -1 for blank")
    is_tens_digit: bool = Field(description="True if this is the tens place digit")
    position: str = Field(description="'left', 'center', or 'right' - where digit appears in region")

    # The pixels themselves plus provenance of the capture.
    image: np.ndarray = Field(description="The digit image (grayscale, preprocessed)")
    source_clock_value: int = Field(description="The full clock value this was extracted from")
    timestamp: float = Field(description="Video timestamp where this was captured")
    confidence: float = Field(description="OCR confidence for this sample")
25
 
26
 
27
class DigitTemplate(BaseModel):
    """A template for matching a specific digit."""

    # np.ndarray is not a native pydantic type; permit it via model config.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Identity of the template: which digit, which place, which position.
    digit_value: int = Field(description="0-9 for ones, 0-4 for tens, -1 for blank")
    is_tens_digit: bool = Field(description="True if this is a tens place template")
    position: str = Field(description="'left', 'center', or 'right' - where digit appears in region")

    # The averaged image and statistics about the samples behind it.
    template: np.ndarray = Field(description="The template image (grayscale)")
    sample_count: int = Field(description="Number of samples used to build this template")
    avg_confidence: float = Field(description="Average OCR confidence of source samples")
38
 
39
 
40
class PlayClockRegionConfig(BaseModel):
    """Configuration for the play clock region relative to the scorebug bounding box."""

    # Region geometry, expressed relative to the scorebug's top-left corner.
    x_offset: int = Field(description="X offset from scorebug left edge")
    y_offset: int = Field(description="Y offset from scorebug top edge")
    width: int = Field(description="Width of play clock region")
    height: int = Field(description="Height of play clock region")

    # Provenance: how this region was derived and verified.
    source_video: str = Field(description="Video used to identify region")
    scorebug_template: str = Field(description="Template used for scorebug detection")
    samples_used: int = Field(description="Number of frames used to verify region")
src/tracking/models.py CHANGED
@@ -1,24 +1,24 @@
1
  """
2
- Dataclass models for play tracking.
3
 
4
  These models represent detected plays and their temporal boundaries.
5
  """
6
 
7
- from dataclasses import dataclass
8
  from typing import Optional
9
 
 
10
 
11
- @dataclass
12
- class PlayEvent:
13
  """Represents a detected play with start and end times."""
14
 
15
- play_number: int # Sequential play number
16
- start_time: float # Video timestamp (seconds) when play started
17
- end_time: float # Video timestamp (seconds) when play ended - from backward counting
18
- confidence: float # Overall confidence score
19
- start_method: str # How start was detected: "clock_reset", "clock_reset_25", "clock_freeze"
20
- end_method: str # How end was detected: "backward_calc" (primary), "direct_detect" (secondary)
21
- direct_end_time: Optional[float] = None # End time from direct detection (for comparison)
22
- start_clock_value: Optional[int] = None # Clock value at start detection
23
- end_clock_value: Optional[int] = None # Clock value used for backward calculation
24
- play_type: str = "normal" # Type of play: "normal", "special" (punt/fg/xp after 25-second reset)
 
1
  """
2
+ Pydantic models for play tracking.
3
 
4
  These models represent detected plays and their temporal boundaries.
5
  """
6
 
 
7
  from typing import Optional
8
 
9
+ from pydantic import BaseModel, Field
10
 
11
+
12
class PlayEvent(BaseModel):
    """Represents a detected play with start and end times."""

    # Core temporal boundaries and ordering.
    play_number: int = Field(description="Sequential play number")
    start_time: float = Field(description="Video timestamp (seconds) when play started")
    end_time: float = Field(description="Video timestamp (seconds) when play ended - from backward counting")
    confidence: float = Field(description="Overall confidence score")

    # How each boundary was determined.
    start_method: str = Field(description="How start was detected: 'clock_reset', 'clock_reset_25', 'clock_freeze'")
    end_method: str = Field(description="How end was detected: 'backward_calc' (primary), 'direct_detect' (secondary)")

    # Optional diagnostics captured alongside the detection.
    direct_end_time: Optional[float] = Field(
        default=None, description="End time from direct detection (for comparison)"
    )
    start_clock_value: Optional[int] = Field(
        default=None, description="Clock value at start detection"
    )
    end_clock_value: Optional[int] = Field(
        default=None, description="Clock value used for backward calculation"
    )
    play_type: str = Field(
        default="normal",
        description="Type of play: 'normal', 'special' (punt/fg/xp after 25-second reset)",
    )
src/video/ffmpeg_ops.py CHANGED
@@ -17,11 +17,12 @@ import subprocess
17
  import tempfile
18
  import time
19
  from concurrent.futures import ThreadPoolExecutor, as_completed
20
- from dataclasses import dataclass
21
  from enum import Enum
22
  from pathlib import Path
23
  from typing import Any, Dict, List, Optional, Tuple
24
 
 
 
25
  logger = logging.getLogger(__name__)
26
 
27
 
@@ -33,14 +34,13 @@ class ClipMethod(Enum):
33
  ULTRAFAST = "ultrafast" # Frame-accurate, faster encoding
34
 
35
 
36
- @dataclass
37
- class ClipInfo:
38
  """Information about a clip to extract."""
39
 
40
- start_time: float # Start time in seconds
41
- end_time: float # End time in seconds
42
- output_path: Path # Path for output file
43
- play_number: Optional[int] = None # Optional play number for logging
44
 
45
 
46
  def extract_clip_stream_copy(
 
17
  import tempfile
18
  import time
19
  from concurrent.futures import ThreadPoolExecutor, as_completed
 
20
  from enum import Enum
21
  from pathlib import Path
22
  from typing import Any, Dict, List, Optional, Tuple
23
 
24
+ from pydantic import BaseModel, Field
25
+
26
  logger = logging.getLogger(__name__)
27
 
28
 
 
34
  ULTRAFAST = "ultrafast" # Frame-accurate, faster encoding
35
 
36
 
37
class ClipInfo(BaseModel):
    """Information about a clip to extract."""

    # Clip boundaries within the source video and where to write the result.
    start_time: float = Field(description="Start time in seconds")
    end_time: float = Field(description="End time in seconds")
    output_path: Path = Field(description="Path for output file")
    play_number: Optional[int] = Field(
        default=None, description="Optional play number for logging"
    )
44
 
45
 
46
  def extract_clip_stream_copy(