| """ |
| Video Classification and NFL Play Analysis Module. |
| |
| This module handles: |
| 1. X3D-based video classification using Kinetics-400 pretrained models |
| 2. NFL-specific play state analysis (play_active, play_action, non_play) |
| 3. Play boundary detection across video sequences |
| 4. Video preprocessing and frame extraction |
| |
| Key Components: |
| - VideoClassifier: Main class for video classification |
| - PlayAnalyzer: NFL-specific play state detection |
| - BoundaryDetector: Sequence analysis for play start/end detection |
| """ |
|
|
| import os |
| import json |
| import urllib.request |
| from typing import List, Tuple, Optional, Dict, Any |
|
|
| import torch |
| import numpy as np |
| from pytorchvideo.data.encoded_video import EncodedVideo |
|
|
| from config import ( |
| VIDEO_MODEL_NAME, DEVICE, VIDEO_CLIP_DURATION, VIDEO_NUM_SAMPLES, VIDEO_SIZE, |
| KINETICS_LABELS_URL, KINETICS_LABELS_PATH, VIDEO_MEAN, VIDEO_STD, |
| PLAY_CONFIDENCE_THRESHOLD, PLAY_BOUNDARY_WINDOW_SIZE, |
| PLAY_START_INDICATORS, PLAY_ACTION_INDICATORS, NON_PLAY_INDICATORS, |
| ENABLE_DEBUG_PRINTS, ENABLE_FRAME_SHAPE_DEBUG, TORCH_HUB_CACHE_DIR |
| ) |
|
|
|
|
class VideoClassifier:
    """
    X3D-based video classifier for action recognition.

    Uses PyTorchVideo's pretrained X3D models trained on Kinetics-400 dataset.
    Supports multiple model sizes (xs, s, m, l) with different speed/accuracy tradeoffs.
    """

    def __init__(self, model_name: str = VIDEO_MODEL_NAME, device: torch.device = DEVICE):
        """
        Initialize the video classifier.

        Loads the pretrained model and the Kinetics-400 label map eagerly so
        that the first call to classify_clip does no network/hub work.

        Args:
            model_name: X3D model variant (x3d_xs, x3d_s, x3d_m, x3d_l)
            device: PyTorch device for inference
        """
        self.model_name = model_name
        self.device = device
        self.model: Optional[torch.nn.Module] = None
        self.labels: Optional[Dict[int, str]] = None

        self._load_model()
        self._load_kinetics_labels()

    def _load_model(self) -> None:
        """Load and prepare the X3D model for inference (eval mode, on self.device)."""
        if ENABLE_DEBUG_PRINTS:
            print(f"Loading X3D model: {self.model_name}")

        # Temporarily redirect the torch.hub cache if a custom dir is configured,
        # restoring the original afterwards so other hub users are unaffected.
        original_cache_dir = None
        if TORCH_HUB_CACHE_DIR:
            original_cache_dir = torch.hub.get_dir()
            torch.hub.set_dir(TORCH_HUB_CACHE_DIR)

        try:
            self.model = torch.hub.load(
                "facebookresearch/pytorchvideo",
                self.model_name,
                pretrained=True
            ).to(self.device).eval()
        finally:
            # Always restore the original cache dir, even if loading failed.
            if original_cache_dir is not None:
                torch.hub.set_dir(original_cache_dir)

        if ENABLE_DEBUG_PRINTS:
            print(f"Model loaded successfully on {self.device}")

    def _load_kinetics_labels(self) -> None:
        """Download (if needed) and load Kinetics-400 class labels into self.labels.

        Normalizes the on-disk JSON into a {class_index: label} dict regardless
        of whether the file is a list, an {index: label} dict, or a
        {label: index} dict.
        """
        if not os.path.exists(KINETICS_LABELS_PATH):
            if ENABLE_DEBUG_PRINTS:
                print("Downloading Kinetics-400 labels...")
            urllib.request.urlretrieve(KINETICS_LABELS_URL, KINETICS_LABELS_PATH)

        with open(KINETICS_LABELS_PATH, "r") as f:
            raw_labels = json.load(f)

        if isinstance(raw_labels, list):
            # Plain list: position is the class index.
            self.labels = {i: raw_labels[i] for i in range(len(raw_labels))}
        elif isinstance(raw_labels, dict):
            if all(k.isdigit() for k in raw_labels.keys()):
                # {"0": "label", ...} — keys are stringified indices.
                self.labels = {int(k): v for k, v in raw_labels.items()}
            else:
                # {"label": index, ...} — invert, skipping non-integer values.
                self.labels = {}
                for k, v in raw_labels.items():
                    try:
                        self.labels[int(v)] = k
                    except (ValueError, TypeError):
                        continue
        else:
            raise ValueError(f"Unexpected label format in {KINETICS_LABELS_PATH}")

        if ENABLE_DEBUG_PRINTS:
            print(f"Loaded {len(self.labels)} Kinetics-400 labels")

    def preprocess_video(self, frames: torch.Tensor,
                         num_samples: int = VIDEO_NUM_SAMPLES,
                         size: int = VIDEO_SIZE) -> torch.Tensor:
        """
        Preprocess video frames for X3D model input.

        Pipeline: scale to [0, 1] -> uniform temporal sampling -> center crop
        -> add batch dim -> channel-wise normalization.

        Args:
            frames: Video tensor of shape (C, T, H, W), uint8 pixel values
            num_samples: Number of frames to sample
            size: Spatial size for center crop

        Returns:
            Normalized tensor of shape (1, C, num_samples, size, size), on the
            same device as `frames` (caller moves it to the model device).
        """
        C, T, H, W = frames.shape

        # (C, T, H, W) -> (T, C, H, W), scaled to [0, 1]
        video = frames.permute(1, 0, 2, 3).float() / 255.0

        # Uniform temporal sampling; indices repeat when T < num_samples.
        indices = torch.linspace(0, T - 1, num_samples).long()
        clip = video[indices]

        # Center crop. NOTE(review): if H or W < size this yields a smaller
        # crop rather than resizing up — confirm inputs are at least `size`.
        top = max((H - size) // 2, 0)
        left = max((W - size) // 2, 0)
        clip = clip[:, :, top:top+size, left:left+size]

        # (T, C, H, W) -> (1, C, T, H, W) as expected by the model.
        clip = clip.permute(1, 0, 2, 3).unsqueeze(0)

        # BUG FIX: build the normalization stats on the clip's own device.
        # The original allocated them on self.device while the clip was still
        # on CPU (classify_clip moves it to the device only afterwards),
        # which raised a device-mismatch error whenever self.device was CUDA.
        mean = torch.tensor(VIDEO_MEAN, device=clip.device).view(1, 3, 1, 1, 1)
        std = torch.tensor(VIDEO_STD, device=clip.device).view(1, 3, 1, 1, 1)
        clip = (clip - mean) / std

        return clip

    def classify_clip(self, video_path: str, top_k: int = 5) -> List[Tuple[str, float]]:
        """
        Classify a single video clip and return top-k predictions.

        Best-effort: any failure (unreadable file, decode error, model error)
        is reported via debug print and an empty list is returned.

        Args:
            video_path: Path to video file
            top_k: Number of top predictions to return

        Returns:
            List of (label, confidence) tuples sorted by confidence,
            or [] on failure.
        """
        try:
            # Decode the first VIDEO_CLIP_DURATION seconds of the file.
            video = EncodedVideo.from_path(video_path)
            clip_data = video.get_clip(0, VIDEO_CLIP_DURATION)
            frames = clip_data["video"]

            if frames is None:
                if ENABLE_DEBUG_PRINTS:
                    print(f"[ERROR] Failed to load video frames from {video_path}")
                return []

            if ENABLE_FRAME_SHAPE_DEBUG:
                print(f"[DEBUG] Loaded video frames shape: {frames.shape}")

            input_tensor = self.preprocess_video(frames).to(self.device)

            with torch.no_grad():
                logits = self.model(input_tensor)
                probabilities = torch.softmax(logits, dim=1)[0]

            # Clamp k so an oversized top_k can't make torch.topk raise.
            top_k_probs, top_k_indices = torch.topk(
                probabilities, k=min(top_k, probabilities.numel())
            )

            results = []
            for idx_tensor, prob in zip(top_k_indices, top_k_probs):
                idx = idx_tensor.item()
                label = self.labels.get(idx, f"Class_{idx}")
                results.append((label, float(prob)))

            return results

        except Exception as e:
            # Deliberate broad catch: classification is best-effort per clip.
            if ENABLE_DEBUG_PRINTS:
                print(f"[ERROR] Failed to process {video_path}: {e}")
            return []
|
|
|
|
class PlayAnalyzer:
    """
    NFL-specific play state analyzer.

    Maps raw video-classification predictions onto one of four states:
    - play_active: Active football plays (passing, kicking)
    - play_action: Football-related actions (catching, throwing)
    - non_play: Non-game activities (applauding, commentary)
    - unknown: Low confidence or ambiguous clips
    """

    def __init__(self, confidence_threshold: float = PLAY_CONFIDENCE_THRESHOLD):
        """
        Initialize play analyzer.

        Args:
            confidence_threshold: Minimum confidence for state classification
        """
        self.confidence_threshold = confidence_threshold
        self.play_start_indicators = PLAY_START_INDICATORS
        self.play_action_indicators = PLAY_ACTION_INDICATORS
        self.non_play_indicators = NON_PLAY_INDICATORS

    def analyze_play_state(self, predictions: List[Tuple[str, float]]) -> Tuple[str, float]:
        """
        Determine the play state implied by a set of classifier predictions.

        Args:
            predictions: List of (label, confidence) tuples from video classifier

        Returns:
            Tuple of (play_state, confidence_score)
        """
        if not predictions:
            return "unknown", 0.0

        # Accumulate evidence per state; play-start cues carry double weight.
        scores = {"play_active": 0.0, "play_action": 0.0, "non_play": 0.0}

        for label, confidence in predictions:
            normalized = label.replace('"', '').lower()

            if self._matches_indicators(normalized, self.play_start_indicators):
                scores["play_active"] += confidence * 2.0
            elif self._matches_indicators(normalized, self.play_action_indicators):
                scores["play_action"] += confidence
            elif self._matches_indicators(normalized, self.non_play_indicators):
                scores["non_play"] += confidence

        best = max(scores.values())
        if best < self.confidence_threshold:
            return "unknown", best

        # Tie-break priority: play_active, then play_action, then non_play.
        for state in ("play_active", "play_action", "non_play"):
            if scores[state] == best:
                return state, scores[state]

    def _matches_indicators(self, label: str, indicators: List[str]) -> bool:
        """Return True if any indicator occurs (case-insensitively) in label."""
        for indicator in indicators:
            if indicator.lower() in label:
                return True
        return False
|
|
|
|
class BoundaryDetector:
    """
    Play boundary detection for video sequences.

    Scans consecutive pairs of per-clip play states to find:
    - Play start points (non_play -> play_active transitions)
    - Play end points (play_active -> non_play transitions)
    """

    #: States treated as "in play" / "out of play" when pairing transitions.
    _IN_PLAY = ("play_active", "play_action")
    _OUT_OF_PLAY = ("non_play", "unknown")

    def __init__(self, window_size: int = PLAY_BOUNDARY_WINDOW_SIZE):
        """
        Initialize boundary detector.

        Args:
            window_size: Window size for sequence analysis
        """
        self.window_size = window_size
        self.play_analyzer = PlayAnalyzer()

    def detect_boundaries(self, clip_results: List[List[Tuple[str, float]]]) -> List[Tuple[str, int, float]]:
        """
        Detect play boundaries across a sequence of video clips.

        Args:
            clip_results: List of classification results for each clip

        Returns:
            List of (boundary_type, clip_index, confidence) tuples
        """
        # Too few clips for any meaningful sequence analysis.
        if len(clip_results) < self.window_size:
            return []

        # Reduce each clip's predictions to a (state, confidence) pair.
        states = [self.play_analyzer.analyze_play_state(r) for r in clip_results]

        threshold = self.play_analyzer.confidence_threshold
        boundaries = []

        # Walk consecutive pairs of states looking for transitions.
        for idx, (current, following) in enumerate(zip(states, states[1:])):
            current_state, current_conf = current
            next_state, next_conf = following

            # Out-of-play -> in-play: a confident play start at the next clip.
            if (current_state in self._OUT_OF_PLAY
                    and next_state in self._IN_PLAY
                    and next_conf > threshold):
                boundaries.append(("play_start", idx + 1, next_conf))

            # In-play -> out-of-play: a confident play end at the current clip.
            if (current_state in self._IN_PLAY
                    and next_state in self._OUT_OF_PLAY
                    and current_conf > threshold):
                boundaries.append(("play_end", idx, current_conf))

        return boundaries
|
|
|
|
| |
| |
| |
|
|
| |
# Lazily-created module-level singletons; populated on first use by the
# corresponding get_*() accessor functions rather than at import time,
# so importing this module does not trigger model downloads.
_video_classifier = None
_play_analyzer = None
_boundary_detector = None
|
|
def get_video_classifier() -> VideoClassifier:
    """Return the shared VideoClassifier, constructing it on first use."""
    global _video_classifier
    if _video_classifier is not None:
        return _video_classifier
    _video_classifier = VideoClassifier()
    return _video_classifier
|
|
def get_play_analyzer() -> PlayAnalyzer:
    """Return the shared PlayAnalyzer, constructing it on first use."""
    global _play_analyzer
    if _play_analyzer is not None:
        return _play_analyzer
    _play_analyzer = PlayAnalyzer()
    return _play_analyzer
|
|
def get_boundary_detector() -> BoundaryDetector:
    """Return the shared BoundaryDetector, constructing it on first use."""
    global _boundary_detector
    if _boundary_detector is not None:
        return _boundary_detector
    _boundary_detector = BoundaryDetector()
    return _boundary_detector
|
|
def predict_clip(path: str) -> List[Tuple[str, float]]:
    """
    Backward compatibility function for video classification.

    Classifies the clip and, when classification succeeds, prints the
    inferred play state as a side effect.

    Args:
        path: Path to video file

    Returns:
        List of (label, confidence) tuples
    """
    results = get_video_classifier().classify_clip(path)
    if not results:
        return results

    play_state, confidence = get_play_analyzer().analyze_play_state(results)
    print(f"[PLAY STATE] {play_state} (confidence: {confidence:.3f})")
    return results
|
|
def analyze_play_state(predictions: List[Tuple[str, float]]) -> Tuple[str, float]:
    """
    Backward compatibility wrapper around PlayAnalyzer.analyze_play_state.

    Args:
        predictions: Classification results

    Returns:
        Tuple of (play_state, confidence)
    """
    return get_play_analyzer().analyze_play_state(predictions)
|
|
def detect_play_boundaries(clip_results: List[List[Tuple[str, float]]]) -> List[Tuple[str, int, float]]:
    """
    Backward compatibility wrapper around BoundaryDetector.detect_boundaries.

    Args:
        clip_results: List of classification results for each clip

    Returns:
        List of boundary detections
    """
    return get_boundary_detector().detect_boundaries(clip_results)