#!/usr/bin/env python3
import os
import json
import tempfile
import torch
import warnings
from pathlib import Path
from transformers import AutoProcessor, AutoModelForImageTextToText
import subprocess
import logging
import argparse
import re
from typing import List, Tuple, Dict

# Suppress warnings
os.environ["TOKENIZERS_PARALLELISM"] = "false"
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", message=".*torchvision.*")
warnings.filterwarnings("ignore", message=".*torchcodec.*")

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def get_video_duration_seconds(video_path: str) -> float:
    """Use ffprobe to get video duration in seconds."""
    cmd = [
        "ffprobe",
        "-v", "quiet",
        "-print_format", "json",
        "-show_format",
        video_path
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    info = json.loads(result.stdout)
    return float(info["format"]["duration"])


class VideoHighlightDetector:
    def __init__(
        self,
        model_path: str,
        device: str = None,
        batch_size: int = 8
    ):
        # Auto-detect device if not specified
        if device is None:
            if torch.cuda.is_available():
                device = "cuda"
            elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
                device = "mps"
            else:
                device = "cpu"
        else:
            requested = device.lower()
            if requested == "cuda" and not torch.cuda.is_available():
                logger.warning("CUDA requested but not available. Falling back to CPU.")
                device = "cpu"
            elif requested == "mps" and not (hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()):
                logger.warning("MPS requested but not available. Falling back to CPU.")
                device = "cpu"
            else:
                device = requested

        self.device = device
        self.batch_size = batch_size
        self.dtype = self._get_torch_dtype()

        # Initialize model and processor
        self.processor = AutoProcessor.from_pretrained(model_path)
        try:
            self.model = AutoModelForImageTextToText.from_pretrained(
                model_path,
                dtype=self.dtype,
                # _attn_implementation="flash_attention_2"
            ).to(device)
        except TypeError:
            # Backward compatibility for older Transformers versions.
            self.model = AutoModelForImageTextToText.from_pretrained(
                model_path,
                torch_dtype=self.dtype,
                # _attn_implementation="flash_attention_2"
            ).to(device)

        # Store model path for reference
        self.model_path = model_path

    def _get_torch_dtype(self):
        if self.device == "cuda":
            return torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
        if self.device == "mps":
            return torch.float16
        return torch.float32

    def _extract_assistant_text(self, decoded: str) -> str:
        """Extract assistant text robustly from decoded chat output."""
        if "Assistant: " in decoded:
            return decoded.split("Assistant: ", 1)[1].strip()
        # Case-insensitive fallback so markers like "assistant: " or "ASSISTANT: " still match.
        lowered = decoded.lower()
        marker = "assistant: "
        if marker in lowered:
            idx = lowered.index(marker)
            return decoded[idx + len(marker):].strip()
        return decoded.strip()

    def _sentence_count(self, text: str) -> int:
        """Approximate sentence count using end punctuation."""
        sentences = [s.strip() for s in re.split(r"[.!?]+", text) if s.strip()]
        return len(sentences)

    def _normalize_sentences(self, text: str, min_sentences: int, max_sentences: int) -> str:
        """Normalize free-form model output into plain sentence text."""
        cleaned = text.replace("\n", " ").replace("**", "")
        cleaned = re.sub(r"\s+", " ", cleaned).strip()
        parts = [p.strip() for p in re.split(r"(?<=[.!?])\s+", cleaned) if p.strip()]
        sentences = []
        for part in parts:
            s = re.sub(r"^\d+\.\s*", "", part)
            s = re.sub(r"^[-*]\s*", "", s)
            if len(s.split()) >= 3:
                sentences.append(s)
        if not sentences:
            return cleaned
        if len(sentences) >= min_sentences:
            return " ".join(sentences[:max_sentences]).strip()
        return " ".join(sentences).strip()

    def _rewrite_to_sentence_range(self, draft: str, min_sentences: int, max_sentences: int) -> str:
        """Rewrite a draft to fit a strict sentence range."""
        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": "Rewrite text faithfully without adding new facts."}]
            },
            {
                "role": "user",
                "content": [{
                    "type": "text",
                    "text": (
                        f"Rewrite this into {min_sentences}-{max_sentences} complete sentences. "
                        "Keep only visible, factual details and avoid assumptions.\n\n"
                        f"Text:\n{draft}"
                    )
                }]
            }
        ]
        inputs = self.processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt"
        ).to(self.device)
        outputs = self.model.generate(**inputs, max_new_tokens=256, do_sample=False)
        rewritten = self._extract_assistant_text(self.processor.decode(outputs[0], skip_special_tokens=True))
        return self._normalize_sentences(rewritten, min_sentences, max_sentences)

    def _compose_video_description(self, draft: str) -> str:
        """Compose final video description with strict analyst instructions."""
        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": "You are a professional video analyst."}]
            },
            {
                "role": "user",
                "content": [{
                    "type": "text",
                    "text": (
                        "You are reporting live from the scene.\n\n"
                        "In 4–5 complete sentences, describe exactly what is happening.\n"
                        "Capture the key visible moments as they unfold.\n\n"
                        "Include:\n"
                        "- Who is present and what they are doing\n"
                        "- The environment or location\n"
                        "- Visible reactions, gestures, or interactions\n"
                        "- Any significant action or change during the clip\n\n"
                        "Keep the tone natural, clear, and factual.\n"
                        "Do not speculate.\n"
                        "Do not mention the recording device.\n\n"
                        "Use only these observed details:\n"
                        f"{draft}"
                    )
                }]
            }
        ]
        inputs = self.processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt"
        ).to(self.device)
        outputs = self.model.generate(**inputs, max_new_tokens=320, do_sample=False)
        composed = self._extract_assistant_text(self.processor.decode(outputs[0], skip_special_tokens=True))
        return self._normalize_sentences(composed, 4, 5)

    def _describe_video_clip(self, clip_path: str) -> str:
        """Generate a grounded caption for a short clip and keep only its first sentence."""
        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": "Describe only visible actions and scene details. Do not guess."}]
            },
            {
                "role": "user",
                "content": [
                    {"type": "video", "path": clip_path},
                    {"type": "text", "text": "Write exactly 3-4 factual sentences about what is visually happening."}
                ]
            }
        ]
        inputs = self.processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt"
        ).to(self.device)
        outputs = self.model.generate(**inputs, max_new_tokens=80, do_sample=False)
        text = self._extract_assistant_text(self.processor.decode(outputs[0], skip_special_tokens=True))
        return self._normalize_sentences(text, 1, 1)

    def analyze_video_content(self, video_path: str) -> str:
        """Analyze video content to determine its type and description."""
        duration = get_video_duration_seconds(video_path)
        if duration <= 0:
            return "Unable to analyze the video content."
        clip_len = min(2.5, max(1.5, duration / 12))
        anchors = [0.1, 0.35, 0.6, 0.85]
        captions: List[str] = []
        seen = set()

        for idx, ratio in enumerate(anchors):
            start = max(0.0, min(duration - clip_len, duration * ratio))
            with tempfile.NamedTemporaryFile(suffix=f"_desc_{idx}.mp4", delete=False) as tmp_clip:
                clip_path = tmp_clip.name
            try:
                cmd = [
                    "ffmpeg", "-y", "-v", "quiet",
                    "-ss", str(start),
                    "-t", str(clip_len),
                    "-i", video_path,
                    "-an",
                    "-c:v", "libx264",
                    "-preset", "ultrafast",
                    clip_path
                ]
                subprocess.run(cmd, check=True, capture_output=True)
                sentence = self._describe_video_clip(clip_path)
                key = sentence.lower().strip()
                if key and key not in seen:
                    seen.add(key)
                    captions.append(sentence)
            except Exception:
                continue
            finally:
                if os.path.exists(clip_path):
                    os.unlink(clip_path)

        if not captions:
            return "Unable to analyze the video content."

        composed = " ".join(captions[:5])
        composed = self._normalize_sentences(composed, 4, 5)
        count = self._sentence_count(composed)
        if 4 <= count <= 5:
            return composed

        final_desc = self._compose_video_description(composed)
        if 4 <= self._sentence_count(final_desc) <= 5:
            return final_desc
        return self._rewrite_to_sentence_range(final_desc, 4, 5)

    def determine_highlights(self, video_description: str, prompt_num: int = 1) -> str:
        """Determine what constitutes highlights based on video description with different prompts."""
        system_prompts = {
            1: "You are a highlight editor. Return exactly 4-5 sentences describing the most important highlight moments to look for. Be specific, realistic, and avoid fabricated names or places.",
            2: "You are a visual-language assistant for creating highlight reels. Return exactly 4-5 sentences describing rare, important highlight-worthy moments. Be concrete and avoid invented details."
        }
        user_prompts = {
            1: "Based on this video description, describe the top highlight moments to look for in exactly 4-5 sentences.",
            2: "Based on this video description, describe dramatic moments that would make compelling highlights in exactly 4-5 sentences."
        }

        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": system_prompts[prompt_num]}]
            },
            {
                "role": "user",
                "content": [{"type": "text", "text": f"""Here is a description of a video:\n\n{video_description}\n\n{user_prompts[prompt_num]}"""}]
            }
        ]

        print(f"Using prompt {prompt_num} for highlight detection")

        inputs = self.processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt"
        ).to(self.device)
        outputs = self.model.generate(**inputs, max_new_tokens=256, do_sample=False)
        response = self.processor.decode(outputs[0], skip_special_tokens=True)

        # Extract the actual response with better formatting
        if "Assistant: " in response:
            clean_response = response.split("Assistant: ")[1]
        elif "assistant: " in response.lower():
            clean_response = response.lower().split("assistant: ")[1]
        else:
            # If no assistant tag found, try to extract meaningful content
            parts = response.split("User:")
            if len(parts) > 1:
                clean_response = parts[-1].strip()
            else:
                clean_response = response

        clean_response = self._normalize_sentences(clean_response.strip(), 4, 5)
        if 4 <= self._sentence_count(clean_response) <= 5:
            return clean_response
        return self._rewrite_to_sentence_range(clean_response, 4, 5)

    def process_segment(self, video_path: str, highlight_types: str) -> bool:
        """Process a video segment and determine if it contains highlights."""
        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": "You are a STRICT video highlight analyzer. You must be very selective and only identify truly exceptional moments. Most segments should be rejected. Only select segments with high dramatic value, clear action, strong visual interest, or significant events. Be critical and selective."}]
            },
            {
                "role": "user",
                "content": [
                    {"type": "video", "path": video_path},
                    {"type": "text", "text": f"""Looking for these highlights:\n{highlight_types}\n\nDoes this video segment match ANY of these highlights?\n\nAnswer with ONE WORD ONLY:\nYES or NO\n\nNothing else. Just YES or NO."""}
                ]
            }
        ]

        try:
            inputs = self.processor.apply_chat_template(
                messages,
                add_generation_prompt=True,
                tokenize=True,
                return_dict=True,
                return_tensors="pt"
            ).to(self.device)
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=8,  # Force very short responses
                do_sample=False    # Use greedy decoding for consistency
            )
            response = self.processor.decode(outputs[0], skip_special_tokens=True)

            # Extract assistant response
            if "Assistant:" in response:
                response = response.split("Assistant:")[-1].strip()
            elif "assistant:" in response:
                response = response.split("assistant:")[-1].strip()
            response = response.lower()
            print(f" 🤖 AI Response: {response}")

            # Simple yes/no detection - AI returns simple answers
            response_clean = response.strip().replace("'", "").replace("-", "").replace(".", "").strip()
            if response_clean.startswith("no"):
                return False
            elif response_clean.startswith("yes"):
                return True
            else:
                # Default to no if unclear
                return False

        except Exception as e:
            print(f" ❌ Error processing segment: {str(e)}")
            return False

    def score_segment(self, video_path: str, highlight_types: str) -> int:
        """Fallback scorer (1-10) used when strict YES/NO rejects all segments."""
        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": "You are a video highlight scorer. Score relevance conservatively."}]
            },
            {
                "role": "user",
                "content": [
                    {"type": "video", "path": video_path},
                    {"type": "text", "text": (
                        f"Highlight criteria:\n{highlight_types}\n\n"
                        "Rate how much this segment matches the criteria on a scale of 1 to 10. "
                        "Return one number only."
                    )}
                ]
            }
        ]

        try:
            inputs = self.processor.apply_chat_template(
                messages,
                add_generation_prompt=True,
                tokenize=True,
                return_dict=True,
                return_tensors="pt"
            ).to(self.device)
            outputs = self.model.generate(**inputs, max_new_tokens=8, do_sample=False)
            response = self.processor.decode(outputs[0], skip_special_tokens=True)
            response = self._extract_assistant_text(response)
            match = re.search(r"\b([1-9]|10)\b", response)
            if match:
                return int(match.group(1))
            return 1
        except Exception:
            return 1

    def _concatenate_scenes(
        self,
        video_path: str,
        scene_times: list,
        output_path: str,
        with_effects: bool = True
    ):
        """Concatenate selected scenes into final video with optional effects."""
        if not scene_times:
            logger.warning("No scenes to concatenate, skipping.")
            return

        if with_effects:
            self._concatenate_with_effects(video_path, scene_times, output_path)
        else:
            self._concatenate_basic(video_path, scene_times, output_path)

    def _video_has_audio(self, video_path: str) -> bool:
        """Return True when the input contains at least one audio stream."""
        cmd = [
            "ffprobe", "-v", "error",
            "-select_streams", "a",
            "-show_entries", "stream=index",
            "-of", "csv=p=0",
            video_path,
        ]
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
            return bool(result.stdout.strip())
        except Exception:
            return False

    def _concatenate_basic(self, video_path: str, scene_times: list, output_path: str):
        """Basic concatenation without effects."""
        has_audio = self._video_has_audio(video_path)

        filter_complex_parts = []
        concat_inputs = []
        for i, (start_sec, end_sec) in enumerate(scene_times):
            filter_complex_parts.append(
                f"[0:v]trim=start={start_sec}:end={end_sec},"
                f"setpts=PTS-STARTPTS[v{i}];"
            )
            if has_audio:
                filter_complex_parts.append(
                    f"[0:a]atrim=start={start_sec}:end={end_sec},"
                    f"asetpts=PTS-STARTPTS[a{i}];"
                )
                concat_inputs.append(f"[v{i}][a{i}]")
            else:
                concat_inputs.append(f"[v{i}]")

        concat_filter = (
            f"{''.join(concat_inputs)}concat=n={len(scene_times)}:v=1:a=1[outv][outa]"
            if has_audio
            else f"{''.join(concat_inputs)}concat=n={len(scene_times)}:v=1:a=0[outv]"
        )
        filter_complex = "".join(filter_complex_parts) + concat_filter

        cmd = ["ffmpeg", "-y", "-i", video_path, "-filter_complex", filter_complex, "-map", "[outv]"]
        if has_audio:
            cmd += ["-map", "[outa]", "-c:v", "libx264", "-c:a", "aac", output_path]
        else:
            cmd += ["-an", "-c:v", "libx264", output_path]

        logger.info(f"Running ffmpeg command: {' '.join(cmd)}")
        subprocess.run(cmd, check=True, capture_output=True, text=True)

    def _concatenate_with_effects(self, video_path: str, scene_times: list, output_path: str):
        """Concatenate with fade effects between segments."""
        has_audio = self._video_has_audio(video_path)

        if len(scene_times) == 1:
            # Single segment - just extract with fade in/out
            start_sec, end_sec = scene_times[0]
            duration = end_sec - start_sec
            fade_duration = min(0.5, duration / 4)  # 0.5s or 25% of duration, whichever is shorter

            if has_audio:
                cmd = [
                    "ffmpeg", "-y", "-i", video_path,
                    "-ss", str(start_sec), "-t", str(duration),
                    "-vf", f"fade=in:0:{int(fade_duration*30)},fade=out:{int((duration-fade_duration)*30)}:{int(fade_duration*30)}",
                    "-af", f"afade=in:st=0:d={fade_duration},afade=out:st={duration-fade_duration}:d={fade_duration}",
                    "-c:v", "libx264", "-c:a", "aac",
                    output_path
                ]
            else:
                cmd = [
                    "ffmpeg", "-y", "-i", video_path,
                    "-ss", str(start_sec), "-t", str(duration),
                    "-vf", f"fade=in:0:{int(fade_duration*30)},fade=out:{int((duration-fade_duration)*30)}:{int(fade_duration*30)}",
                    "-an", "-c:v", "libx264",
                    output_path
                ]
        else:
            # Multiple segments - create with crossfade transitions
            filter_parts = []
            audio_parts = []
            for i, (start_sec, end_sec) in enumerate(scene_times):
                duration = end_sec - start_sec
                fade_duration = min(0.3, duration / 6)  # Shorter fades for multiple segments

                # Video with fade
                filter_parts.append(
                    f"[0:v]trim=start={start_sec}:end={end_sec},setpts=PTS-STARTPTS,"
                    f"fade=in:0:{int(fade_duration*30)},fade=out:{int((duration-fade_duration)*30)}:{int(fade_duration*30)}[v{i}]"
                )
                if has_audio:
                    # Audio with fade
                    audio_parts.append(
                        f"[0:a]atrim=start={start_sec}:end={end_sec},asetpts=PTS-STARTPTS,"
                        f"afade=in:st=0:d={fade_duration},afade=out:st={duration-fade_duration}:d={fade_duration}[a{i}]"
                    )

            # Concatenate all segments
            video_concat = "".join([f"[v{i}]" for i in range(len(scene_times))])
            if has_audio:
                audio_concat = "".join([f"[a{i}]" for i in range(len(scene_times))])
                filter_complex = (
                    ";".join(filter_parts) + ";" +
                    ";".join(audio_parts) + ";" +
                    f"{video_concat}concat=n={len(scene_times)}:v=1:a=0[outv];" +
                    f"{audio_concat}concat=n={len(scene_times)}:v=0:a=1[outa]"
                )
                cmd = [
                    "ffmpeg", "-y", "-i", video_path,
                    "-filter_complex", filter_complex,
                    "-map", "[outv]", "-map", "[outa]",
                    "-c:v", "libx264", "-c:a", "aac",
                    output_path
                ]
            else:
                filter_complex = (
                    ";".join(filter_parts) + ";" +
                    f"{video_concat}concat=n={len(scene_times)}:v=1:a=0[outv]"
                )
                cmd = [
                    "ffmpeg", "-y", "-i", video_path,
                    "-filter_complex", filter_complex,
                    "-map", "[outv]", "-an",
                    "-c:v", "libx264",
                    output_path
                ]

        logger.info(f"Running ffmpeg command with effects: {' '.join(cmd)}")
        result = subprocess.run(cmd, capture_output=True, text=True)

        if result.returncode != 0:
            logger.error(f"FFmpeg error: {result.stderr}")
            # Fall back to basic concatenation
            logger.info("Falling back to basic concatenation...")
            self._concatenate_basic(video_path, scene_times, output_path)

    def process_video(self, video_path: str, output_path: str, segment_length: float = 10.0, with_effects: bool = True) -> Dict:
        """Process video using exact HuggingFace approach."""
        print("🚀 Starting HuggingFace Exact Video Highlight Detection")
        print(f"📁 Input: {video_path}")
        print(f"📁 Output: {output_path}")
        print(f"⏱️ Segment Length: {segment_length}s")
        print(f"🎨 With Effects: {with_effects}")
        print()

        # Get video duration
        duration = get_video_duration_seconds(video_path)
        if duration <= 0:
            return {"error": "Could not determine video duration"}

        print(f"📹 Video duration: {duration:.1f}s ({duration/60:.1f} minutes)")

        # Check if video is too short for meaningful highlights
        if duration < segment_length * 2:
            return {
                "error": f"Video too short ({duration:.1f}s). "
                         f"Need at least {segment_length * 2:.1f}s for meaningful highlights.",
                "video_description": "Video too short for analysis",
                "total_segments": 0,
                "selected_segments": 0
            }

        # Step 1: Analyze overall video content
        print("🎬 Step 1: Analyzing overall video content...")
        video_desc = self.analyze_video_content(video_path)
        print(f"📝 Video Description: {video_desc}")
        print()

        # Step 2: Get two different sets of highlights
        print("🎯 Step 2: Determining highlight types (2 variations)...")
        highlights1 = self.determine_highlights(video_desc, prompt_num=1)
        highlights2 = self.determine_highlights(video_desc, prompt_num=2)
        print(f"🎯 Highlight Set 1: {highlights1}")
        print()
        print(f"🎯 Highlight Set 2: {highlights2}")
        print()

        # Step 3: Split video into segments
        temp_dir = os.path.join("/tmp", "temp_segments")
        os.makedirs(temp_dir, mode=0o755, exist_ok=True)

        kept_segments1 = []
        kept_segments2 = []
        scored_segments1 = []
        scored_segments2 = []
        segments_processed = 0
        # Count matches the segmentation loop below, including a trailing partial segment.
        total_segments = len(range(0, int(duration), int(segment_length)))

        print(f"🔍 Step 3: Processing {total_segments} segments of {segment_length}s each...")

        for start_time in range(0, int(duration), int(segment_length)):
            progress = int((segments_processed / total_segments) * 100) if total_segments > 0 else 0
            end_time = min(start_time + segment_length, duration)

            print(f"📊 Processing segment {segments_processed+1}/{total_segments} ({progress}%)")
            print(f" ⏰ Time: {start_time}s - {end_time:.1f}s")

            # Create segment
            segment_path = f"{temp_dir}/segment_{start_time}.mp4"
            cmd = [
                "ffmpeg", "-y", "-v", "quiet",  # Suppress FFmpeg output
                "-i", video_path,
                "-ss", str(start_time),
                "-t", str(segment_length),
                "-c:v", "libx264",
                "-preset", "ultrafast",  # Use ultrafast preset for speed
                "-pix_fmt", "yuv420p",  # Ensure compatible pixel format
                segment_path
            ]
            subprocess.run(cmd, check=True, capture_output=True)

            # Process segment with both highlight sets
            if self.process_segment(segment_path, highlights1):
                print(" ✅ KEEPING SEGMENT FOR SET 1")
                kept_segments1.append((start_time, end_time))
                score1 = 10
            else:
                print(" ❌ REJECTING SEGMENT FOR SET 1")
                score1 = self.score_segment(segment_path, highlights1)

            if self.process_segment(segment_path, highlights2):
                print(" ✅ KEEPING SEGMENT FOR SET 2")
                kept_segments2.append((start_time, end_time))
                score2 = 10
            else:
                print(" ❌ REJECTING SEGMENT FOR SET 2")
                score2 = self.score_segment(segment_path, highlights2)

            scored_segments1.append({"start": start_time, "end": end_time, "score": score1})
            scored_segments2.append({"start": start_time, "end": end_time, "score": score2})

            # Clean up segment file
            os.remove(segment_path)
            segments_processed += 1
            print()

        # Remove temp directory
        os.rmdir(temp_dir)

        # Calculate percentages of video kept for each highlight set
        total_duration = duration
        duration1 = sum(end - start for start, end in kept_segments1)
        duration2 = sum(end - start for start, end in kept_segments2)
        percent1 = (duration1 / total_duration) * 100
        percent2 = (duration2 / total_duration) * 100

        print(f"📊 Results Summary:")
        print(f" 🎯 Highlight set 1: {percent1:.1f}% of video ({len(kept_segments1)} segments)")
        print(f" 🎯 Highlight set 2: {percent2:.1f}% of video ({len(kept_segments2)} segments)")

        # Choose the set with lower percentage unless it's zero
        final_segments = kept_segments2 if (0 < percent2 <= percent1 or percent1 == 0) else kept_segments1
        selected_set = "2" if final_segments == kept_segments2 else "1"
        percent_used = percent2 if final_segments == kept_segments2 else percent1

        print(f"🏆 Selected Set {selected_set} with {len(final_segments)} segments "
({percent_used:.1f}% of video)") if not final_segments: print("⚠️ No strict YES segments found. Falling back to score-based selection.") avg1 = sum(s["score"] for s in scored_segments1) / len(scored_segments1) if scored_segments1 else 0 avg2 = sum(s["score"] for s in scored_segments2) / len(scored_segments2) if scored_segments2 else 0 if avg2 >= avg1: selected_set = "2" fallback_scores = sorted(scored_segments2, key=lambda x: x["score"], reverse=True) else: selected_set = "1" fallback_scores = sorted(scored_segments1, key=lambda x: x["score"], reverse=True) selected_by_threshold = [s for s in fallback_scores if s["score"] >= 6] if not selected_by_threshold: top_n = max(1, min(3, len(fallback_scores))) selected_by_threshold = fallback_scores[:top_n] final_segments = [(s["start"], s["end"]) for s in selected_by_threshold] selected_duration = sum(end - start for start, end in final_segments) percent_used = (selected_duration / total_duration) * 100 if total_duration > 0 else 0 print(f"🏆 Fallback selected Set {selected_set} with {len(final_segments)} segments ({percent_used:.1f}% of video)") # Step 4: Create final video print(f"🎬 Step 4: Creating final highlights video...") self._concatenate_scenes(video_path, final_segments, output_path, with_effects) print("✅ Highlights video created successfully!") print(f"🎉 SUCCESS! Created highlights with {len(final_segments)} segments") print(f" 📹 Total highlight duration: {sum(end - start for start, end in final_segments):.1f}s") print(f" 📊 Percentage of original video: {percent_used:.1f}%") # Return analysis results return { "success": True, "video_description": video_desc, "highlights1": highlights1, "highlights2": highlights2, "selected_set": selected_set, "total_segments": total_segments, "selected_segments": len(final_segments), "selected_times": final_segments, "total_duration": sum(end - start for start, end in final_segments), "compression_ratio": percent_used / 100, "output_path": output_path } def main(): parser = argparse.ArgumentParser(description='HuggingFace Exact Video Highlights') parser.add_argument('video_path', help='Path to input video file') parser.add_argument('--output', required=True, help='Path to output highlights video') parser.add_argument('--save-analysis', action='store_true', help='Save analysis results to JSON') parser.add_argument('--segment-length', type=float, default=10.0, help='Length of each segment in seconds (default: 10.0)') parser.add_argument('--model', default='HuggingFaceTB/SmolVLM2-256M-Video-Instruct', help='SmolVLM2 model to use') parser.add_argument('--device', default='auto', choices=['auto', 'cpu', 'cuda', 'mps'], help='Inference device') args = parser.parse_args() # Validate input file if not os.path.exists(args.video_path): print(f"❌ Error: Video file not found: {args.video_path}") return # Create output directory if needed output_dir = os.path.dirname(args.output) if output_dir and not os.path.exists(output_dir): os.makedirs(output_dir) print(f"🚀 HuggingFace Exact SmolVLM2 Video Highlights") print(f" Model: {args.model}") print(f" Requested Device: {args.device}") print() try: # Initialize detector print(f"🔥 Loading {args.model} for HuggingFace Exact Analysis...") device = None if args.device == "auto" else args.device detector = VideoHighlightDetector( model_path=args.model, device=device, batch_size=16 ) print(f"🖥️ Using device: {detector.device} (dtype: {detector.dtype})") print("✅ SmolVLM2 loaded successfully!") print() # Process video results = detector.process_video( video_path=args.video_path, 
            output_path=args.output,
            segment_length=args.segment_length
        )

        # Save analysis if requested
        if args.save_analysis:
            analysis_file = args.output.replace('.mp4', '_exact_analysis.json')
            with open(analysis_file, 'w') as f:
                json.dump(results, f, indent=2, default=str)
            print(f"📊 Analysis saved: {analysis_file}")

    except Exception as e:
        print(f"❌ Error: {str(e)}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()
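
# Example invocations (a sketch; the filename "video_highlights.py" is hypothetical,
# substitute whatever this script is saved as):
#
#   python video_highlights.py input.mp4 --output highlights.mp4
#   python video_highlights.py input.mp4 --output highlights.mp4 \
#       --segment-length 8 --device cuda --save-analysis
#
# With --save-analysis, a JSON summary is written next to the output video
# (e.g. highlights_exact_analysis.json, derived from the --output path).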