"""
ShortSmith v2 - Clip Extractor Module

Final clip extraction and output generation.
Handles cutting clips at precise timestamps with various output options.
"""

from pathlib import Path
from typing import List, Optional
from dataclasses import dataclass

from utils.logger import get_logger, LogTimer
from utils.helpers import (
    ensure_dir,
    format_timestamp,
)
from config import get_config, ProcessingConfig
from core.video_processor import VideoProcessor

logger = get_logger("core.clip_extractor")


@dataclass
class ExtractedClip:
    """Represents an extracted video clip."""
    clip_path: Path              # Path to the clip file
    start_time: float            # Start timestamp in source video
    end_time: float              # End timestamp in source video
    hype_score: float            # Normalized hype score (0-1)
    rank: int                    # Rank among all clips (1 = best)
    thumbnail_path: Optional[Path] = None  # Path to thumbnail

    # Metadata
    source_video: Optional[Path] = None
    person_detected: bool = False
    person_screen_time: float = 0.0  # Fraction of clip (0-1) with target person visible

    # Additional scores
    visual_score: float = 0.0
    audio_score: float = 0.0
    motion_score: float = 0.0

    @property
    def duration(self) -> float:
        """Clip duration in seconds."""
        return self.end_time - self.start_time

    @property
    def time_range(self) -> str:
        """Human-readable time range."""
        return f"{format_timestamp(self.start_time)} - {format_timestamp(self.end_time)}"

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON serialization."""
        return {
            "clip_path": str(self.clip_path),
            "start_time": self.start_time,
            "end_time": self.end_time,
            "duration": self.duration,
            "hype_score": round(self.hype_score, 4),
            "rank": self.rank,
            "time_range": self.time_range,
            "visual_score": round(self.visual_score, 4),
            "audio_score": round(self.audio_score, 4),
            "motion_score": round(self.motion_score, 4),
            "person_detected": self.person_detected,
            "person_screen_time": round(self.person_screen_time, 4),
        }


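# Note: a ClipCandidate is the scored *input* to selection, while an
# ExtractedClip is the on-disk *result*; ClipExtractor.extract_clips() turns
# selected candidates into extracted clips, ranked by hype_score.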
@dataclass
class ClipCandidate:
    """A candidate segment for clip extraction."""
    start_time: float
    end_time: float
    hype_score: float
    visual_score: float = 0.0
    audio_score: float = 0.0
    motion_score: float = 0.0
    person_score: float = 0.0  # Target person visibility

    @property
    def duration(self) -> float:
        return self.end_time - self.start_time


class ClipExtractor:
    """
    Extracts final clips from video based on hype scores.

    Handles:
    - Selecting top segments based on scores
    - Enforcing diversity (minimum gap between clips)
    - Adjusting clip boundaries to scene cuts
    - Generating thumbnails
    """

    def __init__(
        self,
        video_processor: VideoProcessor,
        config: Optional[ProcessingConfig] = None,
    ):
        """
        Initialize clip extractor.

        Args:
            video_processor: VideoProcessor instance for clip cutting
            config: Processing configuration (uses default if None)
        """
        self.video_processor = video_processor
        self.config = config or get_config().processing

        logger.info(
            f"ClipExtractor initialized (duration={self.config.min_clip_duration}-"
            f"{self.config.max_clip_duration}s, gap={self.config.min_gap_between_clips}s)"
        )

    def select_clips(
        self,
        candidates: List[ClipCandidate],
        num_clips: int,
        enforce_diversity: bool = True,
    ) -> List[ClipCandidate]:
        """
        Select top clips from candidates.

        Args:
            candidates: List of clip candidates with scores
            num_clips: Number of clips to select
            enforce_diversity: Enforce minimum gap between clips

        Returns:
            List of selected ClipCandidate objects
        """
        if not candidates:
            logger.warning("No candidates provided for selection")
            return []

        # Sort by hype score
        sorted_candidates = sorted(
            candidates, key=lambda c: c.hype_score, reverse=True
        )

        if not enforce_diversity:
            return sorted_candidates[:num_clips]

        # Select with diversity constraint
        selected = []
        min_gap = self.config.min_gap_between_clips

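        # Greedy pass from best to worst: keep a candidate only if its start
        # is at least min_gap from every clip already kept. For example, with
        # min_gap=30s, kept clips starting at 10s and 100s block any further
        # candidate starting between -20s and 40s or between 70s and 130s.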
        for candidate in sorted_candidates:
            if len(selected) >= num_clips:
                break

            # Check if this candidate is far enough from existing selections
            is_diverse = True
            for existing in selected:
                # Calculate gap between clip starts
                gap = abs(candidate.start_time - existing.start_time)
                if gap < min_gap:
                    is_diverse = False
                    break

            if is_diverse:
                selected.append(candidate)

        # If we couldn't get enough with diversity, relax constraint
        if len(selected) < num_clips:
            logger.warning(
                f"Only {len(selected)} diverse clips found, "
                f"relaxing diversity constraint"
            )
            for candidate in sorted_candidates:
                if candidate not in selected:
                    selected.append(candidate)
                if len(selected) >= num_clips:
                    break

        logger.info(f"Selected {len(selected)} clips from {len(candidates)} candidates")
        return selected

    def adjust_to_scene_boundaries(
        self,
        candidates: List[ClipCandidate],
        scene_boundaries: List[float],
        tolerance: float = 1.0,
    ) -> List[ClipCandidate]:
        """
        Adjust clip boundaries to align with scene cuts.

        Args:
            candidates: List of clip candidates
            scene_boundaries: List of scene boundary timestamps
            tolerance: Maximum adjustment in seconds

        Returns:
            List of adjusted ClipCandidate objects
        """
        if not scene_boundaries:
            return candidates

        adjusted = []

        for candidate in candidates:
            new_start = candidate.start_time
            new_end = candidate.end_time

            # Snap start to the nearest scene boundary, if within tolerance
            nearest = min(scene_boundaries, key=lambda b: abs(b - candidate.start_time))
            if abs(nearest - candidate.start_time) < tolerance:
                new_start = nearest

            # Snap end to the nearest scene boundary, if within tolerance
            nearest = min(scene_boundaries, key=lambda b: abs(b - candidate.end_time))
            if abs(nearest - candidate.end_time) < tolerance:
                new_end = nearest

            # Ensure minimum duration
            if new_end - new_start < self.config.min_clip_duration:
                # Keep original boundaries
                new_start = candidate.start_time
                new_end = candidate.end_time

            adjusted.append(ClipCandidate(
                start_time=new_start,
                end_time=new_end,
                hype_score=candidate.hype_score,
                visual_score=candidate.visual_score,
                audio_score=candidate.audio_score,
                motion_score=candidate.motion_score,
                person_score=candidate.person_score,
            ))

        return adjusted

    def extract_clips(
        self,
        video_path: str | Path,
        output_dir: str | Path,
        candidates: List[ClipCandidate],
        num_clips: Optional[int] = None,
        generate_thumbnails: bool = True,
        reencode: bool = False,
    ) -> List[ExtractedClip]:
        """
        Extract clips from video.

        Args:
            video_path: Path to source video
            output_dir: Directory for output clips
            candidates: List of clip candidates
            num_clips: Number of clips to extract (None = use config default)
            generate_thumbnails: Whether to generate thumbnails
            reencode: Whether to re-encode clips (slower but precise)

        Returns:
            List of ExtractedClip objects
        """
        video_path = Path(video_path)
        output_dir = ensure_dir(output_dir)
        # Explicit None check so a caller can legitimately pass num_clips=0
        num_clips = num_clips if num_clips is not None else self.config.default_num_clips

        with LogTimer(logger, f"Extracting {num_clips} clips"):
            # Select top clips
            selected = self.select_clips(candidates, num_clips)

            if not selected:
                logger.warning("No clips to extract")
                return []

            # Extract each clip
            clips = []

            for rank, candidate in enumerate(selected, 1):
                try:
                    clip = self._extract_single_clip(
                        video_path=video_path,
                        output_dir=output_dir,
                        candidate=candidate,
                        rank=rank,
                        generate_thumbnail=generate_thumbnails,
                        reencode=reencode,
                    )
                    clips.append(clip)

                except Exception as e:
                    logger.error(f"Failed to extract clip {rank}: {e}")

            logger.info(f"Successfully extracted {len(clips)} clips")
            return clips

    def _extract_single_clip(
        self,
        video_path: Path,
        output_dir: Path,
        candidate: ClipCandidate,
        rank: int,
        generate_thumbnail: bool,
        reencode: bool,
    ) -> ExtractedClip:
        """Extract a single clip."""
        # Generate output filename
        clip_filename = f"clip_{rank:02d}_{format_timestamp(candidate.start_time).replace(':', '-')}.mp4"
        clip_path = output_dir / clip_filename

        # Cut the clip
        self.video_processor.cut_clip(
            video_path=video_path,
            output_path=clip_path,
            start_time=candidate.start_time,
            end_time=candidate.end_time,
            reencode=reencode,
        )

        # Generate thumbnail
        thumbnail_path = None
        if generate_thumbnail:
            try:
                thumb_filename = f"thumb_{rank:02d}.jpg"
                thumbnail_path = output_dir / "thumbnails" / thumb_filename
                thumbnail_path.parent.mkdir(parents=True, exist_ok=True)

                # Thumbnail at 1/3 into the clip
                thumb_time = candidate.start_time + (candidate.duration / 3)
                self.video_processor.generate_thumbnail(
                    video_path=video_path,
                    output_path=thumbnail_path,
                    timestamp=thumb_time,
                )
            except Exception as e:
                logger.warning(f"Failed to generate thumbnail for clip {rank}: {e}")
                thumbnail_path = None

        return ExtractedClip(
            clip_path=clip_path,
            start_time=candidate.start_time,
            end_time=candidate.end_time,
            hype_score=candidate.hype_score,
            rank=rank,
            thumbnail_path=thumbnail_path,
            source_video=video_path,
            visual_score=candidate.visual_score,
            audio_score=candidate.audio_score,
            motion_score=candidate.motion_score,
            person_detected=candidate.person_score > 0,
            person_screen_time=candidate.person_score,
        )

    def create_fallback_clips(
        self,
        video_path: str | Path,
        output_dir: str | Path,
        duration: float,
        num_clips: int,
    ) -> List[ExtractedClip]:
        """
        Create uniformly distributed clips when no highlights are detected.

        Args:
            video_path: Path to source video
            output_dir: Directory for output clips
            duration: Video duration in seconds
            num_clips: Number of clips to create

        Returns:
            List of fallback ExtractedClip objects
        """
        logger.warning("Creating fallback clips (no highlights detected)")

        clip_duration = self.config.default_clip_duration
        total_clip_time = clip_duration * num_clips

        if total_clip_time >= duration:
            # Video too short, adjust
            clip_duration = max(
                self.config.min_clip_duration,
                duration / (num_clips + 1)
            )

        # Calculate evenly spaced start times (gap clamped at 0 in case the
        # minimum clip duration still overshoots the video length)
        gap = max(0.0, (duration - clip_duration * num_clips) / (num_clips + 1))
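        # Worked example: duration=100s, num_clips=3, clip_duration=20s
        # gives gap = (100 - 60) / 4 = 10s, so clips start at 10s, 40s and
        # 70s, leaving a 10s margin at each end of the video.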
        candidates = []

        for i in range(num_clips):
            start = gap + i * (clip_duration + gap)
            end = start + clip_duration

            candidates.append(ClipCandidate(
                start_time=start,
                end_time=min(end, duration),
                hype_score=0.5,  # Neutral score
            ))

        return self.extract_clips(
            video_path=video_path,
            output_dir=output_dir,
            candidates=candidates,
            num_clips=num_clips,
        )

    def merge_adjacent_candidates(
        self,
        candidates: List[ClipCandidate],
        max_gap: float = 2.0,
        max_duration: Optional[float] = None,
    ) -> List[ClipCandidate]:
        """
        Merge adjacent high-scoring candidates into longer clips.

        Args:
            candidates: List of clip candidates
            max_gap: Maximum gap between candidates to merge
            max_duration: Maximum merged clip duration

        Returns:
            List of merged ClipCandidate objects
        """
        max_duration = max_duration or self.config.max_clip_duration

        if not candidates:
            return []

        # Sort by start time
        sorted_candidates = sorted(candidates, key=lambda c: c.start_time)
        merged = []
        current = sorted_candidates[0]

        for candidate in sorted_candidates[1:]:
            gap = candidate.start_time - current.end_time
            potential_duration = candidate.end_time - current.start_time

            if gap <= max_gap and potential_duration <= max_duration:
                # Merge
                current = ClipCandidate(
                    start_time=current.start_time,
                    end_time=candidate.end_time,
                    hype_score=max(current.hype_score, candidate.hype_score),
                    visual_score=max(current.visual_score, candidate.visual_score),
                    audio_score=max(current.audio_score, candidate.audio_score),
                    motion_score=max(current.motion_score, candidate.motion_score),
                    person_score=max(current.person_score, candidate.person_score),
                )
            else:
                merged.append(current)
                current = candidate

        merged.append(current)
        return merged


# Export public interface
__all__ = ["ClipExtractor", "ExtractedClip", "ClipCandidate"]
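

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module's API). Assumes a
# no-argument VideoProcessor constructor, an "input.mp4" on disk, and a
# configured min_gap_between_clips greater than 2s; in the real pipeline the
# candidates come from the upstream scoring stage rather than being
# hard-coded.
if __name__ == "__main__":
    extractor = ClipExtractor(VideoProcessor())  # assumed no-arg constructor

    # Stand-in candidates: two overlapping peaks and one distant peak.
    demo_candidates = [
        ClipCandidate(start_time=12.0, end_time=27.0, hype_score=0.91),
        ClipCandidate(start_time=14.0, end_time=29.0, hype_score=0.88),
        ClipCandidate(start_time=240.0, end_time=255.0, hype_score=0.75),
    ]

    # With diversity enforced, the 14s candidate is too close to the 12s one,
    # so the extractor keeps the 12s and 240s peaks.
    for clip in extractor.extract_clips(
        video_path="input.mp4",
        output_dir="output/clips",
        candidates=demo_candidates,
        num_clips=2,
    ):
        print(f"#{clip.rank} {clip.time_range} score={clip.hype_score:.2f}")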