File size: 4,537 Bytes
741864d
 
9f9e779
741864d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f9e779
741864d
 
 
 
 
9f9e779
741864d
 
 
 
a9be817
 
 
 
 
 
9f9e779
a9be817
741864d
9f9e779
741864d
 
 
 
 
 
9f9e779
741864d
 
 
 
 
 
9f9e779
741864d
 
9f9e779
 
 
741864d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f9e779
 
 
 
741864d
 
9f9e779
741864d
 
9f9e779
 
741864d
 
 
9f9e779
 
741864d
 
 
 
 
9f9e779
 
741864d
 
 
 
 
 
 
 
9f9e779
741864d
 
 
9f9e779
741864d
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
#!/usr/bin/env python
"""
Generate a feature-to-frame mapping file for SignX inference outputs.

Usage:
    python generate_feature_mapping.py <sample_dir> <video_path>

Example:
    python generate_feature_mapping.py detailed_prediction_20251226_155113/sample_000 \\
        eval/tiny_test_data/videos/632051.mp4
"""

import sys
import os
import json
import numpy as np
from pathlib import Path

def generate_feature_mapping(sample_dir, video_path):
    """Create the feature-to-frame mapping JSON for a given sample directory.

    Loads ``attention_weights.npy`` from *sample_dir* to determine the number
    of features, probes *video_path* for its frame count and FPS (falling back
    to an estimate when OpenCV is unavailable), and writes
    ``feature_frame_mapping.json`` into *sample_dir* describing which original
    frames each feature index covers.

    Args:
        sample_dir: Directory containing ``attention_weights.npy``; the
            mapping JSON is also written here.
        video_path: Path to the source video used to count original frames.

    Returns:
        bool: True on success, False on any recoverable error (missing input
        file, unexpected array shape, zero features, unreadable video).
    """
    sample_dir = Path(sample_dir)

    # Check if attention_weights.npy exists
    attn_file = sample_dir / "attention_weights.npy"
    if not attn_file.exists():
        print(f"Error: missing attention_weights.npy: {attn_file}")
        return False

    # Load attention weights to get feature count.
    # The feature axis is the last axis in both supported layouts.
    attn_weights = np.load(attn_file)
    if attn_weights.ndim == 2:
        feature_count = attn_weights.shape[1]  # Shape: (time, features) - inference mode
    elif attn_weights.ndim == 3:
        feature_count = attn_weights.shape[2]  # Shape: (time, beam, features) - beam search
    else:
        print(f"Error: unexpected attention_weights shape: {attn_weights.shape}")
        return False

    # Guard against a degenerate array: the uniform mapping below divides by
    # feature_count, which would raise ZeroDivisionError.
    if feature_count == 0:
        print(f"Error: attention_weights has zero features: {attn_weights.shape}")
        return False

    print(f"Feature count: {feature_count}")

    # Get original frame count from video
    try:
        import cv2
        cap = cv2.VideoCapture(str(video_path))
        if not cap.isOpened():
            print(f"Error: failed to open video file: {video_path}")
            return False

        try:
            original_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = cap.get(cv2.CAP_PROP_FPS)
        finally:
            # Release the capture handle even if a property read raises,
            # so the video file is never left open.
            cap.release()

        print(f"Original frames: {original_frame_count}, FPS: {fps}")

    except ImportError:
        print("Warning: OpenCV not available, falling back to estimates")
        # Assume 30 fps and approximate the frame count from features
        original_frame_count = feature_count * 3  # default 3x downsampling
        fps = 30.0

    # Calculate uniform mapping: feature i -> frames [start, end]
    frame_mapping = []
    for feat_idx in range(feature_count):
        start_frame = int(feat_idx * original_frame_count / feature_count)
        end_frame = int((feat_idx + 1) * original_frame_count / feature_count)
        frame_mapping.append({
            "feature_index": feat_idx,
            "frame_start": start_frame,
            "frame_end": end_frame,
            "frame_count": end_frame - start_frame
        })

    # Save mapping
    mapping_data = {
        "original_frame_count": original_frame_count,
        "feature_count": feature_count,
        "downsampling_ratio": original_frame_count / feature_count,
        "fps": fps,
        "mapping": frame_mapping
    }

    output_file = sample_dir / "feature_frame_mapping.json"
    # Explicit encoding: don't depend on the platform default for the JSON file.
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(mapping_data, f, indent=2)

    print(f"\n✓ Mapping file written: {output_file}")
    print(f"  Original frames: {original_frame_count}")
    print(f"  Feature count: {feature_count}")
    print(f"  Downsampling ratio: {mapping_data['downsampling_ratio']:.2f}x")

    # Print sample mappings
    print("\nSample mappings:")
    for i in range(min(3, len(frame_mapping))):
        mapping = frame_mapping[i]
        print(f"  Feature {mapping['feature_index']}: frames {mapping['frame_start']}-{mapping['frame_end']} "
              f"({mapping['frame_count']} frames)")
    if len(frame_mapping) > 3:
        print("  ...")
        mapping = frame_mapping[-1]
        print(f"  Feature {mapping['feature_index']}: frames {mapping['frame_start']}-{mapping['frame_end']} "
              f"({mapping['frame_count']} frames)")

    return True

if __name__ == "__main__":
    # Require exactly two positional arguments: the sample directory produced
    # by inference, and the source video used during inference.
    if len(sys.argv) != 3:
        print("Usage: python generate_feature_mapping.py <sample_dir> <video_path>")
        print("\nExample:")
        print("  python generate_feature_mapping.py detailed_prediction_20251226_155113/sample_000 \\")
        print("      eval/tiny_test_data/videos/632051.mp4")
        sys.exit(1)

    sample_dir, video_path = sys.argv[1], sys.argv[2]

    # Validate both paths up front so the user gets a targeted error message.
    if not os.path.exists(sample_dir):
        print(f"Error: sample directory not found: {sample_dir}")
        sys.exit(1)
    if not os.path.exists(video_path):
        print(f"Error: video file not found: {video_path}")
        sys.exit(1)

    # Exit code mirrors the boolean result: 0 on success, 1 on failure.
    sys.exit(0 if generate_feature_mapping(sample_dir, video_path) else 1)