File size: 6,606 Bytes
321f47a
 
9f9e779
321f47a
 
 
 
 
 
 
 
 
 
 
 
 
9f9e779
321f47a
 
9f9e779
 
 
321f47a
 
9f9e779
321f47a
 
 
9f9e779
321f47a
 
 
9f9e779
 
321f47a
 
9f9e779
321f47a
 
9f9e779
321f47a
 
9f9e779
 
321f47a
 
 
 
 
 
 
 
 
9f9e779
321f47a
 
9f9e779
 
321f47a
 
 
9f9e779
321f47a
 
 
 
 
 
9f9e779
321f47a
 
 
9f9e779
321f47a
 
 
9f9e779
321f47a
 
 
 
 
 
 
 
9f9e779
321f47a
 
 
9f9e779
 
 
321f47a
 
 
 
9f9e779
321f47a
 
 
9f9e779
321f47a
 
 
 
 
 
 
 
 
 
 
 
 
9f9e779
321f47a
 
 
 
 
 
 
 
 
 
 
 
9f9e779
321f47a
 
 
 
 
 
 
 
9f9e779
321f47a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f9e779
321f47a
9f9e779
321f47a
 
9f9e779
321f47a
9f9e779
 
 
 
321f47a
 
 
 
 
 
 
 
 
 
 
 
9f9e779
321f47a
 
 
 
9f9e779
 
321f47a
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
#!/usr/bin/env python3
"""
Extract peak-feature keyframes and overlay attention heatmaps on the video frames.
"""

import os
import sys
import cv2
import numpy as np
import json
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import cm

def apply_attention_heatmap(frame, attention_weight, alpha=0.5):
    """
    Overlay a synthetic attention heatmap on top of a video frame.

    A center-weighted Gaussian is used as a stand-in spatial map: higher
    attention weights produce a tighter (more focused) Gaussian, which is
    then colorized with the 'jet' colormap and alpha-blended onto the frame.

    Args:
        frame: Original BGR frame (H, W, 3), uint8 — as returned by cv2.
        attention_weight: Scalar attention weight in [0, 1].
        alpha: Heatmap opacity in [0, 1].

    Returns:
        uint8 BGR frame of the same shape with the heatmap blended in.
    """
    h, w = frame.shape[:2]

    # Create a simple center-weighted Gaussian heatmap
    y, x = np.ogrid[:h, :w]
    center_y, center_x = h // 2, w // 2

    # High attention weight = tighter Gaussian
    sigma = min(h, w) / 3 * (1.5 - attention_weight)
    gaussian = np.exp(-((x - center_x)**2 + (y - center_y)**2) / (2 * sigma**2))

    # Normalize to [0, 1] (epsilon guards a flat map)
    gaussian = (gaussian - gaussian.min()) / (gaussian.max() - gaussian.min() + 1e-8)

    # Apply the attention weight
    heatmap = gaussian * attention_weight

    # cm.get_cmap was deprecated in matplotlib 3.7 and removed in 3.9;
    # prefer the matplotlib.colormaps registry, fall back for old versions.
    import matplotlib
    if hasattr(matplotlib, "colormaps"):
        colormap = matplotlib.colormaps['jet']
    else:
        colormap = cm.get_cmap('jet')

    heatmap_colored = colormap(heatmap)[:, :, :3] * 255
    heatmap_colored = heatmap_colored.astype(np.uint8)

    # Bug fix: matplotlib colormaps emit RGB, but the cv2 frame is BGR.
    # Convert before blending so the 'jet' colors are not channel-swapped.
    heatmap_colored = cv2.cvtColor(heatmap_colored, cv2.COLOR_RGB2BGR)

    result = cv2.addWeighted(frame, 1 - alpha, heatmap_colored, alpha, 0)

    return result


def extract_keyframes_with_attention(sample_dir, video_path):
    """
    Extract peak-feature keyframes and overlay the attention visualization.

    For each gloss (one row of the attention-weight matrix) the feature with
    the highest attention is located, mapped back to a video frame via the
    feature->frame mapping file, annotated, and saved as a JPEG. An index
    text file listing the saved keyframes is written alongside them.

    Args:
        sample_dir: Sample directory path (e.g., detailed_xxx/sample_0).
                    Must contain feature_frame_mapping.json and
                    attention_weights.npy.
        video_path: Original video path.
    """
    sample_dir = Path(sample_dir)

    print(f"\nProcessing sample: {sample_dir.name}")

    # Check that the required input files exist
    mapping_file = sample_dir / "feature_frame_mapping.json"
    weights_file = sample_dir / "attention_weights.npy"

    if not mapping_file.exists():
        print(f"  ⚠ Mapping file not found: {mapping_file}")
        return

    if not weights_file.exists():
        print(f"  ⚠ Attention weights missing: {weights_file}")
        return

    if not os.path.exists(video_path):
        print(f"  ⚠ Video file not found: {video_path}")
        return

    # Load the feature->frame mapping and the attention weights
    with open(mapping_file, 'r') as f:
        mapping_data = json.load(f)

    attention_weights = np.load(weights_file)

    # Create output directory
    keyframes_dir = sample_dir / "attention_keyframes"
    keyframes_dir.mkdir(exist_ok=True)

    print(f"  Feature count: {mapping_data['feature_count']}")
    print(f"  Original frame count: {mapping_data['original_frame_count']}")
    print(f"  Attention weight shape: {attention_weights.shape}")

    # Open the video
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f"  ✗ Failed to open video: {video_path}")
        return

    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print(f"  Total video frames: {total_frames}")

    # Map each feature index to the middle frame of its span
    feature_to_frame = {}
    for item in mapping_data['mapping']:
        feature_idx = item['feature_index']
        mid_frame = (item['frame_start'] + item['frame_end']) // 2
        feature_to_frame[feature_idx] = mid_frame

    # attention_weights is expected to be 2-D: (num_glosses, num_features)
    num_glosses = attention_weights.shape[0] if len(attention_weights.shape) > 1 else 0

    if num_glosses == 0:
        print("  ⚠ Invalid attention weight dimensions")
        cap.release()
        return

    # Keyframes that were actually written, as (gloss_idx, filename) pairs.
    # Reused below to write the index so it cannot drift from the files on disk.
    saved_keyframes = []

    for gloss_idx in range(num_glosses):
        gloss_attention = attention_weights[gloss_idx]  # shape: (num_features,)

        peak_feature_idx = np.argmax(gloss_attention)
        peak_attention = gloss_attention[peak_feature_idx]

        if peak_feature_idx not in feature_to_frame:
            print(f"  ⚠ Gloss {gloss_idx}: feature {peak_feature_idx} missing frame mapping")
            continue

        frame_idx = feature_to_frame[peak_feature_idx]

        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        ret, frame = cap.read()

        if not ret:
            print(f"  ⚠ Gloss {gloss_idx}: unable to read frame {frame_idx}")
            continue

        frame_with_attention = apply_attention_heatmap(frame, peak_attention, alpha=0.4)

        # Annotate with a black banner carrying gloss/feature/frame info
        text = f"Gloss {gloss_idx} | Feature {peak_feature_idx} | Frame {frame_idx}"
        attention_text = f"Attention: {peak_attention:.3f}"

        cv2.rectangle(frame_with_attention, (0, 0), (frame.shape[1], 60), (0, 0, 0), -1)
        cv2.putText(frame_with_attention, text, (10, 25),
                   cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        cv2.putText(frame_with_attention, attention_text, (10, 50),
                   cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)

        output_filename = f"keyframe_{gloss_idx:03d}_feat{peak_feature_idx}_frame{frame_idx}_att{peak_attention:.3f}.jpg"
        output_path = keyframes_dir / output_filename

        cv2.imwrite(str(output_path), frame_with_attention)
        saved_keyframes.append((gloss_idx, output_filename))

    cap.release()

    saved_count = len(saved_keyframes)
    print(f"  ✓ Saved {saved_count} keyframes to: {keyframes_dir}")

    # Create index file. Bug fix: the original wrote the literal placeholder
    # "(unknown)" instead of the computed filename, and re-derived the peaks
    # in a second loop that also listed glosses whose frame read had failed.
    index_file = keyframes_dir / "keyframes_index.txt"
    with open(index_file, 'w') as f:
        f.write("Attention Keyframe Index\n")
        f.write("=" * 60 + "\n\n")
        f.write(f"Sample directory: {sample_dir}\n")
        f.write(f"Video path: {video_path}\n")
        f.write(f"Total keyframes: {saved_count}\n\n")
        f.write("Keyframe list:\n")
        f.write("-" * 60 + "\n")

        for gloss_idx, filename in saved_keyframes:
            f.write(f"Gloss {gloss_idx:3d}: {filename}\n")

    print(f"  ✓ Index file written: {index_file}")


def main():
    """CLI entry point: expects <sample_dir> and <video_path> arguments."""
    args = sys.argv[1:]

    # Guard clause: bail out with usage help when arguments are missing.
    if len(args) < 2:
        print("Usage: python extract_attention_keyframes.py <sample_dir> <video_path>")
        print("Example: python extract_attention_keyframes.py detailed_xxx/sample_0 video.mp4")
        sys.exit(1)

    extract_keyframes_with_attention(args[0], args[1])


# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()