#!/usr/bin/env python3
"""
Extract peak-feature keyframes and overlay attention heatmaps on the video frames.
"""
import os
import sys
import cv2
import numpy as np
import json
from pathlib import Path
import matplotlib.pyplot as plt
def apply_attention_heatmap(frame, attention_weight, alpha=0.5):
"""
Overlay a synthetic attention heatmap on top of a video frame.
Args:
frame: Original frame (H, W, 3)
attention_weight: Scalar attention weight in [0, 1]
alpha: Heatmap opacity
Returns:
Frame with the attention heatmap blended in.
"""
h, w = frame.shape[:2]
# Create a simple center-weighted Gaussian heatmap
y, x = np.ogrid[:h, :w]
center_y, center_x = h // 2, w // 2
# High attention weight = tighter Gaussian
sigma = min(h, w) / 3 * (1.5 - attention_weight)
gaussian = np.exp(-((x - center_x)**2 + (y - center_y)**2) / (2 * sigma**2))
# Normalize to [0, 1]
gaussian = (gaussian - gaussian.min()) / (gaussian.max() - gaussian.min() + 1e-8)
# Apply the attention weight
heatmap = gaussian * attention_weight
    # Map the heatmap through the 'jet' colormap. matplotlib returns RGB(A), so
    # convert to OpenCV's BGR channel order before blending with the frame.
    colormap = plt.get_cmap('jet')
    heatmap_colored = (colormap(heatmap)[:, :, :3] * 255).astype(np.uint8)
    heatmap_colored = cv2.cvtColor(heatmap_colored, cv2.COLOR_RGB2BGR)
    # Blend the heatmap into the original frame
    result = cv2.addWeighted(frame, 1 - alpha, heatmap_colored, alpha, 0)
    return result
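# Illustrative usage of apply_attention_heatmap (not executed by this script;
# the file names are placeholders):
#
#   frame = cv2.imread("some_frame.jpg")          # BGR uint8 image
#   vis = apply_attention_heatmap(frame, attention_weight=0.9, alpha=0.4)
#   cv2.imwrite("some_frame_heatmap.jpg", vis)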
def extract_keyframes_with_attention(sample_dir, video_path):
"""
Extract peak-feature keyframes and overlay the attention visualization.
Args:
sample_dir: Sample directory path (e.g., detailed_xxx/sample_0)
video_path: Original video path
"""
sample_dir = Path(sample_dir)
print(f"\nProcessing sample: {sample_dir.name}")
    # Check that the required files exist
mapping_file = sample_dir / "feature_frame_mapping.json"
weights_file = sample_dir / "attention_weights.npy"
if not mapping_file.exists():
print(f" ⚠ Mapping file not found: {mapping_file}")
return
if not weights_file.exists():
print(f" ⚠ Attention weights missing: {weights_file}")
return
if not os.path.exists(video_path):
print(f" ⚠ Video file not found: {video_path}")
return
    # Load the feature-to-frame mapping and the attention weights
with open(mapping_file, 'r') as f:
mapping_data = json.load(f)
attention_weights = np.load(weights_file)
# Create output directory
keyframes_dir = sample_dir / "attention_keyframes"
keyframes_dir.mkdir(exist_ok=True)
print(f" Feature count: {mapping_data['feature_count']}")
print(f" Original frame count: {mapping_data['original_frame_count']}")
print(f" Attention weight shape: {attention_weights.shape}")
    # Open the video
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
print(f" ✗ Failed to open video: {video_path}")
return
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
print(f" Total video frames: {total_frames}")
    # Build a feature-index -> frame mapping (use the middle frame of each window)
feature_to_frame = {}
for item in mapping_data['mapping']:
feature_idx = item['feature_index']
frame_start = item['frame_start']
frame_end = item['frame_end']
mid_frame = (frame_start + frame_end) // 2
feature_to_frame[feature_idx] = mid_frame
    # attention_weights is expected to have shape (num_glosses, num_features)
    num_glosses = attention_weights.shape[0] if attention_weights.ndim > 1 else 0
if num_glosses == 0:
print(" ⚠ Invalid attention weight dimensions")
cap.release()
return
saved_count = 0
for gloss_idx in range(num_glosses):
gloss_attention = attention_weights[gloss_idx] # shape: (num_features,)
peak_feature_idx = np.argmax(gloss_attention)
peak_attention = gloss_attention[peak_feature_idx]
if peak_feature_idx not in feature_to_frame:
print(f" ⚠ Gloss {gloss_idx}: feature {peak_feature_idx} missing frame mapping")
continue
frame_idx = feature_to_frame[peak_feature_idx]
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
ret, frame = cap.read()
if not ret:
print(f" ⚠ Gloss {gloss_idx}: unable to read frame {frame_idx}")
continue
frame_with_attention = apply_attention_heatmap(frame, peak_attention, alpha=0.4)
text = f"Gloss {gloss_idx} | Feature {peak_feature_idx} | Frame {frame_idx}"
attention_text = f"Attention: {peak_attention:.3f}"
cv2.rectangle(frame_with_attention, (0, 0), (frame.shape[1], 60), (0, 0, 0), -1)
cv2.putText(frame_with_attention, text, (10, 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
cv2.putText(frame_with_attention, attention_text, (10, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
output_filename = f"keyframe_{gloss_idx:03d}_feat{peak_feature_idx}_frame{frame_idx}_att{peak_attention:.3f}.jpg"
output_path = keyframes_dir / output_filename
cv2.imwrite(str(output_path), frame_with_attention)
saved_count += 1
cap.release()
print(f" ✓ Saved {saved_count} keyframes to: {keyframes_dir}")
# Create index file
index_file = keyframes_dir / "keyframes_index.txt"
with open(index_file, 'w') as f:
f.write("Attention Keyframe Index\n")
f.write(f"=" * 60 + "\n\n")
f.write(f"Sample directory: {sample_dir}\n")
f.write(f"Video path: {video_path}\n")
f.write(f"Total keyframes: {saved_count}\n\n")
f.write("Keyframe list:\n")
f.write(f"-" * 60 + "\n")
for gloss_idx in range(num_glosses):
gloss_attention = attention_weights[gloss_idx]
peak_feature_idx = np.argmax(gloss_attention)
peak_attention = gloss_attention[peak_feature_idx]
if peak_feature_idx in feature_to_frame:
frame_idx = feature_to_frame[peak_feature_idx]
filename = f"keyframe_{gloss_idx:03d}_feat{peak_feature_idx}_frame{frame_idx}_att{peak_attention:.3f}.jpg"
f.write(f"Gloss {gloss_idx:3d}: {filename}\n")
print(f" ✓ Index file written: {index_file}")
def main():
if len(sys.argv) < 3:
print("Usage: python extract_attention_keyframes.py <sample_dir> <video_path>")
print("Example: python extract_attention_keyframes.py detailed_xxx/sample_0 video.mp4")
sys.exit(1)
sample_dir = sys.argv[1]
video_path = sys.argv[2]
extract_keyframes_with_attention(sample_dir, video_path)
if __name__ == "__main__":
main()