FangSen9000 Claude committed on
Commit ·
321f47a
1
Parent(s): a9be817
Add attention keyframe extraction with heatmap visualization
Browse files- 新增 eval/extract_attention_keyframes.py:提取peak feature对应的关键帧并叠加注意力热力图
* 从SLTUNET的注意力权重中找到每个gloss的peak feature
* 提取对应的视频帧
* 使用热力图(橙红=高注意力,蓝色=低注意力)可视化注意力强度
* 生成带索引的关键帧文件夹
- 更新 inference.sh:在推理流程中自动生成关键帧
* 在生成详细分析后自动调用关键帧提取脚本
* 更新输出信息,告知用户关键帧位置
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
- SignX/eval/extract_attention_keyframes.py +213 -0
- SignX/inference.sh +18 -0
SignX/eval/extract_attention_keyframes.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
提取peak feature对应的关键帧,并将注意力可视化叠加到帧上
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
import cv2
|
| 9 |
+
import numpy as np
|
| 10 |
+
import json
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
import matplotlib.pyplot as plt
|
| 13 |
+
from matplotlib import cm
|
| 14 |
+
|
| 15 |
+
def apply_attention_heatmap(frame, attention_weight, alpha=0.5):
    """Overlay a synthetic attention heatmap onto a video frame.

    The model only exposes a scalar attention weight per feature (no spatial
    attention map), so the visualization is approximated with a centered
    Gaussian whose spread shrinks as the attention weight grows.

    Args:
        frame: original frame as read by cv2, i.e. BGR uint8, shape (H, W, 3)
        attention_weight: scalar attention weight, expected in [0, 1]
        alpha: blending factor of the heatmap over the frame

    Returns:
        BGR uint8 frame of the same shape with the heatmap blended in.
    """
    h, w = frame.shape[:2]

    # Centered Gaussian: higher attention -> smaller sigma -> tighter focus.
    y, x = np.ogrid[:h, :w]
    center_y, center_x = h // 2, w // 2
    sigma = min(h, w) / 3 * (1.5 - attention_weight)
    gaussian = np.exp(-((x - center_x) ** 2 + (y - center_y) ** 2) / (2 * sigma ** 2))

    # Normalize to [0, 1], then scale intensity by the attention weight.
    gaussian = (gaussian - gaussian.min()) / (gaussian.max() - gaussian.min() + 1e-8)
    heatmap = gaussian * attention_weight

    # BUGFIX: the original ran matplotlib's 'jet' colormap (RGB output) and
    # blended it straight into a BGR frame, silently swapping red and blue —
    # high attention rendered *blue*, contradicting the intended legend
    # (orange/red = high, blue = low).  cv2.applyColorMap works natively in
    # BGR so the legend is now correct, and it avoids cm.get_cmap(), which
    # was removed in matplotlib >= 3.9.
    heatmap_u8 = np.clip(heatmap * 255.0, 0, 255).astype(np.uint8)
    heatmap_colored = cv2.applyColorMap(heatmap_u8, cv2.COLORMAP_JET)

    # Alpha-blend the heatmap over the original frame.
    result = cv2.addWeighted(frame, 1 - alpha, heatmap_colored, alpha, 0)

    return result
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def extract_keyframes_with_attention(sample_dir, video_path):
    """Extract the keyframe behind each gloss's peak feature and overlay attention.

    For every gloss, the feature with the highest attention weight is located,
    mapped back to a video frame via feature_frame_mapping.json, rendered with
    a heatmap overlay plus a text header, and saved as a JPEG.  An index file
    listing the keyframes actually written is created alongside them.

    Args:
        sample_dir: sample directory (e.g. detailed_xxx/sample_0) containing
            feature_frame_mapping.json and attention_weights.npy
        video_path: path to the original video
    """
    sample_dir = Path(sample_dir)

    print(f"\n处理样本: {sample_dir.name}")

    # Inputs produced by the earlier detailed-analysis step.
    mapping_file = sample_dir / "feature_frame_mapping.json"
    weights_file = sample_dir / "attention_weights.npy"

    if not mapping_file.exists():
        print(f" ⚠ 未找到映射文件: {mapping_file}")
        return

    if not weights_file.exists():
        print(f" ⚠ 未找到注意力权重: {weights_file}")
        return

    if not os.path.exists(video_path):
        print(f" ⚠ 视频文件不存在: {video_path}")
        return

    # Load the feature->frame mapping and attention weights.
    # encoding='utf-8' so CJK content round-trips regardless of locale.
    with open(mapping_file, 'r', encoding='utf-8') as f:
        mapping_data = json.load(f)

    attention_weights = np.load(weights_file)

    # Output directory for the rendered keyframes.
    keyframes_dir = sample_dir / "attention_keyframes"
    keyframes_dir.mkdir(exist_ok=True)

    print(f" 特征数量: {mapping_data['feature_count']}")
    print(f" 原始帧数: {mapping_data['original_frame_count']}")
    print(f" 注意力权重形状: {attention_weights.shape}")

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print(f" ✗ 无法打开视频: {video_path}")
        return

    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print(f" 视频总帧数: {total_frames}")

    # Map each feature index to the middle frame of its temporal window.
    feature_to_frame = {}
    for item in mapping_data['mapping']:
        feature_idx = item['feature_index']
        mid_frame = (item['frame_start'] + item['frame_end']) // 2
        feature_to_frame[feature_idx] = mid_frame

    # Weights are expected as (num_glosses, num_features); a 1-D array means
    # the detailed-analysis step produced something unexpected.
    num_glosses = attention_weights.shape[0] if len(attention_weights.shape) > 1 else 0

    if num_glosses == 0:
        print(f" ⚠ 注意力权重维度不正确")
        cap.release()
        return

    saved_count = 0
    # (gloss_idx, filename) pairs for keyframes actually written to disk,
    # collected here so the index below cannot drift from reality.
    saved_entries = []

    for gloss_idx in range(num_glosses):
        # Attention of this gloss over all features, shape (num_features,).
        gloss_attention = attention_weights[gloss_idx]

        # Peak feature = the feature this gloss attends to the most.
        peak_feature_idx = np.argmax(gloss_attention)
        peak_attention = gloss_attention[peak_feature_idx]

        if peak_feature_idx not in feature_to_frame:
            print(f" ⚠ Gloss {gloss_idx}: 特征 {peak_feature_idx} 没有对应的帧")
            continue

        frame_idx = feature_to_frame[peak_feature_idx]

        # Seek to and read the corresponding frame.
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        ret, frame = cap.read()

        if not ret:
            print(f" ⚠ Gloss {gloss_idx}: 无法读取帧 {frame_idx}")
            continue

        # Overlay the attention heatmap.
        frame_with_attention = apply_attention_heatmap(frame, peak_attention, alpha=0.4)

        # Draw a black header bar with gloss/feature/frame info.
        text = f"Gloss {gloss_idx} | Feature {peak_feature_idx} | Frame {frame_idx}"
        attention_text = f"Attention: {peak_attention:.3f}"

        cv2.rectangle(frame_with_attention, (0, 0), (frame.shape[1], 60), (0, 0, 0), -1)
        cv2.putText(frame_with_attention, text, (10, 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        cv2.putText(frame_with_attention, attention_text, (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)

        # Persist the keyframe.
        output_filename = f"keyframe_{gloss_idx:03d}_feat{peak_feature_idx}_frame{frame_idx}_att{peak_attention:.3f}.jpg"
        output_path = keyframes_dir / output_filename

        cv2.imwrite(str(output_path), frame_with_attention)
        saved_count += 1
        saved_entries.append((gloss_idx, output_filename))

    cap.release()

    print(f" ✓ 已保存 {saved_count} 个关键帧到: {keyframes_dir}")

    # Write the index of saved keyframes.
    # BUGFIX: the original recomputed `filename` here but then wrote the
    # literal "(unknown)" instead (the variable was never used), and it also
    # listed glosses whose frame read had failed.  We now write the entries
    # recorded during the save loop, so the index matches the files on disk.
    index_file = keyframes_dir / "keyframes_index.txt"
    with open(index_file, 'w', encoding='utf-8') as f:
        f.write(f"关键帧索引\n")
        f.write(f"=" * 60 + "\n\n")
        f.write(f"样本目录: {sample_dir}\n")
        f.write(f"视频路径: {video_path}\n")
        f.write(f"总关键帧数: {saved_count}\n\n")
        f.write(f"关键帧列表:\n")
        f.write(f"-" * 60 + "\n")

        for gloss_idx, filename in saved_entries:
            f.write(f"Gloss {gloss_idx:3d}: {filename}\n")

    print(f" ✓ 索引文件已创建: {index_file}")
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def main():
    """CLI entry point: validate arguments, then run keyframe extraction."""
    args = sys.argv[1:]

    # Guard clause: both positional arguments are mandatory.
    if len(args) < 2:
        print("用法: python extract_attention_keyframes.py <sample_dir> <video_path>")
        print("示例: python extract_attention_keyframes.py detailed_xxx/sample_0 video.mp4")
        sys.exit(1)

    sample_dir, video_path = args[0], args[1]
    extract_keyframes_with_attention(sample_dir, video_path)


if __name__ == "__main__":
    main()
|
SignX/inference.sh
CHANGED
|
@@ -327,6 +327,21 @@ if [ -f "$TEMP_DIR/prediction.txt" ]; then
|
|
| 327 |
echo " ⓘ generate_interactive_alignment.py 未找到,跳过交互式HTML生成"
|
| 328 |
fi
|
| 329 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 330 |
# 切换回 slt_tf1 环境
|
| 331 |
conda activate slt_tf1
|
| 332 |
done
|
|
@@ -350,6 +365,9 @@ if [ -f "$TEMP_DIR/prediction.txt" ]; then
|
|
| 350 |
echo " - Gloss-视频帧对应图 (gloss_to_frames.png)"
|
| 351 |
echo " - 分析报告 (analysis_report.txt)"
|
| 352 |
echo " - 原始数据 (attention_weights.npy)"
|
|
|
|
|
|
|
|
|
|
| 353 |
fi
|
| 354 |
|
| 355 |
echo ""
|
|
|
|
| 327 |
echo " ⓘ generate_interactive_alignment.py 未找到,跳过交互式HTML生成"
|
| 328 |
fi
|
| 329 |
|
| 330 |
+
# Step 4: extract keyframes and overlay the attention visualization
echo ""
echo -e "${BLUE}提取关键帧并叠加注意力可视化...${NC}"
# Skip gracefully when the extraction script is absent from this checkout.
if [ -f "$SCRIPT_DIR/eval/extract_attention_keyframes.py" ]; then
    # Run extraction for every sample_* directory produced by the analysis step.
    for sample_dir in "$dest_path"/sample_*; do
        if [ -d "$sample_dir" ]; then
            echo " 处理样本: $(basename "$sample_dir")"
            python "$SCRIPT_DIR/eval/extract_attention_keyframes.py" "$sample_dir" "$VIDEO_PATH"
        fi
    done
else
    echo " ⓘ extract_attention_keyframes.py 未找到,跳过关键帧提取"
fi
|
| 344 |
+
|
| 345 |
# 切换回 slt_tf1 环境
|
| 346 |
conda activate slt_tf1
|
| 347 |
done
|
|
|
|
| 365 |
echo " - Gloss-视频帧对应图 (gloss_to_frames.png)"
|
| 366 |
echo " - 分析报告 (analysis_report.txt)"
|
| 367 |
echo " - 原始数据 (attention_weights.npy)"
|
| 368 |
+
# Tell the user where the keyframe visualizations were written.
echo " - 关键帧可视化 (attention_keyframes/ 文件夹)"
echo " * 包含每个gloss的peak feature帧"
echo " * 注意力热力图叠加(橙红=高注意力,蓝色=低注意力)"
|
| 371 |
fi
|
| 372 |
|
| 373 |
echo ""
|