# NOTE: extraction artifact removed here (git-blame hashes / file-size header rows
# that were not part of the original Python source).
#!/usr/bin/env python3
"""
Post-processing script: generate a gloss-to-frames visualization from an
existing detailed analysis result directory.

Usage:
    python generate_gloss_frames.py <detailed_prediction_dir> <video_path>
Example:
    python generate_gloss_frames.py detailed_prediction_20251225_170455 ./eval/tiny_test_data/videos/666.mp4
"""
import sys
import json
import numpy as np
import cv2
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.font_manager as fm
# Configure fonts capable of rendering CJK glyphs (Linux Chinese font first, DejaVu fallback).
plt.rcParams['font.sans-serif'] = ['WenQuanYi Micro Hei', 'DejaVu Sans']  # Linux CJK font
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign renderable with these fonts
def extract_video_frames(video_path, frame_indices):
    """Extract specific frames from a video by index.

    Args:
        video_path: Path/URL string accepted by ``cv2.VideoCapture``.
        frame_indices: Iterable of integer frame indices to extract.

    Returns:
        A ``(frames, total_frames)`` tuple where ``frames`` maps each
        (possibly clamped) frame index to an RGB ``ndarray`` and
        ``total_frames`` is the video's reported frame count (0 when the
        video cannot be opened).
    """
    cap = cv2.VideoCapture(video_path)
    frames = {}
    total_frames = 0
    try:
        if not cap.isOpened():
            # Fail soft: callers already tolerate missing frames.
            return frames, total_frames
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        for idx in frame_indices:
            # Clamp out-of-range indices to the last valid frame; skip
            # entirely when the video reports no frames at all.
            idx = min(idx, total_frames - 1)
            if idx < 0:
                continue
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if ret:
                # OpenCV decodes BGR; matplotlib expects RGB.
                frames[idx] = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    finally:
        # Always release the capture handle, even if decoding raises.
        cap.release()
    return frames, total_frames
def generate_gloss_to_frames_visualization(sample_dir, video_path, output_path):
    """Render a per-gloss frame-alignment figure for one sample.

    Reads ``frame_alignment.json``, ``translation.txt`` and
    ``attention_weights.npy`` from *sample_dir*, maps each gloss's feature
    frame range back to raw video frames, and saves a 3-column matplotlib
    figure (gloss | timing info | start/peak/end frames) to *output_path*.

    Args:
        sample_dir: Directory containing one sample's detailed analysis files.
        video_path: Path to the original video the sample was predicted from.
        output_path: Destination path for the rendered PNG.
    """
    sample_dir = Path(sample_dir)
    # 1. Load gloss -> feature-frame alignment data.
    #    UTF-8 is forced because these files contain Chinese text.
    with open(sample_dir / "frame_alignment.json", 'r', encoding='utf-8') as f:
        alignment_data = json.load(f)
    # 2. Load the translation; the cleaned gloss sequence is on the "Clean:" line.
    with open(sample_dir / "translation.txt", 'r', encoding='utf-8') as f:
        lines = f.readlines()
    gloss_sequence = None
    for line in lines:
        if line.startswith('Clean:'):
            gloss_sequence = line.replace('Clean:', '').strip()
            break
    if not gloss_sequence:
        print("无法找到翻译结果")
        return
    glosses = gloss_sequence.split()
    print(f"Gloss序列: {glosses}")
    # 3. Probe the source video for its frame count and FPS.
    cap = cv2.VideoCapture(str(video_path))
    total_video_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    print(f"视频总帧数: {total_video_frames}, FPS: {fps}")
    # 4. Infer the feature-frame count from the attention matrix
    #    (shape assumed [time, src_len, beam] — TODO confirm against the decoder).
    attention_weights = np.load(sample_dir / "attention_weights.npy")
    total_feature_frames = attention_weights.shape[1]
    if total_feature_frames <= 0:
        # Guard the division below; an empty attention matrix means there is
        # nothing meaningful to visualize.
        print(f"attention_weights 为空, 跳过: {sample_dir}")
        return
    # Map feature-frame indices back to raw video frames:
    # raw index = feature index * (total video frames / total feature frames),
    # clamped so an end index equal to total_feature_frames cannot map one
    # past the final video frame.
    scale_factor = total_video_frames / total_feature_frames
    last_video_frame = max(total_video_frames - 1, 0)

    def to_video_frame(feat_idx):
        # Scale and clamp a single feature-frame index.
        return min(int(feat_idx * scale_factor), last_video_frame)

    gloss_frames_info = []
    for gloss_data in alignment_data['frame_ranges']:
        gloss = gloss_data['word']
        start_feat_frame = gloss_data['start_frame']
        peak_feat_frame = gloss_data['peak_frame']
        end_feat_frame = gloss_data['end_frame']
        # Position of the gloss within the clip, as a percentage.
        relative_time_start = (start_feat_frame / total_feature_frames) * 100
        relative_time_end = (end_feat_frame / total_feature_frames) * 100
        gloss_frames_info.append({
            'gloss': gloss,
            'feature_frames': (start_feat_frame, peak_feat_frame, end_feat_frame),
            'video_frames': (to_video_frame(start_feat_frame),
                             to_video_frame(peak_feat_frame),
                             to_video_frame(end_feat_frame)),
            'relative_time': (relative_time_start, relative_time_end),
            'total_feature_frames': total_feature_frames,
            'confidence': gloss_data.get('confidence', 'unknown'),
            'avg_attention': gloss_data.get('avg_attention', 0.0)
        })
    # 5. Extract every distinct video frame needed across all glosses.
    all_frame_indices = set()
    for info in gloss_frames_info:
        all_frame_indices.update(info['video_frames'])
    print(f"提取 {len(all_frame_indices)} 个视频帧...")
    video_frames, _ = extract_video_frames(str(video_path), sorted(all_frame_indices))
    # 6. Render one row per gloss, three columns each.
    num_glosses = len(gloss_frames_info)
    fig = plt.figure(figsize=(16, num_glosses * 2.5))
    for i, info in enumerate(gloss_frames_info):
        gloss = info['gloss']
        feat_start, feat_peak, feat_end = info['feature_frames']
        vid_start, vid_peak, vid_end = info['video_frames']
        rel_start, rel_end = info['relative_time']
        total_feat = info['total_feature_frames']
        # Column 1: the gloss text itself.
        ax_text = plt.subplot(num_glosses, 3, i*3 + 1)
        ax_text.text(0.5, 0.5, gloss,
                     fontsize=20, fontweight='bold',
                     ha='center', va='center')
        ax_text.axis('off')
        # Column 2: timing / frame / confidence information.
        ax_info = plt.subplot(num_glosses, 3, i*3 + 2)
        confidence = info.get('confidence', 'unknown')
        avg_attn = info.get('avg_attention', 0.0)
        # Confidence level -> indicator color.
        conf_colors = {'high': 'green', 'medium': 'orange', 'low': 'red', 'unknown': 'gray'}
        conf_color = conf_colors.get(confidence, 'gray')
        info_text = f"""Feature idx: {feat_start} -> {feat_peak} -> {feat_end}
Rel. time: {rel_start:.1f}% -> {rel_end:.1f}%
Video frame: {vid_start} -> {vid_peak} -> {vid_end}
Total features: {total_feat}
Total frames: {total_video_frames}
Confidence: {confidence.upper()}
Attention: {avg_attn:.3f}"""
        ax_info.text(0.05, 0.5, info_text,
                     fontsize=9, family='monospace',
                     ha='left', va='center')
        # Confidence color bar on the right edge of the info cell.
        ax_info.add_patch(mpatches.Rectangle((0.85, 0.2), 0.1, 0.6,
                                             facecolor=conf_color, alpha=0.3))
        ax_info.axis('off')
        # Column 3: the Start | Peak | End video frames, concatenated horizontally.
        ax_frames = plt.subplot(num_glosses, 3, i*3 + 3)
        frames_to_show = []
        labels = []
        for idx, label in [(vid_start, 'Start'), (vid_peak, 'Peak'), (vid_end, 'End')]:
            # A frame may be missing if decoding failed for that index.
            if idx in video_frames:
                frames_to_show.append(video_frames[idx])
                labels.append(f"{label}\n(#{idx})")
        if frames_to_show:
            # Resize all frames to a common height before stacking.
            frame_height = 120
            resized_frames = []
            for frame in frames_to_show:
                h, w = frame.shape[:2]
                new_w = int(w * frame_height / h)
                resized = cv2.resize(frame, (new_w, frame_height))
                resized_frames.append(resized)
            combined = np.hstack(resized_frames)
            ax_frames.imshow(combined)
            # Label each sub-frame above its horizontal center.
            x_pos = 0
            for j, (frame, label) in enumerate(zip(resized_frames, labels)):
                w = frame.shape[1]
                ax_frames.text(x_pos + w//2, -10, label,
                               ha='center', va='bottom',
                               fontsize=9, fontweight='bold')
                x_pos += w
        ax_frames.axis('off')
    plt.tight_layout()
    plt.savefig(output_path, dpi=150, bbox_inches='tight')
    print(f"✓ 已生成可视化: {output_path}")
    # Close this specific figure so repeated calls don't accumulate figures.
    plt.close(fig)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("使用方法: python generate_gloss_frames.py <detailed_prediction_dir> <video_path>")
print("例如: python generate_gloss_frames.py detailed_prediction_20251225_170455 ./eval/tiny_test_data/videos/666.mp4")
sys.exit(1)
detailed_dir = Path(sys.argv[1])
video_path = sys.argv[2]
if not detailed_dir.exists():
print(f"错误: 目录不存在: {detailed_dir}")
sys.exit(1)
if not Path(video_path).exists():
print(f"错误: 视频文件不存在: {video_path}")
sys.exit(1)
# 处理所有样本
sample_dirs = sorted([d for d in detailed_dir.iterdir() if d.is_dir()])
for sample_dir in sample_dirs:
print(f"\n处理 {sample_dir.name}...")
output_path = sample_dir / "gloss_to_frames.png"
generate_gloss_to_frames_visualization(sample_dir, video_path, output_path)
print(f"\n✓ 完成!共处理 {len(sample_dirs)} 个样本")