|
|
|
|
|
""" |
|
|
后处理脚本:从已有的详细分析结果生成 gloss-to-frames 可视化 |
|
|
使用方法: |
|
|
python generate_gloss_frames.py <detailed_prediction_dir> <video_path> |
|
|
|
|
|
例如: |
|
|
python generate_gloss_frames.py detailed_prediction_20251225_170455 ./eval/tiny_test_data/videos/666.mp4 |
|
|
""" |
|
|
|
|
|
import sys |
|
|
import json |
|
|
import numpy as np |
|
|
import cv2 |
|
|
from pathlib import Path |
|
|
import matplotlib.pyplot as plt |
|
|
import matplotlib.patches as mpatches |
|
|
import matplotlib.font_manager as fm |
|
|
|
|
|
|
|
|
# Font setup so Chinese gloss labels render instead of tofu boxes:
# try WenQuanYi Micro Hei (CJK) first, fall back to DejaVu Sans for Latin text.
plt.rcParams['font.sans-serif'] = ['WenQuanYi Micro Hei', 'DejaVu Sans']

# Use an ASCII hyphen for minus signs; the Unicode minus is often missing
# from CJK fonts and would render as a placeholder glyph.
plt.rcParams['axes.unicode_minus'] = False
|
|
|
|
|
def extract_video_frames(video_path, frame_indices):
    """Extract specific frames from a video by index.

    Args:
        video_path: Path (string) to the video file, as accepted by cv2.
        frame_indices: Iterable of integer frame indices to extract.
            Out-of-range indices are clamped into ``[0, total_frames - 1]``.

    Returns:
        Tuple ``(frames, total_frames)`` where ``frames`` maps each
        (possibly clamped) index to an RGB image array and ``total_frames``
        is the frame count reported by the container.
    """
    cap = cv2.VideoCapture(video_path)
    # Bug fix: the original never verified the capture opened. A bad path
    # silently produced total_frames == 0 and then a negative seek index.
    if not cap.isOpened():
        print(f"无法打开视频: {video_path}")
        return {}, 0

    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    frames = {}
    for idx in frame_indices:
        # Clamp to the valid range (the max() guard also protects against
        # a reported frame count of 0).
        idx = max(0, min(idx, total_frames - 1))
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ret, frame = cap.read()
        if ret:
            # OpenCV decodes to BGR; matplotlib expects RGB.
            frames[idx] = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    cap.release()
    return frames, total_frames
|
|
|
|
|
def generate_gloss_to_frames_visualization(sample_dir, video_path, output_path):
    """Render a gloss-to-frames alignment figure for one sample.

    Reads ``frame_alignment.json``, ``translation.txt`` and
    ``attention_weights.npy`` from *sample_dir*, maps each gloss's feature
    frame range back onto raw video frames, and saves a matplotlib grid
    (gloss label | alignment stats | start/peak/end frame strip) to
    *output_path*.

    Args:
        sample_dir: Directory holding one sample's detailed prediction files.
        video_path: Path to the original video the prediction was made on.
        output_path: Destination image file (e.g. a ``.png`` path).
    """
    sample_dir = Path(sample_dir)

    # Bug fix: these files contain Chinese text. Open them as UTF-8
    # explicitly instead of relying on the locale's default encoding,
    # which crashes under a C/POSIX locale.
    with open(sample_dir / "frame_alignment.json", 'r', encoding='utf-8') as f:
        alignment_data = json.load(f)

    gloss_sequence = _read_clean_translation(sample_dir / "translation.txt")
    if not gloss_sequence:
        print("无法找到翻译结果")
        return

    glosses = gloss_sequence.split()
    print(f"Gloss序列: {glosses}")

    # Probe the raw video for its frame count and FPS (log only for FPS).
    cap = cv2.VideoCapture(str(video_path))
    total_video_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()

    print(f"视频总帧数: {total_video_frames}, FPS: {fps}")

    # attention_weights is (num_glosses, num_feature_frames); the second
    # dimension is the number of (temporally downsampled) feature frames.
    attention_weights = np.load(sample_dir / "attention_weights.npy")
    total_feature_frames = attention_weights.shape[1]

    gloss_frames_info = _build_gloss_frames_info(
        alignment_data, total_feature_frames, total_video_frames)

    # Collect every start/peak/end video frame so each is decoded only once.
    all_frame_indices = set()
    for info in gloss_frames_info:
        all_frame_indices.update(info['video_frames'])

    print(f"提取 {len(all_frame_indices)} 个视频帧...")
    video_frames, _ = extract_video_frames(str(video_path), sorted(all_frame_indices))

    # One row per gloss, three columns per row.
    num_glosses = len(gloss_frames_info)
    fig = plt.figure(figsize=(16, num_glosses * 2.5))

    for i, info in enumerate(gloss_frames_info):
        _draw_gloss_row(i, num_glosses, info, video_frames, total_video_frames)

    plt.tight_layout()
    plt.savefig(output_path, dpi=150, bbox_inches='tight')
    print(f"✓ 已生成可视化: {output_path}")
    plt.close()


def _read_clean_translation(translation_path):
    """Return the text after the 'Clean:' prefix in translation.txt, or None."""
    with open(translation_path, 'r', encoding='utf-8') as f:
        for line in f:
            if line.startswith('Clean:'):
                return line.replace('Clean:', '').strip()
    return None


def _build_gloss_frames_info(alignment_data, total_feature_frames, total_video_frames):
    """Map each gloss's feature-frame range onto raw video frame indices.

    Feature frames are a temporally downsampled view of the video, so a
    single linear scale factor maps feature indices back to raw frames.
    Returns a list of per-gloss dicts consumed by ``_draw_gloss_row``.
    """
    scale_factor = total_video_frames / total_feature_frames

    info_list = []
    for gloss_data in alignment_data['frame_ranges']:
        start_feat = gloss_data['start_frame']
        peak_feat = gloss_data['peak_frame']
        end_feat = gloss_data['end_frame']

        info_list.append({
            'gloss': gloss_data['word'],
            'feature_frames': (start_feat, peak_feat, end_feat),
            'video_frames': (int(start_feat * scale_factor),
                             int(peak_feat * scale_factor),
                             int(end_feat * scale_factor)),
            # Position of the gloss within the clip, as percentages.
            'relative_time': ((start_feat / total_feature_frames) * 100,
                              (end_feat / total_feature_frames) * 100),
            'total_feature_frames': total_feature_frames,
            'confidence': gloss_data.get('confidence', 'unknown'),
            'avg_attention': gloss_data.get('avg_attention', 0.0),
        })
    return info_list


def _draw_gloss_row(i, num_glosses, info, video_frames, total_video_frames):
    """Draw row *i* of the figure: gloss label, stats panel, frame strip."""
    gloss = info['gloss']
    feat_start, feat_peak, feat_end = info['feature_frames']
    vid_start, vid_peak, vid_end = info['video_frames']
    rel_start, rel_end = info['relative_time']
    total_feat = info['total_feature_frames']

    # Column 1: the gloss word itself.
    ax_text = plt.subplot(num_glosses, 3, i*3 + 1)
    ax_text.text(0.5, 0.5, gloss,
                 fontsize=20, fontweight='bold',
                 ha='center', va='center')
    ax_text.axis('off')

    # Column 2: numeric alignment details plus a confidence color swatch.
    ax_info = plt.subplot(num_glosses, 3, i*3 + 2)
    confidence = info.get('confidence', 'unknown')
    avg_attn = info.get('avg_attention', 0.0)

    conf_colors = {'high': 'green', 'medium': 'orange', 'low': 'red', 'unknown': 'gray'}
    conf_color = conf_colors.get(confidence, 'gray')

    info_text = f"""Feature idx: {feat_start} -> {feat_peak} -> {feat_end}
Rel. time: {rel_start:.1f}% -> {rel_end:.1f}%
Video frame: {vid_start} -> {vid_peak} -> {vid_end}

Total features: {total_feat}
Total frames: {total_video_frames}

Confidence: {confidence.upper()}
Attention: {avg_attn:.3f}"""

    ax_info.text(0.05, 0.5, info_text,
                 fontsize=9, family='monospace',
                 ha='left', va='center')
    ax_info.add_patch(mpatches.Rectangle((0.85, 0.2), 0.1, 0.6,
                                         facecolor=conf_color, alpha=0.3))
    ax_info.axis('off')

    # Column 3: the extracted start/peak/end frames side by side.
    # NOTE(review): video_frames keys may have been clamped by
    # extract_video_frames, so an out-of-range index is silently skipped
    # here — same behavior as the original.
    ax_frames = plt.subplot(num_glosses, 3, i*3 + 3)
    frames_to_show = []
    labels = []
    for idx, label in [(vid_start, 'Start'), (vid_peak, 'Peak'), (vid_end, 'End')]:
        if idx in video_frames:
            frames_to_show.append(video_frames[idx])
            labels.append(f"{label}\n(#{idx})")

    if frames_to_show:
        # Resize every frame to a common height so they can be hstacked.
        frame_height = 120
        resized_frames = []
        for frame in frames_to_show:
            h, w = frame.shape[:2]
            new_w = int(w * frame_height / h)
            resized_frames.append(cv2.resize(frame, (new_w, frame_height)))

        combined = np.hstack(resized_frames)
        ax_frames.imshow(combined)

        # Caption each frame with its role and raw frame number.
        x_pos = 0
        for frame, label in zip(resized_frames, labels):
            w = frame.shape[1]
            ax_frames.text(x_pos + w//2, -10, label,
                           ha='center', va='bottom',
                           fontsize=9, fontweight='bold')
            x_pos += w

    ax_frames.axis('off')
|
|
|
|
|
if __name__ == "__main__":
    # Expect exactly two CLI arguments: a prediction directory and a video.
    if len(sys.argv) != 3:
        print("使用方法: python generate_gloss_frames.py <detailed_prediction_dir> <video_path>")
        print("例如: python generate_gloss_frames.py detailed_prediction_20251225_170455 ./eval/tiny_test_data/videos/666.mp4")
        sys.exit(1)

    detailed_dir, video_path = Path(sys.argv[1]), sys.argv[2]

    # Validate both inputs up front before doing any work.
    if not detailed_dir.exists():
        print(f"错误: 目录不存在: {detailed_dir}")
        sys.exit(1)

    if not Path(video_path).exists():
        print(f"错误: 视频文件不存在: {video_path}")
        sys.exit(1)

    # Every sub-directory of the prediction dir is one sample to visualize.
    sample_dirs = sorted(d for d in detailed_dir.iterdir() if d.is_dir())

    for sample_dir in sample_dirs:
        print(f"\n处理 {sample_dir.name}...")
        generate_gloss_to_frames_visualization(
            sample_dir, video_path, sample_dir / "gloss_to_frames.png")

    print(f"\n✓ 完成!共处理 {len(sample_dirs)} 个样本")
|
|
|