FangSen9000 commited on
Commit
7162aa8
·
1 Parent(s): 9803b71

Relative time and the original frame can now be displayed.

Browse files
SignX/detailed_prediction_20251225_192957/sample_000/analysis_report.txt ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ================================================================================
2
+ Sign Language Recognition - Attention分析报告
3
+ ================================================================================
4
+
5
+ 生成时间: 2025-12-25 19:30:01
6
+
7
+ 翻译结果:
8
+ --------------------------------------------------------------------------------
9
+ <unk> NOW-WEEK STUDENT IX HAVE NONE/NOTHING GO NONE/NOTHING
10
+
11
+ 视频信息:
12
+ --------------------------------------------------------------------------------
13
+ 总帧数: 24
14
+ 词数量: 8
15
+
16
+ Attention权重信息:
17
+ --------------------------------------------------------------------------------
18
+ 形状: (29, 8, 24)
19
+ - 解码步数: 29
20
+ - Batch大小: 8
21
+
22
+ 词-帧对应详情:
23
+ ================================================================================
24
+ No. Word Frames Peak Attn Conf
25
+ --------------------------------------------------------------------------------
26
+ 1 <unk> 0-23 0 0.068 low
27
+ 2 NOW-WEEK 2-3 2 0.479 medium
28
+ 3 STUDENT 1-23 21 0.134 low
29
+ 4 IX 1-23 3 0.092 low
30
+ 5 HAVE 4-6 5 0.274 medium
31
+ 6 NONE/NOTHING 7-8 7 0.324 medium
32
+ 7 GO 7-23 7 0.188 low
33
+ 8 NONE/NOTHING 8-8 8 0.733 high
34
+
35
+ ================================================================================
36
+
37
+ 统计摘要:
38
+ --------------------------------------------------------------------------------
39
+ 平均attention权重: 0.287
40
+ 高置信度词: 1 (12.5%)
41
+ 中置信度词: 3 (37.5%)
42
+ 低置信度词: 4 (50.0%)
43
+
44
+ ================================================================================
SignX/detailed_prediction_20251225_192957/sample_000/attention_heatmap.png ADDED

Git LFS Details

  • SHA256: 3d935d0668af8781f4ee17f433681751862d0b550f1f954dc230cba154698ac8
  • Pointer size: 130 Bytes
  • Size of remote file: 85.9 kB
SignX/detailed_prediction_20251225_192957/sample_000/attention_weights.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25434051e14c2b1741bf1376aaae36ca9a9fc276b01859a40b74bab3b603bcf8
3
+ size 22400
SignX/detailed_prediction_20251225_192957/sample_000/debug_video_path.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ video_path = '/common/users/sf895/output/huggingface_asllrp_repo/SignX/eval/tiny_test_data/videos/666.mp4'
2
+ video_path type = <class 'str'>
3
+ video_path is None: False
4
+ bool(video_path): True
SignX/detailed_prediction_20251225_192957/sample_000/frame_alignment.json ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "translation": "<unk> NOW-WEEK STUDENT IX HAVE NONE/NOTHING GO NONE/NOTHING",
3
+ "words": [
4
+ "<unk>",
5
+ "NOW-WEEK",
6
+ "STUDENT",
7
+ "IX",
8
+ "HAVE",
9
+ "NONE/NOTHING",
10
+ "GO",
11
+ "NONE/NOTHING"
12
+ ],
13
+ "total_video_frames": 24,
14
+ "frame_ranges": [
15
+ {
16
+ "word": "<unk>",
17
+ "start_frame": 0,
18
+ "end_frame": 23,
19
+ "peak_frame": 0,
20
+ "avg_attention": 0.06790952384471893,
21
+ "confidence": "low"
22
+ },
23
+ {
24
+ "word": "NOW-WEEK",
25
+ "start_frame": 2,
26
+ "end_frame": 3,
27
+ "peak_frame": 2,
28
+ "avg_attention": 0.4792596399784088,
29
+ "confidence": "medium"
30
+ },
31
+ {
32
+ "word": "STUDENT",
33
+ "start_frame": 1,
34
+ "end_frame": 23,
35
+ "peak_frame": 21,
36
+ "avg_attention": 0.13404551148414612,
37
+ "confidence": "low"
38
+ },
39
+ {
40
+ "word": "IX",
41
+ "start_frame": 1,
42
+ "end_frame": 23,
43
+ "peak_frame": 3,
44
+ "avg_attention": 0.09226731956005096,
45
+ "confidence": "low"
46
+ },
47
+ {
48
+ "word": "HAVE",
49
+ "start_frame": 4,
50
+ "end_frame": 6,
51
+ "peak_frame": 5,
52
+ "avg_attention": 0.27426692843437195,
53
+ "confidence": "medium"
54
+ },
55
+ {
56
+ "word": "NONE/NOTHING",
57
+ "start_frame": 7,
58
+ "end_frame": 8,
59
+ "peak_frame": 7,
60
+ "avg_attention": 0.3239603638648987,
61
+ "confidence": "medium"
62
+ },
63
+ {
64
+ "word": "GO",
65
+ "start_frame": 7,
66
+ "end_frame": 23,
67
+ "peak_frame": 7,
68
+ "avg_attention": 0.1878073364496231,
69
+ "confidence": "low"
70
+ },
71
+ {
72
+ "word": "NONE/NOTHING",
73
+ "start_frame": 8,
74
+ "end_frame": 8,
75
+ "peak_frame": 8,
76
+ "avg_attention": 0.7333312630653381,
77
+ "confidence": "high"
78
+ }
79
+ ],
80
+ "statistics": {
81
+ "avg_confidence": 0.2866059858351946,
82
+ "high_confidence_words": 1,
83
+ "medium_confidence_words": 3,
84
+ "low_confidence_words": 4
85
+ }
86
+ }
SignX/detailed_prediction_20251225_192957/sample_000/frame_alignment.png ADDED

Git LFS Details

  • SHA256: 99adca8a5afcf82daf82e99922efef400dcc8453cf6246d573c53a622bd6a2bf
  • Pointer size: 131 Bytes
  • Size of remote file: 125 kB
SignX/detailed_prediction_20251225_192957/sample_000/gloss_to_frames.png ADDED

Git LFS Details

  • SHA256: 3a1298e4d1aa177b43375e34cf6084f213d3ccc29c1544fcdf7008be91e82bf2
  • Pointer size: 132 Bytes
  • Size of remote file: 1.5 MB
SignX/detailed_prediction_20251225_192957/sample_000/translation.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ With BPE: <unk> NOW@@ -@@ WEEK STUDENT I@@ X HAVE NONE/NOTHING GO NONE/NOTHING
2
+ Clean: <unk> NOW-WEEK STUDENT IX HAVE NONE/NOTHING GO NONE/NOTHING
SignX/detailed_prediction_20251225_193758/sample_000/analysis_report.txt ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ================================================================================
2
+ Sign Language Recognition - Attention分析报告
3
+ ================================================================================
4
+
5
+ 生成时间: 2025-12-25 19:38:00
6
+
7
+ 翻译结果:
8
+ --------------------------------------------------------------------------------
9
+ <unk> NOW-WEEK STUDENT IX HAVE NONE/NOTHING GO NONE/NOTHING
10
+
11
+ 视频信息:
12
+ --------------------------------------------------------------------------------
13
+ 总帧数: 24
14
+ 词数量: 8
15
+
16
+ Attention权重信息:
17
+ --------------------------------------------------------------------------------
18
+ 形状: (29, 8, 24)
19
+ - 解码步数: 29
20
+ - Batch大小: 8
21
+
22
+ 词-帧对应详情:
23
+ ================================================================================
24
+ No. Word Frames Peak Attn Conf
25
+ --------------------------------------------------------------------------------
26
+ 1 <unk> 0-23 0 0.068 low
27
+ 2 NOW-WEEK 2-3 2 0.479 medium
28
+ 3 STUDENT 1-23 21 0.134 low
29
+ 4 IX 1-23 3 0.092 low
30
+ 5 HAVE 4-6 5 0.274 medium
31
+ 6 NONE/NOTHING 7-8 7 0.324 medium
32
+ 7 GO 7-23 7 0.188 low
33
+ 8 NONE/NOTHING 8-8 8 0.733 high
34
+
35
+ ================================================================================
36
+
37
+ 统计摘要:
38
+ --------------------------------------------------------------------------------
39
+ 平均attention权重: 0.287
40
+ 高置信度词: 1 (12.5%)
41
+ 中置信度词: 3 (37.5%)
42
+ 低置信度词: 4 (50.0%)
43
+
44
+ ================================================================================
SignX/detailed_prediction_20251225_193758/sample_000/attention_heatmap.png ADDED

Git LFS Details

  • SHA256: 3d935d0668af8781f4ee17f433681751862d0b550f1f954dc230cba154698ac8
  • Pointer size: 130 Bytes
  • Size of remote file: 85.9 kB
SignX/detailed_prediction_20251225_193758/sample_000/attention_weights.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25434051e14c2b1741bf1376aaae36ca9a9fc276b01859a40b74bab3b603bcf8
3
+ size 22400
SignX/detailed_prediction_20251225_193758/sample_000/debug_video_path.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ video_path = '/common/users/sf895/output/huggingface_asllrp_repo/SignX/eval/tiny_test_data/videos/666.mp4'
2
+ video_path type = <class 'str'>
3
+ video_path is None: False
4
+ bool(video_path): True
SignX/detailed_prediction_20251225_193758/sample_000/frame_alignment.json ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "translation": "<unk> NOW-WEEK STUDENT IX HAVE NONE/NOTHING GO NONE/NOTHING",
3
+ "words": [
4
+ "<unk>",
5
+ "NOW-WEEK",
6
+ "STUDENT",
7
+ "IX",
8
+ "HAVE",
9
+ "NONE/NOTHING",
10
+ "GO",
11
+ "NONE/NOTHING"
12
+ ],
13
+ "total_video_frames": 24,
14
+ "frame_ranges": [
15
+ {
16
+ "word": "<unk>",
17
+ "start_frame": 0,
18
+ "end_frame": 23,
19
+ "peak_frame": 0,
20
+ "avg_attention": 0.06790952384471893,
21
+ "confidence": "low"
22
+ },
23
+ {
24
+ "word": "NOW-WEEK",
25
+ "start_frame": 2,
26
+ "end_frame": 3,
27
+ "peak_frame": 2,
28
+ "avg_attention": 0.4792596399784088,
29
+ "confidence": "medium"
30
+ },
31
+ {
32
+ "word": "STUDENT",
33
+ "start_frame": 1,
34
+ "end_frame": 23,
35
+ "peak_frame": 21,
36
+ "avg_attention": 0.13404551148414612,
37
+ "confidence": "low"
38
+ },
39
+ {
40
+ "word": "IX",
41
+ "start_frame": 1,
42
+ "end_frame": 23,
43
+ "peak_frame": 3,
44
+ "avg_attention": 0.09226731956005096,
45
+ "confidence": "low"
46
+ },
47
+ {
48
+ "word": "HAVE",
49
+ "start_frame": 4,
50
+ "end_frame": 6,
51
+ "peak_frame": 5,
52
+ "avg_attention": 0.27426692843437195,
53
+ "confidence": "medium"
54
+ },
55
+ {
56
+ "word": "NONE/NOTHING",
57
+ "start_frame": 7,
58
+ "end_frame": 8,
59
+ "peak_frame": 7,
60
+ "avg_attention": 0.3239603638648987,
61
+ "confidence": "medium"
62
+ },
63
+ {
64
+ "word": "GO",
65
+ "start_frame": 7,
66
+ "end_frame": 23,
67
+ "peak_frame": 7,
68
+ "avg_attention": 0.1878073364496231,
69
+ "confidence": "low"
70
+ },
71
+ {
72
+ "word": "NONE/NOTHING",
73
+ "start_frame": 8,
74
+ "end_frame": 8,
75
+ "peak_frame": 8,
76
+ "avg_attention": 0.7333312630653381,
77
+ "confidence": "high"
78
+ }
79
+ ],
80
+ "statistics": {
81
+ "avg_confidence": 0.2866059858351946,
82
+ "high_confidence_words": 1,
83
+ "medium_confidence_words": 3,
84
+ "low_confidence_words": 4
85
+ }
86
+ }
SignX/detailed_prediction_20251225_193758/sample_000/frame_alignment.png ADDED

Git LFS Details

  • SHA256: 99adca8a5afcf82daf82e99922efef400dcc8453cf6246d573c53a622bd6a2bf
  • Pointer size: 131 Bytes
  • Size of remote file: 125 kB
SignX/detailed_prediction_20251225_193758/sample_000/translation.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ With BPE: <unk> NOW@@ -@@ WEEK STUDENT I@@ X HAVE NONE/NOTHING GO NONE/NOTHING
2
+ Clean: <unk> NOW-WEEK STUDENT IX HAVE NONE/NOTHING GO NONE/NOTHING
SignX/eval/attention_analysis.py CHANGED
@@ -22,7 +22,10 @@ Attention权重分析和可视化模块
22
  """
23
 
24
  import os
 
25
  import json
 
 
26
  import numpy as np
27
  from pathlib import Path
28
  from datetime import datetime
@@ -31,23 +34,44 @@ from datetime import datetime
31
  class AttentionAnalyzer:
32
  """Attention权重分析器"""
33
 
34
- def __init__(self, attentions, translation, video_frames, beam_sequences=None, beam_scores=None):
 
35
  """
36
  Args:
37
  attentions: numpy array, shape [time_steps, batch, beam, src_len]
38
  或 [time_steps, src_len] (已提取最佳beam)
39
  translation: str, 翻译结果(BPE已移除)
40
- video_frames: int, 视频总帧数
41
  beam_sequences: list, 所有beam的序列 (可选)
42
  beam_scores: list, 所有beam的分数 (可选)
 
 
 
43
  """
44
  self.attentions = attentions
45
  self.translation = translation
46
  self.words = translation.split()
47
- self.video_frames = video_frames
48
  self.beam_sequences = beam_sequences
49
  self.beam_scores = beam_scores
50
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  # 提取最佳路径的attention (batch=0, beam=0)
52
  if len(attentions.shape) == 4:
53
  self.attn_best = attentions[:, 0, 0, :] # [time, src_len]
@@ -156,6 +180,28 @@ class AttentionAnalyzer:
156
  # 5. 保存numpy数据(供进一步分析)
157
  np.save(output_dir / "attention_weights.npy", self.attentions)
158
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
  print(f"✓ 已生成 {len(list(output_dir.glob('*')))} 个文件")
160
 
161
  def plot_attention_heatmap(self, output_path):
@@ -371,6 +417,352 @@ class AttentionAnalyzer:
371
  print(f" ✓ {output_path.name}")
372
 
373
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
374
  def analyze_from_numpy_file(attention_file, translation, video_frames, output_dir):
375
  """
376
  从numpy文件加载attention并分析
 
22
  """
23
 
24
  import os
25
+ import io
26
  import json
27
+ import shutil
28
+ import subprocess
29
  import numpy as np
30
  from pathlib import Path
31
  from datetime import datetime
 
34
  class AttentionAnalyzer:
35
  """Attention权重分析器"""
36
 
37
+ def __init__(self, attentions, translation, video_frames, beam_sequences=None, beam_scores=None,
38
+ video_path=None, original_video_fps=30, original_video_total_frames=None):
39
  """
40
  Args:
41
  attentions: numpy array, shape [time_steps, batch, beam, src_len]
42
  或 [time_steps, src_len] (已提取最佳beam)
43
  translation: str, 翻译结果(BPE已移除)
44
+ video_frames: int, SMKD特征序列帧数
45
  beam_sequences: list, 所有beam的序列 (可选)
46
  beam_scores: list, 所有beam的分数 (可选)
47
+ video_path: str, 原始视频文件路径 (可选,用于提取视频帧)
48
+ original_video_fps: int, 原始视频FPS (默认30)
49
+ original_video_total_frames: int, 原始视频总帧数 (可选,如果不提供则从视频中读取)
50
  """
51
  self.attentions = attentions
52
  self.translation = translation
53
  self.words = translation.split()
54
+ self.video_frames = video_frames # SMKD特征帧数
55
  self.beam_sequences = beam_sequences
56
  self.beam_scores = beam_scores
57
 
58
+ # 原始视频相关
59
+ self.video_path = video_path
60
+ self.original_video_fps = original_video_fps
61
+ self.original_video_total_frames = original_video_total_frames
62
+ self._cv2_module = None
63
+ self._cv2_checked = False
64
+
65
+ # 如果提供了视频路径但没有提供总帧数,尝试读取
66
+ if video_path and original_video_total_frames is None:
67
+ metadata = self._read_video_metadata()
68
+ if metadata:
69
+ self.original_video_total_frames = metadata.get('frames')
70
+ if metadata.get('fps'):
71
+ self.original_video_fps = metadata['fps']
72
+ elif video_path:
73
+ print(f"Warning: 无法解析视频信息, Gloss-to-Frames 可视化将无法对齐实际帧 ({video_path})")
74
+
75
  # 提取最佳路径的attention (batch=0, beam=0)
76
  if len(attentions.shape) == 4:
77
  self.attn_best = attentions[:, 0, 0, :] # [time, src_len]
 
180
  # 5. 保存numpy数据(供进一步分析)
181
  np.save(output_dir / "attention_weights.npy", self.attentions)
182
 
183
+ # 6. Gloss-to-Frames可视化 (如果提供了视频路径)
184
+ # Write debug info to file
185
+ debug_file = output_dir / "debug_video_path.txt"
186
+ with open(debug_file, 'w') as f:
187
+ f.write(f"video_path = {repr(self.video_path)}\n")
188
+ f.write(f"video_path type = {type(self.video_path)}\n")
189
+ f.write(f"video_path is None: {self.video_path is None}\n")
190
+ f.write(f"bool(video_path): {bool(self.video_path)}\n")
191
+
192
+ print(f"[DEBUG] video_path = {self.video_path}")
193
+ if self.video_path:
194
+ print(f"[DEBUG] Generating gloss-to-frames visualization with video: {self.video_path}")
195
+ try:
196
+ self.generate_gloss_to_frames_visualization(output_dir / "gloss_to_frames.png")
197
+ print(f"[DEBUG] Successfully generated gloss_to_frames.png")
198
+ except Exception as e:
199
+ print(f"[DEBUG] Failed to generate gloss_to_frames.png: {e}")
200
+ import traceback
201
+ traceback.print_exc()
202
+ else:
203
+ print("[DEBUG] Skipping gloss-to-frames visualization (no video path provided)")
204
+
205
  print(f"✓ 已生成 {len(list(output_dir.glob('*')))} 个文件")
206
 
207
  def plot_attention_heatmap(self, output_path):
 
417
  print(f" ✓ {output_path.name}")
418
 
419
 
420
+ def _map_feature_frame_to_original(self, feature_frame_idx):
421
+ """
422
+ 将SMKD特征帧索引映射到原始视频帧索引
423
+
424
+ Args:
425
+ feature_frame_idx: SMKD特征帧索引 (0-based)
426
+
427
+ Returns:
428
+ int: 原始视频帧索引,如果无法映射则返回None
429
+ """
430
+ if self.original_video_total_frames is None:
431
+ return None
432
+
433
+ # 计算降采样率
434
+ downsample_ratio = self.original_video_total_frames / self.video_frames
435
+
436
+ # 映射到原始视频帧
437
+ original_frame_idx = int(feature_frame_idx * downsample_ratio)
438
+
439
+ return min(original_frame_idx, self.original_video_total_frames - 1)
440
+
441
+ def _extract_video_frames(self, frame_indices):
442
+ """
443
+ 从视频中提取指定索引的帧
444
+
445
+ Args:
446
+ frame_indices: list of int, 要提取的帧索引列表
447
+
448
+ Returns:
449
+ dict: {frame_idx: numpy_array}, 帧索引到图像数据的映射
450
+ """
451
+ if not self.video_path:
452
+ return {}
453
+
454
+ cv2 = self._get_cv2_module()
455
+ if cv2 is not None:
456
+ return self._extract_frames_with_cv2(cv2, frame_indices)
457
+
458
+ return self._extract_frames_with_ffmpeg(frame_indices)
459
+
460
+ def _get_cv2_module(self):
461
+ """惰性加载cv2, 缓存导入结果"""
462
+ if self._cv2_checked:
463
+ return self._cv2_module
464
+
465
+ try:
466
+ import cv2
467
+ self._cv2_module = cv2
468
+ except ImportError:
469
+ self._cv2_module = None
470
+ finally:
471
+ self._cv2_checked = True
472
+
473
+ if self._cv2_module is None:
474
+ print("Warning: opencv-python 未安装, 将尝试使用 ffmpeg 提取视频帧")
475
+ return self._cv2_module
476
+
477
+ def _extract_frames_with_cv2(self, cv2, frame_indices):
478
+ """使用opencv提取视频帧"""
479
+ frames = {}
480
+ cap = cv2.VideoCapture(self.video_path)
481
+
482
+ if not cap.isOpened():
483
+ print(f"Warning: Cannot open video file: {self.video_path}")
484
+ return {}
485
+
486
+ for frame_idx in sorted(frame_indices):
487
+ cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
488
+ ret, frame = cap.read()
489
+ if ret:
490
+ frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
491
+ frames[frame_idx] = frame_rgb
492
+
493
+ cap.release()
494
+ return frames
495
+
496
+ def _extract_frames_with_ffmpeg(self, frame_indices):
497
+ """使用ffmpeg + Pillow提取视频帧(在opencv缺失时调用)"""
498
+ if shutil.which("ffmpeg") is None:
499
+ print("Warning: 未找到 ffmpeg, 无法提取视频帧")
500
+ return {}
501
+
502
+ try:
503
+ from PIL import Image
504
+ except ImportError:
505
+ print("Warning: Pillow 未安装, 无法解码ffmpeg输出的图像")
506
+ return {}
507
+
508
+ frames = {}
509
+ for frame_idx in sorted(frame_indices):
510
+ cmd = [
511
+ "ffmpeg",
512
+ "-v", "error",
513
+ "-i", str(self.video_path),
514
+ "-vf", f"select=eq(n\\,{frame_idx})",
515
+ "-vframes", "1",
516
+ "-f", "image2pipe",
517
+ "-vcodec", "png",
518
+ "-"
519
+ ]
520
+ try:
521
+ result = subprocess.run(
522
+ cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
523
+ )
524
+ if not result.stdout:
525
+ continue
526
+ image = Image.open(io.BytesIO(result.stdout)).convert("RGB")
527
+ frames[frame_idx] = np.array(image)
528
+ except subprocess.CalledProcessError as e:
529
+ print(f"Warning: ffmpeg 提取帧 {frame_idx} 失败: {e}")
530
+ except Exception as ex:
531
+ print(f"Warning: 解码帧 {frame_idx} 失败: {ex}")
532
+
533
+ if frames:
534
+ print(f" ✓ 使用 ffmpeg 提取了 {len(frames)} 帧")
535
+ else:
536
+ print(" ⓘ ffmpeg 未能提取任何帧")
537
+ return frames
538
+
539
def generate_gloss_to_frames_visualization(self, output_path):
    """
    Generate the gloss-to-frames visualization image.
    Layout: one row per gloss
      Col 1: gloss text
      Col 2: relative-time and frame-index info
      Col 3: thumbnails of the video frames in that span

    Args:
        output_path: path of the output image
    """
    if not self.video_path:
        print(" ⓘ Skipping gloss-to-frames visualization (no video path provided)")
        return

    # Imported lazily so the analyzer still works without matplotlib installed.
    try:
        import matplotlib.pyplot as plt
        import matplotlib.gridspec as gridspec
    except ImportError:
        print("Warning: matplotlib not installed")
        return

    # Collect every original-video frame index we will need to extract.
    # NOTE(review): assumes self.word_frame_ranges was populated earlier by the
    # alignment step — confirm the caller always runs that first.
    all_original_frames = set()
    for word_info in self.word_frame_ranges:
        # Feature-frame range for this word.
        start_feat = word_info['start_frame']
        end_feat = word_info['end_frame']
        peak_feat = word_info['peak_frame']

        # Map each key feature frame onto the original video.
        for feat_idx in [start_feat, peak_feat, end_feat]:
            orig_idx = self._map_feature_frame_to_original(feat_idx)
            if orig_idx is not None:
                all_original_frames.add(orig_idx)

    # Extract the required video frames in one batch.
    print(f" 提取 {len(all_original_frames)} 个视频帧...")
    video_frames_dict = self._extract_video_frames(list(all_original_frames))

    if not video_frames_dict:
        print(" ⓘ No video frames extracted, skipping visualization")
        return

    # Build the figure: one row per word, three columns (gloss | info | frames).
    n_words = len(self.words)
    fig = plt.figure(figsize=(20, 3 * n_words))
    gs = gridspec.GridSpec(n_words, 3, width_ratios=[1.5, 2, 6], hspace=0.3, wspace=0.2)

    for row_idx, (word, word_info) in enumerate(zip(self.words, self.word_frame_ranges)):
        # Column 1: gloss text.
        ax_gloss = fig.add_subplot(gs[row_idx, 0])
        ax_gloss.text(0.5, 0.5, word, fontsize=24, weight='bold',
                      ha='center', va='center', wrap=True)
        ax_gloss.axis('off')

        # Column 2: timing and frame info.
        ax_info = fig.add_subplot(gs[row_idx, 1])

        # Feature-frame info.
        feat_start = word_info['start_frame']
        feat_end = word_info['end_frame']
        feat_peak = word_info['peak_frame']

        # Relative time within the clip (0-100%).
        rel_start = (feat_start / self.video_frames) * 100
        rel_end = (feat_end / self.video_frames) * 100
        rel_peak = (feat_peak / self.video_frames) * 100

        info_text = f"Feature Frames:\n"
        info_text += f" Range: {feat_start}-{feat_end}\n"
        info_text += f" Peak: {feat_peak}\n\n"
        info_text += f"Relative Time:\n"
        info_text += f" Range: {rel_start:.1f}%-{rel_end:.1f}%\n"
        info_text += f" Peak: {rel_peak:.1f}%\n"

        # Append the original-video mapping when the frame count is known.
        if self.original_video_total_frames:
            orig_start = self._map_feature_frame_to_original(feat_start)
            orig_end = self._map_feature_frame_to_original(feat_end)
            orig_peak = self._map_feature_frame_to_original(feat_peak)
            info_text += f"\nOriginal Video:\n"
            info_text += f" Total: {self.original_video_total_frames} frames\n"
            info_text += f" Range: {orig_start}-{orig_end}\n"
            info_text += f" Peak: {orig_peak}\n"

        ax_info.text(0.05, 0.5, info_text, fontsize=10, family='monospace',
                     va='center', ha='left')
        ax_info.axis('off')

        # Column 3: video frames.
        ax_frames = fig.add_subplot(gs[row_idx, 2])

        # Choose the frames to display: start, peak, end.
        frames_to_show = []
        labels_to_show = []

        for feat_idx, label in [(feat_start, 'Start'), (feat_peak, 'Peak'), (feat_end, 'End')]:
            orig_idx = self._map_feature_frame_to_original(feat_idx)
            if orig_idx is not None and orig_idx in video_frames_dict:
                frames_to_show.append(video_frames_dict[orig_idx])
                labels_to_show.append(f"{label}\nF{orig_idx}")

        if frames_to_show:
            # Concatenate the frames horizontally into one strip.
            combined = np.hstack(frames_to_show)
            ax_frames.imshow(combined)

            # Add a label above each tile, centered over its width.
            frame_width = frames_to_show[0].shape[1]
            for i, label in enumerate(labels_to_show):
                x_pos = (i + 0.5) * frame_width
                ax_frames.text(x_pos, -20, label, fontsize=10, weight='bold',
                               ha='center', va='top', color='blue')
        else:
            ax_frames.text(0.5, 0.5, "No frames available",
                           ha='center', va='center', transform=ax_frames.transAxes)

        ax_frames.axis('off')

    plt.suptitle(f"Gloss-to-Frames Alignment\nTranslation: {self.translation}",
                 fontsize=16, weight='bold', y=0.995)

    plt.savefig(output_path, dpi=150, bbox_inches='tight', facecolor='white')
    plt.close()

    print(f" ✓ {Path(output_path).name}")
666
+
667
+ def _read_video_metadata(self):
668
+ """尝试读取原始视频的帧数和fps"""
669
+ metadata = self._read_metadata_with_cv2()
670
+ if metadata:
671
+ return metadata
672
+ return self._read_metadata_with_ffprobe()
673
+
674
+ def _read_metadata_with_cv2(self):
675
+ cv2 = self._get_cv2_module()
676
+ if cv2 is None:
677
+ return None
678
+
679
+ cap = cv2.VideoCapture(self.video_path)
680
+ if not cap.isOpened():
681
+ return None
682
+
683
+ total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
684
+ fps = cap.get(cv2.CAP_PROP_FPS)
685
+ cap.release()
686
+
687
+ if total_frames <= 0:
688
+ return None
689
+
690
+ return {'frames': total_frames, 'fps': fps or self.original_video_fps}
691
+
692
def _read_metadata_with_ffprobe(self):
    """Probe the first video stream via ffprobe.

    Returns:
        dict | None: {'frames': int, 'fps': float} on success; None when
        ffprobe is unavailable, fails, or the frame count cannot be derived.
    """
    if shutil.which("ffprobe") is None:
        return None

    cmd = [
        "ffprobe",
        "-v", "error",
        "-select_streams", "v:0",
        "-show_entries", "stream=nb_frames,r_frame_rate,avg_frame_rate,duration",
        "-of", "json",
        str(self.video_path)
    ]

    try:
        result = subprocess.run(
            cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
        )
    except subprocess.CalledProcessError:
        return None

    try:
        info = json.loads(result.stdout)
    except json.JSONDecodeError:
        return None

    streams = info.get("streams") or []
    if not streams:
        return None

    stream = streams[0]
    total_frames = stream.get("nb_frames")
    fps = stream.get("avg_frame_rate") or stream.get("r_frame_rate")
    duration = stream.get("duration")

    fps_value = self._parse_ffprobe_fps(fps)
    total_frames_value = None

    # nb_frames is reported as a string; only trust it when it is a plain integer.
    if isinstance(total_frames, str) and total_frames.isdigit():
        total_frames_value = int(total_frames)

    # Fallback: derive the frame count from duration * fps when nb_frames is absent.
    if total_frames_value is None and duration and fps_value:
        try:
            total_frames_value = int(round(float(duration) * fps_value))
        except ValueError:
            total_frames_value = None

    if total_frames_value is None:
        return None

    # Fall back to the configured default fps when ffprobe gave no usable rate.
    return {'frames': total_frames_value, 'fps': fps_value or self.original_video_fps}
742
+
743
+ @staticmethod
744
+ def _parse_ffprobe_fps(rate_str):
745
+ """解析ffprobe输出的帧率字符串,例如'30000/1001'"""
746
+ if not rate_str or rate_str in ("0/0", "0"):
747
+ return None
748
+
749
+ if "/" in rate_str:
750
+ num, denom = rate_str.split("/", 1)
751
+ try:
752
+ num = float(num)
753
+ denom = float(denom)
754
+ if denom == 0:
755
+ return None
756
+ return num / denom
757
+ except ValueError:
758
+ return None
759
+
760
+ try:
761
+ return float(rate_str)
762
+ except ValueError:
763
+ return None
764
+
765
+
766
  def analyze_from_numpy_file(attention_file, translation, video_frames, output_dir):
767
  """
768
  从numpy文件加载attention并分析
SignX/eval/generate_gloss_frames.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ 后处理脚本:从已有的详细分析结果生成 gloss-to-frames 可视化
4
+ 使用方法:
5
+ python generate_gloss_frames.py <detailed_prediction_dir> <video_path>
6
+
7
+ 例如:
8
+ python generate_gloss_frames.py detailed_prediction_20251225_170455 ./eval/tiny_test_data/videos/666.mp4
9
+ """
10
+
11
+ import sys
12
+ import json
13
+ import numpy as np
14
+ import cv2
15
+ from pathlib import Path
16
+ import matplotlib.pyplot as plt
17
+ import matplotlib.patches as mpatches
18
+
19
def extract_video_frames(video_path, frame_indices):
    """Grab the frames at the given indices from a video file.

    Indices past the end of the video are clamped to the last frame.

    Returns:
        tuple: ({frame_idx: RGB numpy array}, total_frame_count)
    """
    cap = cv2.VideoCapture(video_path)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    extracted = {}
    for requested in frame_indices:
        # Clamp out-of-range requests onto the final frame.
        idx = min(requested, total_frames - 1)
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ok, bgr = cap.read()
        if ok:
            # OpenCV decodes BGR; convert to RGB for matplotlib.
            extracted[idx] = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

    cap.release()
    return extracted, total_frames
36
+
37
def generate_gloss_to_frames_visualization(sample_dir, video_path, output_path):
    """Render a gloss-to-frames alignment figure for one sample directory.

    Reads frame_alignment.json / translation.txt / attention_weights.npy from
    sample_dir, maps each gloss's feature-frame range onto original video
    frames, and saves a per-gloss figure (gloss | timing info | key frames)
    to output_path.

    Args:
        sample_dir: directory holding one sample's analysis artifacts.
        video_path: path of the original video file.
        output_path: where to write the resulting PNG.
    """
    sample_dir = Path(sample_dir)

    # 1. Load the word-to-frame alignment data.
    with open(sample_dir / "frame_alignment.json", 'r') as f:
        alignment_data = json.load(f)

    # 2. Read the cleaned (BPE removed) translation line.
    with open(sample_dir / "translation.txt", 'r') as f:
        lines = f.readlines()
    gloss_sequence = None
    for line in lines:
        if line.startswith('Clean:'):
            gloss_sequence = line.replace('Clean:', '').strip()
            break

    if not gloss_sequence:
        print("无法找到翻译结果")
        return

    glosses = gloss_sequence.split()
    print(f"Gloss序列: {glosses}")

    # 3. Query the original video's frame count and fps.
    cap = cv2.VideoCapture(str(video_path))
    total_video_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()

    print(f"视频总帧数: {total_video_frames}, FPS: {fps}")

    # 4. Extract each gloss's feature-frame range from the alignment data.
    gloss_frames_info = []

    # Infer the feature-frame count from the saved attention weights.
    # BUGFIX: the array is stored as [time_steps, batch, src_len] — e.g.
    # (29, 8, 24) for a 24-frame feature sequence (see analysis_report.txt) —
    # so the feature-frame count is the LAST axis. The previous shape[1]
    # picked the batch axis and corrupted the frame mapping.
    attention_weights = np.load(sample_dir / "attention_weights.npy")
    total_feature_frames = attention_weights.shape[-1]

    # original frame index = feature frame index * (video frames / feature frames)
    scale_factor = total_video_frames / total_feature_frames

    for gloss_data in alignment_data['frame_ranges']:
        gloss = gloss_data['word']
        start_feat_frame = gloss_data['start_frame']
        peak_feat_frame = gloss_data['peak_frame']
        end_feat_frame = gloss_data['end_frame']

        # Map onto original video frames.
        start_video_frame = int(start_feat_frame * scale_factor)
        peak_video_frame = int(peak_feat_frame * scale_factor)
        end_video_frame = int(end_feat_frame * scale_factor)

        # Relative time within the clip (%).
        relative_time_start = (start_feat_frame / total_feature_frames) * 100
        relative_time_end = (end_feat_frame / total_feature_frames) * 100

        gloss_frames_info.append({
            'gloss': gloss,
            'feature_frames': (start_feat_frame, peak_feat_frame, end_feat_frame),
            'video_frames': (start_video_frame, peak_video_frame, end_video_frame),
            'relative_time': (relative_time_start, relative_time_end),
            'total_feature_frames': total_feature_frames,
            'confidence': gloss_data.get('confidence', 'unknown'),
            'avg_attention': gloss_data.get('avg_attention', 0.0)
        })

    # 5. Extract all needed video frames in one pass.
    all_frame_indices = set()
    for info in gloss_frames_info:
        all_frame_indices.update(info['video_frames'])

    print(f"提取 {len(all_frame_indices)} 个视频帧...")
    video_frames, _ = extract_video_frames(str(video_path), sorted(all_frame_indices))

    # 6. Build the visualization: one row per gloss, three columns.
    num_glosses = len(gloss_frames_info)
    fig = plt.figure(figsize=(16, num_glosses * 2.5))

    for i, info in enumerate(gloss_frames_info):
        gloss = info['gloss']
        feat_start, feat_peak, feat_end = info['feature_frames']
        vid_start, vid_peak, vid_end = info['video_frames']
        rel_start, rel_end = info['relative_time']
        total_feat = info['total_feature_frames']

        # Column 1: gloss text.
        ax_text = plt.subplot(num_glosses, 3, i*3 + 1)
        ax_text.text(0.5, 0.5, gloss,
                     fontsize=20, fontweight='bold',
                     ha='center', va='center')
        ax_text.axis('off')

        # Column 2: timing and frame info.
        ax_info = plt.subplot(num_glosses, 3, i*3 + 2)
        confidence = info.get('confidence', 'unknown')
        avg_attn = info.get('avg_attention', 0.0)

        info_text = f"""特征帧: {feat_start} → {feat_peak} → {feat_end}
相对时间: {rel_start:.1f}% → {rel_end:.1f}%
原始帧: {vid_start} → {vid_peak} → {vid_end}

总特征帧: {total_feat}
总视频帧: {total_video_frames}

置信度: {confidence}
注意力: {avg_attn:.3f}"""

        ax_info.text(0.1, 0.5, info_text,
                     fontsize=10, family='monospace',
                     ha='left', va='center')
        ax_info.axis('off')

        # Column 3: start | peak | end frames, concatenated horizontally.
        ax_frames = plt.subplot(num_glosses, 3, i*3 + 3)

        frames_to_show = []
        labels = []
        for idx, label in [(vid_start, 'Start'), (vid_peak, 'Peak'), (vid_end, 'End')]:
            if idx in video_frames:
                frames_to_show.append(video_frames[idx])
                labels.append(f"{label}\n(#{idx})")

        if frames_to_show:
            # Resize every frame to a common height before stacking.
            frame_height = 120
            resized_frames = []
            for frame in frames_to_show:
                h, w = frame.shape[:2]
                new_w = int(w * frame_height / h)
                resized_frames.append(cv2.resize(frame, (new_w, frame_height)))

            combined = np.hstack(resized_frames)
            ax_frames.imshow(combined)

            # Label each tile, centered over its horizontal span.
            x_pos = 0
            for frame, label in zip(resized_frames, labels):
                w = frame.shape[1]
                ax_frames.text(x_pos + w//2, -10, label,
                               ha='center', va='bottom',
                               fontsize=9, fontweight='bold')
                x_pos += w

        ax_frames.axis('off')

    plt.tight_layout()
    plt.savefig(output_path, dpi=150, bbox_inches='tight')
    print(f"✓ 已生成可视化: {output_path}")
    plt.close()
194
+
195
if __name__ == "__main__":
    # CLI: regenerate gloss-to-frames figures for every sample in a prediction dir.
    if len(sys.argv) != 3:
        print("使用方法: python generate_gloss_frames.py <detailed_prediction_dir> <video_path>")
        print("例如: python generate_gloss_frames.py detailed_prediction_20251225_170455 ./eval/tiny_test_data/videos/666.mp4")
        sys.exit(1)

    detailed_dir = Path(sys.argv[1])
    video_path = sys.argv[2]

    # Validate both inputs before doing any work.
    if not detailed_dir.exists():
        print(f"错误: 目录不存在: {detailed_dir}")
        sys.exit(1)
    if not Path(video_path).exists():
        print(f"错误: 视频文件不存在: {video_path}")
        sys.exit(1)

    # Process every sample_* directory in deterministic order.
    sample_dirs = sorted(detailed_dir.glob("sample_*"))
    for sample_dir in sample_dirs:
        print(f"\n处理 {sample_dir.name}...")
        generate_gloss_to_frames_visualization(
            sample_dir, video_path, sample_dir / "gloss_to_frames.png"
        )

    print(f"\n✓ 完成!共处理 {len(sample_dirs)} 个样本")
SignX/inference.sh CHANGED
@@ -204,6 +204,7 @@ cat > "$TEMP_DIR/infer_config.py" <<EOF
204
  'gpus': [0],
205
  'remove_bpe': True,
206
  'collect_attention_weights': True,
 
207
  }
208
  EOF
209
 
@@ -245,6 +246,15 @@ if [ -f "$TEMP_DIR/prediction.txt" ]; then
245
  # 统计样本数量
246
  sample_count=$(find "$dest_path" -maxdepth 1 -type d -name "sample_*" | wc -l)
247
  echo " ✓ 已保存 $sample_count 个样本的详细分析到: $dest_path"
 
 
 
 
 
 
 
 
 
248
  done
249
  fi
250
 
@@ -263,6 +273,7 @@ if [ -f "$TEMP_DIR/prediction.txt" ]; then
263
  echo "Attention分析包含:"
264
  echo " - 注意力权重热图 (attention_heatmap.png)"
265
  echo " - 词-帧对齐图 (word_frame_alignment.png)"
 
266
  echo " - 分析报告 (analysis_report.txt)"
267
  echo " - 原始数据 (attention_weights.npy)"
268
  fi
 
204
  'gpus': [0],
205
  'remove_bpe': True,
206
  'collect_attention_weights': True,
207
+ 'inference_video_path': '$VIDEO_PATH',
208
  }
209
  EOF
210
 
 
246
  # 统计样本数量
247
  sample_count=$(find "$dest_path" -maxdepth 1 -type d -name "sample_*" | wc -l)
248
  echo " ✓ 已保存 $sample_count 个样本的详细分析到: $dest_path"
249
+
250
+ # 后处理:生成 gloss-to-frames 可视化
251
+ echo ""
252
+ echo -e "${BLUE}生成 Gloss-to-Frames 可视化...${NC}"
253
+ if [ -f "$SCRIPT_DIR/eval/generate_gloss_frames.py" ]; then
254
+ python "$SCRIPT_DIR/eval/generate_gloss_frames.py" "$dest_path" "$VIDEO_PATH" 2>&1 | grep -E "(处理|提取|生成|完成|✓)"
255
+ else
256
+ echo " ⓘ generate_gloss_frames.py 未找到,跳过后处理"
257
+ fi
258
  done
259
  fi
260
 
 
273
  echo "Attention分析包含:"
274
  echo " - 注意力权重热图 (attention_heatmap.png)"
275
  echo " - 词-帧对齐图 (word_frame_alignment.png)"
276
+ echo " - Gloss-视频帧对应图 (gloss_to_frames.png)"
277
  echo " - 分析报告 (analysis_report.txt)"
278
  echo " - 原始数据 (attention_weights.npy)"
279
  fi
SignX/inference_output.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ <unk> NOW@@ -@@ WEEK STUDENT I@@ X HAVE NONE/NOTHING GO NONE/NOTHING
SignX/inference_output.txt.clean ADDED
@@ -0,0 +1 @@
 
 
1
+ <unk> NOW-WEEK STUDENT IX HAVE NONE/NOTHING GO NONE/NOTHING
SignX/main.py CHANGED
@@ -480,7 +480,9 @@ def evaluate(params):
480
  )
481
 
482
  # save translation
483
- evalu.dump_tanslation(tranes, params.test_output, indices=indices, attentions=attentions)
 
 
484
 
485
  return bleu
486
 
@@ -572,4 +574,6 @@ def inference(params):
572
  )
573
 
574
  # save translation
575
- evalu.dump_tanslation(tranes, params.test_output, indices=indices, attentions=attentions)
 
 
 
480
  )
481
 
482
  # save translation
483
+ # Get video path from params if available (for test mode with inference video)
484
+ video_path = getattr(params, 'inference_video_path', None)
485
+ evalu.dump_tanslation(tranes, params.test_output, indices=indices, attentions=attentions, video_path=video_path)
486
 
487
  return bleu
488
 
 
574
  )
575
 
576
  # save translation
577
+ # Get video path from params if available (for inference mode)
578
+ video_path = getattr(params, 'inference_video_path', None)
579
+ evalu.dump_tanslation(tranes, params.test_output, indices=indices, attentions=attentions, video_path=video_path)
SignX/models/evalu.py CHANGED
@@ -198,7 +198,7 @@ def eval_metric(trans, target_file, indices=None, remove_bpe=False):
198
  return metric.bleu(trans, references)
199
 
200
 
201
- def dump_tanslation(tranes, output, indices=None, attentions=None):
202
  """save translation"""
203
  if indices is not None:
204
  tranes = [data[1] for data in
@@ -220,7 +220,7 @@ def dump_tanslation(tranes, output, indices=None, attentions=None):
220
  if attentions is not None and len(attentions) > 0:
221
  tf.logging.info("[DEBUG] Calling dump_detailed_attention_output")
222
  try:
223
- dump_detailed_attention_output(tranes, output, indices, attentions)
224
  except Exception as e:
225
  tf.logging.warning(f"Failed to save detailed attention output: {e}")
226
  import traceback
@@ -279,7 +279,7 @@ def dump_translation_with_reference(tranes, output, ref_file, indices=None, remo
279
  tf.logging.info("Saving comparison into {}".format(comparison_file))
280
 
281
 
282
- def dump_detailed_attention_output(tranes, output, indices, attentions):
283
  """
284
  保存详细的attention分析结果
285
 
@@ -288,6 +288,7 @@ def dump_detailed_attention_output(tranes, output, indices, attentions):
288
  output: 输出文件路径
289
  indices: 样本索引
290
  attentions: attention权重数据(list of numpy arrays)
 
291
  """
292
  import os
293
  import sys
@@ -329,16 +330,24 @@ def dump_detailed_attention_output(tranes, output, indices, attentions):
329
 
330
  # 检查是否所有元素都是numpy array
331
  # Note: Each element in attentions is a list (one per GPU), so we need to extract from it
 
 
 
 
 
 
 
 
 
332
  all_attentions = []
333
- for attn_batch in attentions:
334
  if attn_batch is not None:
335
- # Handle both list (multi-GPU) and numpy array (already processed) cases
336
- if isinstance(attn_batch, list):
337
- # Extract first element (GPU 0's result)
338
- if len(attn_batch) > 0 and isinstance(attn_batch[0], np.ndarray):
339
- all_attentions.append(attn_batch[0])
340
- elif isinstance(attn_batch, np.ndarray):
341
- all_attentions.append(attn_batch)
342
 
343
  if len(all_attentions) == 0:
344
  tf.logging.warning("No valid attention data found")
@@ -394,7 +403,8 @@ def dump_detailed_attention_output(tranes, output, indices, attentions):
394
  analyzer = AttentionAnalyzer(
395
  attentions=sample_attn,
396
  translation=trans_clean,
397
- video_frames=video_frames
 
398
  )
399
 
400
  analyzer.generate_all_visualizations(sample_dir)
 
198
  return metric.bleu(trans, references)
199
 
200
 
201
+ def dump_tanslation(tranes, output, indices=None, attentions=None, video_path=None):
202
  """save translation"""
203
  if indices is not None:
204
  tranes = [data[1] for data in
 
220
  if attentions is not None and len(attentions) > 0:
221
  tf.logging.info("[DEBUG] Calling dump_detailed_attention_output")
222
  try:
223
+ dump_detailed_attention_output(tranes, output, indices, attentions, video_path)
224
  except Exception as e:
225
  tf.logging.warning(f"Failed to save detailed attention output: {e}")
226
  import traceback
 
279
  tf.logging.info("Saving comparison into {}".format(comparison_file))
280
 
281
 
282
+ def dump_detailed_attention_output(tranes, output, indices, attentions, video_path=None):
283
  """
284
  保存详细的attention分析结果
285
 
 
288
  output: 输出文件路径
289
  indices: 样本索引
290
  attentions: attention权重数据(list of numpy arrays)
291
+ video_path: 视频文件路径(可选,用于提取视频帧)
292
  """
293
  import os
294
  import sys
 
330
 
331
  # 检查是否所有元素都是numpy array
332
  # Note: Each element in attentions is a list (one per GPU), so we need to extract from it
333
+ def extract_numpy_array(obj):
334
+ """Recursively extract numpy array from nested lists"""
335
+ if isinstance(obj, np.ndarray):
336
+ return obj
337
+ elif isinstance(obj, list) and len(obj) > 0:
338
+ return extract_numpy_array(obj[0])
339
+ else:
340
+ return None
341
+
342
  all_attentions = []
343
+ for idx, attn_batch in enumerate(attentions):
344
  if attn_batch is not None:
345
+ extracted = extract_numpy_array(attn_batch)
346
+ if extracted is not None:
347
+ tf.logging.info(f"[DEBUG] attn_batch[{idx}] extracted shape: {extracted.shape}")
348
+ all_attentions.append(extracted)
349
+ else:
350
+ tf.logging.info(f"[DEBUG] attn_batch[{idx}] could not extract numpy array")
 
351
 
352
  if len(all_attentions) == 0:
353
  tf.logging.warning("No valid attention data found")
 
403
  analyzer = AttentionAnalyzer(
404
  attentions=sample_attn,
405
  translation=trans_clean,
406
+ video_frames=video_frames,
407
+ video_path=video_path
408
  )
409
 
410
  analyzer.generate_all_visualizations(sample_dir)
SignX/run.py CHANGED
@@ -40,6 +40,9 @@ global_params = tc.training.HParams(
40
  # collect attention weights during inference for detailed analysis
41
  collect_attention_weights=False, # Disabled by default, enable when needed
42
 
 
 
 
43
  # separately encoding textual and sign video until `sep_layer`
44
  sep_layer=0,
45
  # source/target BPE codes and dropout rate => used for BPE-dropout
 
40
  # collect attention weights during inference for detailed analysis
41
  collect_attention_weights=False, # Disabled by default, enable when needed
42
 
43
+ # video path for inference (used to extract video frames for visualization)
44
+ inference_video_path=None,
45
+
46
  # separately encoding textual and sign video until `sep_layer`
47
  sep_layer=0,
48
  # source/target BPE codes and dropout rate => used for BPE-dropout