# coding: utf-8
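"""Decoding and evaluation utilities: batched beam-search decoding, BLEU
scoring, and attention-based frame-alignment analysis for sign language
translation."""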

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time
import re
import numpy as np
import tensorflow as tf

from utils import queuer, util, metric
from pathlib import Path


def strip_leading_index_tokens(tokens):
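    """Drop a leading all-digit index token, e.g. ['3', 'HELLO'] -> ['HELLO']."""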
    if not tokens:
        return tokens
    return tokens[1:] if tokens[0].isdigit() else tokens


def _extract_numpy_from_nested(obj):
    """Extract first numpy array from nested list/tuple structures."""
    if isinstance(obj, np.ndarray):
        return obj
    if isinstance(obj, (list, tuple)) and len(obj) > 0:
        for item in obj:
            extracted = _extract_numpy_from_nested(item)
            if extracted is not None:
                return extracted
    return None


def _flatten_attention_batches(attention_batches):
    """
    Convert raw attention fetches (list per eval batch, each possibly nested per GPU)
    into a per-sample list aligned with translation order.
    """
    flattened = []
    if not attention_batches:
        return flattened

    for batch_idx, batch in enumerate(attention_batches):
        arr = _extract_numpy_from_nested(batch)
        if arr is None:
            tf.logging.warning(f"[ATTN] Unable to extract numpy array from batch {batch_idx}")
            continue

        if arr.ndim == 4:
            # [time, batch, beam, src_len]: split along the batch axis
            batch_size = arr.shape[1]
            for b in range(batch_size):
                flattened.append(arr[:, b, :, :])
        elif arr.ndim in (2, 3):
            flattened.append(arr)
        else:
            tf.logging.warning(f"[ATTN] Unexpected attention ndim={arr.ndim} for batch {batch_idx}")

    return flattened


def _load_gloss_mapping():
    """Lazy-load ASLLRP gloss mapping (video_id -> text)."""
    cache = getattr(_load_gloss_mapping, "_cache", None)
    if cache is not None:
        return cache

    candidate_paths = [
        Path(__file__).resolve().parent.parent / "../ASLLRP_utterances_mapping.txt",
        Path(__file__).resolve().parent.parent / "ASLLRP_utterances_mapping.txt",
        Path(__file__).resolve().parent.parent / "output/ASLLRP_utterances_mapping.txt",
        Path(__file__).resolve().parent.parent / "../output/ASLLRP_utterances_mapping.txt",
    ]

    mapping = {}
    for cand in candidate_paths:
        cand = cand.resolve()
        if not cand.exists():
            continue
        try:
            with cand.open('r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line or ':' not in line:
                        continue
                    key, text = line.split(':', 1)
                    mapping[key.strip()] = text.strip()
            tf.logging.info(f"[GT] Loaded gloss mapping from {cand}")
            break
        except Exception as exc:
            tf.logging.warning(f"[GT] Failed to load mapping from {cand}: {exc}")
            mapping = {}

    _load_gloss_mapping._cache = mapping if mapping else None
    return _load_gloss_mapping._cache


def _lookup_ground_truth_text(video_path):
    """Return GT gloss string for the given video path if available."""
    if not video_path:
        return None
    mapping = _load_gloss_mapping()
    if not mapping:
        return None

    stem = Path(video_path).stem
    candidates = [stem]
    if '_' in stem:
        candidates.append(stem.split('_')[0])

    for candidate in candidates:
        if candidate in mapping:
            return mapping[candidate]
        # also try stripping leading zeros
        stripped = candidate.lstrip('0')
        if stripped and stripped in mapping:
            return mapping[stripped]
    return None


def decode_target_token(id_seq, vocab):
    """Convert sequence ids into tokens"""
    valid_id_seq = []
    for tok_id in id_seq:
        if tok_id == vocab.eos() \
                or tok_id == vocab.pad():
            break
        valid_id_seq.append(tok_id)
    return vocab.to_tokens(valid_id_seq)


def decode_hypothesis(seqs, scores, params, mask=None):
    """Generate decoded sequence from seqs"""
    if mask is None:
        mask = [1.] * len(seqs)

    hypoes = []
    marks = []
    for _seqs, _scores, _m in zip(seqs, scores, mask):
        if _m < 1.:
            continue

        for seq, score in zip(_seqs, _scores):
            # For now, use only the top-1 (best-beam) hypothesis
            best_seq = seq[0]
            best_score = score[0]

            hypo = decode_target_token(best_seq, params.tgt_vocab)
            mark = best_score

            hypoes.append(hypo)
            marks.append(mark)

    return hypoes, marks


def decoding(session, features, out_seqs, out_scores, dataset, params, out_attention=None):
    """Performing decoding with exising information"""
    tf.logging.info(f"[DEBUG] decoding called with out_attention={out_attention is not None}")
    if out_attention is not None:
        tf.logging.info(f"[DEBUG] out_attention type: {type(out_attention)}")

    translations = []
    scores = []
    indices = []
    attentions = [] if out_attention is not None else None

    eval_queue = queuer.EnQueuer(
        dataset.batcher(params.eval_batch_size,
                        buffer_size=params.buffer_size,
                        shuffle=False,
                        train=False),
        lambda x: x,
        worker_processes_num=params.process_num,
        input_queue_size=params.input_queue_size,
        output_queue_size=params.output_queue_size,
    )

    def _predict_one_batch(_data_on_gpu, _log_debug=False):
        feed_dicts = {}

        _step_indices = []
        for fidx, shard_data in enumerate(_data_on_gpu):
            # define feed_dict
            _feed_dict = {
                features[fidx]["image"]: shard_data['img'],
                features[fidx]["mask"]: shard_data['mask'],
                features[fidx]["source"]: shard_data['src'],
            }
            feed_dicts.update(_feed_dict)

            # collect data indices
            _step_indices.extend(shard_data['index'])

        # pick up valid outputs
        data_size = len(_data_on_gpu)
        valid_out_seqs = out_seqs[:data_size]
        valid_out_scores = out_scores[:data_size]

        # Prepare outputs to fetch
        fetch_list = [valid_out_seqs, valid_out_scores]
        if out_attention is not None:
            valid_out_attention = out_attention[:data_size]
            fetch_list.append(valid_out_attention)

        # Run session
        fetch_results = session.run(fetch_list, feed_dict=feed_dicts)
        _decode_seqs, _decode_scores = fetch_results[0], fetch_results[1]
        _decode_attention = fetch_results[2] if out_attention is not None else None

        # DEBUG: check what we got from session.run (first batch only, to avoid spam)
        if _decode_attention is not None and _log_debug:
            tf.logging.info(f"[DEBUG] _decode_attention type: {type(_decode_attention)}")
            if isinstance(_decode_attention, list):
                tf.logging.info(f"[DEBUG] _decode_attention is list, len: {len(_decode_attention)}")
                for i, item in enumerate(_decode_attention):
                    if item is not None:
                        tf.logging.info(f"[DEBUG]   item[{i}] type: {type(item)}, shape: {item.shape if hasattr(item, 'shape') else 'no shape'}")

        _step_translations, _step_scores = decode_hypothesis(
            _decode_seqs, _decode_scores, params
        )

        return _step_translations, _step_scores, _step_indices, _decode_attention

    very_begin_time = time.time()
    data_on_gpu = []
    for bidx, data in enumerate(eval_queue):
        if bidx == 0:
            # remove the data reading time
            very_begin_time = time.time()

        data_on_gpu.append(data)
        # when using multiple GPUs, wait until there is one data shard per GPU
        if len(params.gpus) > 0 and len(data_on_gpu) < len(params.gpus):
            continue

        start_time = time.time()
        step_outputs = _predict_one_batch(data_on_gpu, _log_debug=(bidx == 0))
        data_on_gpu = []

        translations.extend(step_outputs[0])
        scores.extend(step_outputs[1])
        indices.extend(step_outputs[2])
        if attentions is not None and step_outputs[3] is not None:
            attentions.append(step_outputs[3])

        tf.logging.info(
            "Decoding Batch {} using {:.3f} s, translating {} "
            "sentences using {:.3f} s in total".format(
                bidx, time.time() - start_time,
                len(translations), time.time() - very_begin_time
            )
        )

    if len(data_on_gpu) > 0:

        start_time = time.time()
        step_outputs = _predict_one_batch(data_on_gpu)

        translations.extend(step_outputs[0])
        scores.extend(step_outputs[1])
        indices.extend(step_outputs[2])
        if attentions is not None and step_outputs[3] is not None:
            attentions.append(step_outputs[3])

        tf.logging.info(
            "Decoding Batch {} using {:.3f} s, translating {} "
            "sentences using {:.3f} s in total".format(
                'final', time.time() - start_time,
                len(translations), time.time() - very_begin_time
            )
        )

    return translations, scores, indices, attentions
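
# A minimal usage sketch for `decoding` (the session, feature placeholders and
# decoder output tensors are built elsewhere in this repo; the names below are
# placeholders):
#
#   with tf.Session() as sess:
#       trans, scores, idxs, attns = decoding(
#           sess, features, out_seqs, out_scores, test_dataset, params,
#           out_attention=out_attn)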


def eval_metric(trans, target_file, indices=None, remove_bpe=False):
    """BLEU Evaluate """
    target_valid_files = util.fetch_valid_ref_files(target_file)
    if target_valid_files is None:
        return 0.0

    if indices is not None:
        trans = [data[1] for data in sorted(zip(indices, trans), key=lambda x: x[0])]

    references = []
    for ref_file in target_valid_files:
        cur_refs = tf.gfile.Open(ref_file).readlines()
        processed = []
        for line in cur_refs:
            tokens = line.strip().split()
            tokens = strip_leading_index_tokens(tokens)
            if remove_bpe:
                cleaned = (' '.join(tokens)).replace("@@ ", "").split()
                processed.append(cleaned)
            else:
                processed.append(tokens)
        references.append(processed)

    references = list(zip(*references))

    new_trans = []
    for line in trans:
        tokens = strip_leading_index_tokens(line)
        if remove_bpe:
            tokens = (' '.join(tokens)).replace("@@ ", "").split()
        new_trans.append(tokens)

    return metric.bleu(new_trans, references)
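
# Example call (the token lists and reference path below are hypothetical):
#   bleu = eval_metric([['HELLO', 'WORLD']], 'data/test.ref', remove_bpe=True)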


def dump_tanslation(tranes, output, indices=None, attentions=None, video_path=None):
    """save translation"""
    if indices is not None:
        tranes = [data[1] for data in
                  sorted(zip(indices, tranes), key=lambda x: x[0])]
    with tf.gfile.Open(output, 'w') as writer:
        for hypo in tranes:
            if isinstance(hypo, list):
                writer.write(' '.join(hypo) + "\n")
            else:
                writer.write(str(hypo) + "\n")
    tf.logging.info("Saving translations into {}".format(output))

    # DEBUG: Check attention status
    tf.logging.info(f"[DEBUG] attentions is None: {attentions is None}")
    if attentions is not None:
        tf.logging.info(f"[DEBUG] attentions type: {type(attentions)}, len: {len(attentions)}")

    # Save detailed attention analysis if available
    if attentions is not None and len(attentions) > 0:
        tf.logging.info("[DEBUG] Calling dump_detailed_attention_output")
        try:
            dump_detailed_attention_output(tranes, output, indices, attentions, video_path)
        except Exception as e:
            tf.logging.warning(f"Failed to save detailed attention output: {e}")
            import traceback
            tf.logging.warning(traceback.format_exc())
    else:
        tf.logging.info("[DEBUG] Skipping attention analysis (attentions is None or empty)")


def dump_translation_with_reference(tranes, output, ref_file, indices=None, remove_bpe=False,
                                     attentions=None, dataset=None):
    """
    Save translation with reference for easy comparison

    Args:
        tranes: Translation results
        output: Output file path
        ref_file: Reference file path
        indices: Sample indices
        remove_bpe: Whether to remove BPE
        attentions: Attention weights (list of numpy arrays) for frame alignment
        dataset: Dataset object for getting video frame counts

    Returns:
        dict: Coverage metrics {'complete_coverage': float, 'avg_iou': float}
    """
    import json
    import os
    import sys

    per_sample_attn = None
    if attentions is not None and len(attentions) > 0:
        per_sample_attn = _flatten_attention_batches(attentions)
        if len(per_sample_attn) == 0:
            per_sample_attn = None

    # Align attention list with translations
    if per_sample_attn is None:
        per_sample_attn = [None] * len(tranes)
    elif len(per_sample_attn) < len(tranes):
        per_sample_attn.extend([None] * (len(tranes) - len(per_sample_attn)))

    # Sort translations (and attentions) by sample index if provided
    if indices is not None:
        sorted_data = sorted(
            zip(indices, tranes, per_sample_attn),
            key=lambda x: x[0]
        )
        tranes = [data[1] for data in sorted_data]
        per_sample_attn = [data[2] for data in sorted_data]
        sorted_indices = [data[0] for data in sorted_data]
    else:
        sorted_indices = list(range(len(tranes)))

    # Load references
    references = []
    if tf.gfile.Exists(ref_file):
        refs = tf.gfile.Open(ref_file).readlines()
        for line in refs:
            tokens = strip_leading_index_tokens(line.strip().split())
            ref = ' '.join(tokens)
            if remove_bpe:
                ref = ref.replace("@@ ", "")
            references.append(ref)

    # Initialize coverage metrics
    all_complete_coverage = []
    all_iou = []

    # Try to load GT annotations and test info for ASLLRP
    gt_annotations = None
    test_info = None
    try:
        # Determine repo root from absolute output path
        output_abs = os.path.abspath(output)
        output_parts = output_abs.split(os.sep)
        if 'SignX' in output_parts:
            signx_idx = output_parts.index('SignX')
            repo_root = os.sep.join(output_parts[:signx_idx+1])
        else:
            # Fallback: search parents for directory named SignX
            repo_root = None
            current_path = Path(output_abs).parent
            for parent in [current_path] + list(current_path.parents):
                if parent.name == 'SignX':
                    repo_root = str(parent)
                    break
            if repo_root is None:
                raise RuntimeError("Cannot locate SignX repo root from output path.")

        # Load GT annotations
        gt_path = os.path.join(os.path.dirname(repo_root), 'ASLLRP_utterances_with_frames.json')
        if os.path.exists(gt_path):
            with open(gt_path, 'r') as f:
                gt_annotations = json.load(f)
            tf.logging.info(f"Loaded GT annotations from {gt_path}")

        # Load test info
        test_info_path = os.path.join(os.path.dirname(repo_root), 'CorrNet_Plus_modified/dataset/ASLLRP/test_info.npy')
        if os.path.exists(test_info_path):
            test_info = np.load(test_info_path, allow_pickle=True)
            tf.logging.info(f"Loaded test info from {test_info_path}")
    except Exception as e:
        tf.logging.warning(f"Could not load GT annotations or test info: {e}")

    # Import AttentionAnalyzer if we have attentions
    AttentionAnalyzer = None
    if attentions is not None and len(attentions) > 0 and gt_annotations is not None:
        try:
            # Add eval directory to path
            eval_dir = os.path.join(os.path.dirname(__file__), '..', 'eval')
            if os.path.exists(eval_dir) and eval_dir not in sys.path:
                sys.path.insert(0, eval_dir)
            from attention_analysis import AttentionAnalyzer as AA
            AttentionAnalyzer = AA
        except ImportError as e:
            tf.logging.warning(f"Could not import AttentionAnalyzer: {e}")

    # Save comparison file
    comparison_file = output.replace('.trans.txt', '.comparison.txt')
    if comparison_file == output:
        # avoid clobbering the translation file when the suffix differs
        comparison_file = output + '.comparison.txt'
    with tf.gfile.Open(comparison_file, 'w') as writer:
        writer.write("=" * 100 + "\n")
        writer.write("TRANSLATION COMPARISON (Hypothesis vs Reference)\n")
        writer.write("=" * 100 + "\n\n")

        for i, hypo in enumerate(tranes):
            # Format hypothesis
            if isinstance(hypo, list):
                hypo_str = ' '.join(hypo)
                hypo_tokens = hypo
            else:
                hypo_str = str(hypo)
                hypo_tokens = hypo_str.split()

            if remove_bpe:
                hypo_str = hypo_str.replace("@@ ", "")
                hypo_tokens = [t.replace("@@", "").strip() for t in hypo_tokens]
                hypo_tokens = [t for t in hypo_tokens if t]  # Remove empty

            # Get reference
            ref_str = references[i] if i < len(references) else "[NO REFERENCE]"
            ref_tokens = strip_leading_index_tokens(ref_str.split())
            ref_str = ' '.join(ref_tokens)

            # Calculate frame alignment if we have attention weights
            pred_frame_info = None
            gt_frame_info = None
            coverage_metrics = None

            sample_attn = per_sample_attn[i] if per_sample_attn and i < len(per_sample_attn) else None

            if (AttentionAnalyzer is not None and sample_attn is not None and
                test_info is not None and gt_annotations is not None):
                try:
                    sample_idx = sorted_indices[i]

                    # Get video ID from test_info
                    video_id = test_info[sample_idx]['video_id']

                    # Get GT annotations for this video
                    if video_id in gt_annotations:
                        gt_glosses = gt_annotations[video_id]['glosses']
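                        # Each GT entry provides a gloss plus a 24fps frame span,
                        # e.g. {'gloss': 'BOOK', 'start_24fps': 12, 'end_24fps': 30}
                        # (values illustrative)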

                        attn = sample_attn

                        # Get feature frame count from attention shape
                        if isinstance(attn, np.ndarray) and len(attn.shape) >= 2:
                            # Reduce dimensions to [time, src_len]
                            if len(attn.shape) == 4:
                                attn = attn[:, 0, 0, :]
                            elif len(attn.shape) == 3:
                                attn = attn[:, 0, :]

                            feature_frames = attn.shape[1]

                            # Use AttentionAnalyzer to compute frame ranges
                            analyzer = AttentionAnalyzer(
                                attentions=attn,
                                translation=' '.join(hypo_tokens),
                                video_frames=feature_frames
                            )

                            # Get predicted frame ranges
                            pred_frame_ranges = analyzer.word_frame_ranges

                            # Calculate coverage metrics
                            complete_count = 0
                            iou_scores = []

                            # Build GT gloss-to-frame mapping
                            gt_frame_map = {gt['gloss']: (gt['start_24fps'], gt['end_24fps'])
                                           for gt in gt_glosses}

                            for pred_info in pred_frame_ranges:
                                pred_gloss = pred_info['word']
                                pred_start = pred_info['start_frame']
                                pred_end = pred_info['end_frame']

                                if pred_gloss in gt_frame_map:
                                    gt_start, gt_end = gt_frame_map[pred_gloss]

                                    # Complete coverage: prediction contains GT
                                    if pred_start <= gt_start and pred_end >= gt_end:
                                        complete_count += 1

                                    # IoU calculation
                                    intersection_start = max(pred_start, gt_start)
                                    intersection_end = min(pred_end, gt_end)
                                    intersection = max(0, intersection_end - intersection_start + 1)

                                    union_start = min(pred_start, gt_start)
                                    union_end = max(pred_end, gt_end)
                                    union = union_end - union_start + 1

                                    iou = intersection / union if union > 0 else 0.0
                                    iou_scores.append(iou)
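                                    # e.g. pred [10, 20] vs GT [15, 30]:
                                    # intersection = 6, union = 21, IoU ~= 0.286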

                            # Calculate rates
                            num_matched = len(iou_scores)
                            if num_matched > 0:
                                complete_coverage_rate = complete_count / num_matched
                                avg_iou = np.mean(iou_scores)

                                all_complete_coverage.append(complete_coverage_rate)
                                all_iou.append(avg_iou)

                                coverage_metrics = {
                                    'complete_coverage_rate': complete_coverage_rate,
                                    'avg_iou': avg_iou,
                                    'num_matched': num_matched
                                }

                                pred_frame_info = pred_frame_ranges
                                gt_frame_info = gt_frame_map

                except Exception as e:
                    tf.logging.warning(f"Error calculating frame alignment for sample {i}: {e}")

            # Write comparison with frame information
            writer.write(f"[Sample {i}]\n")

            # Write HYP with frames if available
            if pred_frame_info:
                hyp_with_frames = []
                for pred_info in pred_frame_info:
                    gloss = pred_info['word']
                    start = pred_info['start_frame']
                    end = pred_info['end_frame']
                    hyp_with_frames.append(f"{gloss}[{start}-{end}]")
                writer.write(f"HYP: {' '.join(hyp_with_frames)}\n")
            else:
                writer.write(f"HYP: {hypo_str}\n")

            # Write REF with frames if available
            if gt_frame_info:
                ref_with_frames = []
                for token in ref_tokens:
                    if token in gt_frame_info:
                        start, end = gt_frame_info[token]
                        ref_with_frames.append(f"{token}[{start}-{end}]")
                    else:
                        ref_with_frames.append(token)
                writer.write(f"REF: {' '.join(ref_with_frames)}\n")
            else:
                writer.write(f"REF: {ref_str}\n")

            # Write coverage metrics if available
            if coverage_metrics:
                writer.write(f"Complete Coverage: {coverage_metrics['complete_coverage_rate']:.2%} "
                           f"({coverage_metrics['num_matched']} matched glosses)\n")
                writer.write(f"IoU Coverage: {coverage_metrics['avg_iou']:.4f}\n")

            writer.write("-" * 100 + "\n\n")

    tf.logging.info("Saving comparison into {}".format(comparison_file))

    # Return average coverage metrics
    result_metrics = {}
    if all_complete_coverage:
        result_metrics['complete_coverage'] = float(np.mean(all_complete_coverage))
        result_metrics['avg_iou'] = float(np.mean(all_iou))
        tf.logging.info(f"Coverage Metrics: Complete={result_metrics['complete_coverage']:.2%}, "
                       f"IoU={result_metrics['avg_iou']:.4f}")

    return result_metrics


def dump_detailed_attention_output(tranes, output, indices, attentions, video_path=None):
    """
    保存详细的attention分析结果

    Args:
        tranes: 翻译结果列表
        output: 输出文件路径
        indices: 样本索引
        attentions: attention权重数据(list of numpy arrays)
        video_path: 视频文件路径(可选,用于提取视频帧)
    """
    import os
    import sys
    from datetime import datetime

    # Get the output directory and base file name
    output_path = Path(output)
    base_dir = output_path.parent
    base_name = output_path.stem  # without extension

    # Create a timestamped directory for the detailed analysis
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    detail_dir = base_dir / f"detailed_{base_name}_{timestamp}"
    detail_dir.mkdir(parents=True, exist_ok=True)

    tf.logging.info(f"Saving detailed attention analysis to: {detail_dir}")

    # Re-order translations by sample index
    if indices is not None:
        sorted_items = sorted(zip(indices, tranes), key=lambda x: x[0])
        tranes = [item[1] for item in sorted_items]

    # Merge attention data across all batches
    try:
        flattened_attn = _flatten_attention_batches(attentions)
        if len(flattened_attn) == 0:
            tf.logging.warning("No valid attention data found")
            return

        tf.logging.info(f"[DEBUG] Found {len(flattened_attn)} valid attention samples")

        # If only a single video was decoded, use its file name as the sample
        # directory name for easier lookup
        video_based_name = None
        if video_path:
            try:
                candidate = Path(video_path).stem
                if candidate:
                    sanitized = re.sub(r'[^0-9A-Za-z._-]+', '_', candidate).strip('_')
                    if sanitized:
                        video_based_name = sanitized
            except Exception as exc:
                tf.logging.warning(f"Failed to derive video-based sample name: {exc}")

        use_video_name = video_based_name is not None and len(flattened_attn) == 1
        gt_text = _lookup_ground_truth_text(video_path) if video_path else None

        for sample_idx, sample_attn in enumerate(flattened_attn):
            if sample_idx >= len(tranes):
                break

            # Ensure shape is [time, src_len]
            if sample_attn.ndim == 4:
                sample_attn = sample_attn[:, 0, 0, :]
            elif sample_attn.ndim == 3:
                sample_attn = sample_attn[:, 0, :]

            # Get the translation for this sample
            trans = tranes[sample_idx]
            if isinstance(trans, list):
                trans = ' '.join(trans)
            trans_clean = trans.replace('@@ ', '')

            # Create a per-sample directory (use the video file name for
            # single-video inference; it is easier to find)
            if use_video_name and sample_idx == 0:
                sample_name = video_based_name
            else:
                sample_name = f"sample_{sample_idx:03d}"
            sample_dir = detail_dir / sample_name
            sample_dir.mkdir(exist_ok=True)

            # Save the attention weights as numpy data
            np.save(sample_dir / "attention_weights.npy", sample_attn)

            # Save the translation result
            with open(sample_dir / "translation.txt", 'w', encoding='utf-8') as f:
                f.write(f"With BPE: {trans}\n")
                f.write(f"Clean: {trans_clean}\n")
                if gt_text:
                    f.write(f"Ground Truth: {gt_text}\n")
                else:
                    f.write("Ground Truth: [NOT FOUND]\n")

            # Calculate and save feature-to-frame mapping
            if video_path and os.path.exists(video_path):
                try:
                    import cv2
                    import json

                    cap = cv2.VideoCapture(video_path)
                    original_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                    cap.release()

                    feature_count = sample_attn.shape[1]
                    frame_mapping = []
                    for feat_idx in range(feature_count):
                        start_frame = int(feat_idx * original_frame_count / feature_count)
                        end_frame = int((feat_idx + 1) * original_frame_count / feature_count)
                        frame_mapping.append({
                            "feature_index": feat_idx,
                            "frame_start": start_frame,
                            "frame_end": end_frame,
                            "frame_count": end_frame - start_frame
                        })
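                    # Worked example: 240 original frames and 60 features give a
                    # downsampling ratio of 4, so feat_idx 0 covers frames [0, 4)
                    # and feat_idx 59 covers frames [236, 240).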

                    mapping_data = {
                        "original_frame_count": original_frame_count,
                        "feature_count": feature_count,
                        "downsampling_ratio": original_frame_count / feature_count,
                        "mapping": frame_mapping
                    }

                    with open(sample_dir / "feature_frame_mapping.json", 'w') as f:
                        json.dump(mapping_data, f, indent=2)

                    tf.logging.info(f"  ✓ Feature-to-frame mapping saved ({original_frame_count} frames → {feature_count} features)")

                except Exception as e:
                    tf.logging.warning(f"Failed to generate feature-to-frame mapping: {e}")

            # Generate visualizations via the attention_analysis module
            try:
                script_dir = Path(__file__).parent.parent
                eval_dir = script_dir / "eval"
                if str(eval_dir) not in sys.path:
                    sys.path.insert(0, str(eval_dir))

                from attention_analysis import AttentionAnalyzer

                analyzer = AttentionAnalyzer(
                    attentions=sample_attn,
                    translation=trans_clean,
                    video_frames=sample_attn.shape[1],
                    video_path=video_path
                )
                analyzer.generate_all_visualizations(sample_dir)
                tf.logging.info(f"  ✓ Sample {sample_idx}: {sample_dir.name}")

            except Exception as e:
                tf.logging.warning(f"Failed to generate visualizations for sample {sample_idx}: {e}")

        tf.logging.info(f"Detailed attention analysis complete: {detail_dir}")
        tf.logging.info(f"  - Analyzed {min(len(flattened_attn), len(tranes))} samples")
        tf.logging.info(f"  - Output directory: {detail_dir}")

    except Exception as e:
        import traceback
        tf.logging.error(f"Error in dump_detailed_attention_output: {e}")
        tf.logging.error(traceback.format_exc())