# coding: utf-8
"""
Enhanced evaluation module with multiple metrics
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
sys.path.insert(0, os.path.dirname(__file__))

import tensorflow as tf

from utils import util, metric
from models import evalu


def eval_metrics_multi(trans, target_file, indices=None, remove_bpe=False):
    """
    Evaluate with multiple metrics: BLEU, OTEM, UTEM

    Returns:
        dict with keys: bleu4, bleu1, bleu2, bleu3, otem2, utem4
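
    Example (with a hypothetical reference path; the returned key set is
    the same whether or not valid references are found):
        >>> scores = eval_metrics_multi([['hello', 'world']], 'data/valid.ref')
        >>> sorted(scores.keys())
        ['bleu1', 'bleu2', 'bleu3', 'bleu4', 'otem2', 'utem4']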
    """
    target_valid_files = util.fetch_valid_ref_files(target_file)
    if target_valid_files is None:
        return {
            'bleu4': 0.0,
            'bleu1': 0.0,
            'bleu2': 0.0,
            'bleu3': 0.0,
            'otem2': 0.0,
            'utem4': 0.0
        }

    if indices is not None:
        trans = [data[1] for data in sorted(zip(indices, trans), key=lambda x: x[0])]

    # Load references
    references = []
    for ref_file in target_valid_files:
        cur_refs = tf.gfile.Open(ref_file).readlines()
        if remove_bpe:
            cur_refs = [line.replace("@@ ", "") for line in cur_refs]
        cur_refs = [line.strip().split() for line in cur_refs]
        references.append(cur_refs)

    # Transpose from per-file lists to per-sentence tuples of references
    references = list(zip(*references))

    # Process translations
    if remove_bpe:
        new_trans = []
        for line in trans:
            # Re-join tokens, strip the '@@ ' BPE markers, then re-split so
            # subword pieces merge back into whole words
            line = (' '.join(line)).replace('@@ ', '').split()
            new_trans.append(line)
        trans = new_trans

    # Calculate multiple metrics
    results = {}

    # BLEU-4 (default)
    results['bleu4'] = metric.bleu(trans, references, n=4)

    # BLEU-1, BLEU-2, BLEU-3
    results['bleu1'] = metric.bleu(trans, references, n=1)
    results['bleu2'] = metric.bleu(trans, references, n=2)
    results['bleu3'] = metric.bleu(trans, references, n=3)

    # OTEM-2 (Over-Translation Evaluation Metric): penalizes n-grams that the
    # hypothesis repeats more often than any reference; lower is better
    results['otem2'] = metric.otem(trans, references, n=2)

    # UTEM-4 (Under-Translation Evaluation Metric): penalizes reference
    # n-grams missing from the hypothesis; lower is better
    results['utem4'] = metric.utem(trans, references, n=4)

    return results


def eval_metrics_full(trans, target_file, indices=None, remove_bpe=False):
    """
    Full evaluation with all available metrics from eval/metrics.py.
    Requires sacrebleu and other optional dependencies.

    Returns:
        dict with comprehensive metrics
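
    Example (with a hypothetical reference path; falls back to
    eval_metrics_multi if the optional eval.metrics module is unavailable):
        results = eval_metrics_full(trans, 'data/valid.ref', remove_bpe=True)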
    """
    try:
        # Import eval metrics (may fail if dependencies not installed)
        from eval import metrics as eval_metrics

        target_valid_files = util.fetch_valid_ref_files(target_file)
        if target_valid_files is None:
            return {}

        if indices is not None:
            trans = [data[1] for data in sorted(zip(indices, trans), key=lambda x: x[0])]

        # Load references as strings
        references = []
        for ref_file in target_valid_files:
            cur_refs = [line.strip() for line in tf.gfile.Open(ref_file).readlines()]
            references.append(cur_refs)

        # The eval.metrics helpers expect a single flat list of reference
        # strings, so only the first reference file is used even when
        # several are available
        references = references[0]

        # Convert translations to strings
        hypotheses = [' '.join(t) if isinstance(t, list) else t for t in trans]

        if remove_bpe:
            hypotheses = [h.replace('@@ ', '') for h in hypotheses]
            references = [r.replace('@@ ', '') for r in references]

        results = {}

        # BLEU scores (BLEU-1 to BLEU-4)
        bleu_scores = eval_metrics.bleu(references, hypotheses)
        results.update(bleu_scores)

        # chrF score
        results['chrf'] = eval_metrics.chrf(references, hypotheses)

        # ROUGE score
        results['rouge'] = eval_metrics.rouge(references, hypotheses)

        # Token accuracy
        results['token_accuracy'] = eval_metrics.token_accuracy(references, hypotheses)

        # Sequence accuracy
        results['sequence_accuracy'] = eval_metrics.sequence_accuracy(references, hypotheses)

        # WER and related metrics (for sign language recognition)
        wer_scores = eval_metrics.wer_list(references, hypotheses)
        results.update(wer_scores)

        return results

    except ImportError as e:
        print("Warning: Could not import eval.metrics module: {}".format(e))
        print("Falling back to basic metrics...")
        return eval_metrics_multi(trans, target_file, indices, remove_bpe)
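

if __name__ == '__main__':
    # Minimal usage sketch, for illustration only. The hypotheses and the
    # reference path below are hypothetical placeholders; substitute real
    # decoder output and reference files.
    sample_trans = [
        ['the', 'cat', 'sat', 'on', 'the', 'mat'],
        ['a', 'dog', 'barked'],
    ]
    scores = eval_metrics_multi(sample_trans, 'data/valid.ref')
    for name in sorted(scores):
        print('{}: {}'.format(name, scores[name]))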