FangSen9000
Attempted to submit 4 changes; although inference performance degraded, inference could still run.
1eb306c

# coding: utf-8
"""
Enhanced evaluation module with multiple metrics
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys

sys.path.insert(0, os.path.dirname(__file__))

import tensorflow as tf

from utils import util, metric
from models import evalu


def eval_metrics_multi(trans, target_file, indices=None, remove_bpe=False):
    """
    Evaluate with multiple metrics: BLEU, OTEM, UTEM.

    Returns:
        dict with keys: bleu4, bleu1, bleu2, bleu3, otem2, utem4
    """
    target_valid_files = util.fetch_valid_ref_files(target_file)
    if target_valid_files is None:
        return {
            'bleu4': 0.0,
            'bleu1': 0.0,
            'bleu2': 0.0,
            'bleu3': 0.0,
            'otem2': 0.0,
            'utem4': 0.0,
        }

    # Restore the original sentence order if decoding shuffled the batch.
    if indices is not None:
        trans = [data[1] for data in
                 sorted(zip(indices, trans), key=lambda x: x[0])]
    # Load references: one list of tokenized sentences per reference file.
    references = []
    for ref_file in target_valid_files:
        with tf.gfile.Open(ref_file) as f:
            cur_refs = f.readlines()
        if remove_bpe:
            cur_refs = [line.replace("@@ ", "") for line in cur_refs]
        cur_refs = [line.strip().split() for line in cur_refs]
        references.append(cur_refs)
    # Regroup so each entry holds all references for one sentence.
    references = list(zip(*references))
    # Process translations: undo BPE segmentation before scoring.
    if remove_bpe:
        new_trans = []
        for line in trans:
            line = (' '.join(line)).replace('@@ ', '').split()
            new_trans.append(line)
        trans = new_trans
    # Calculate multiple metrics.
    results = {}
    # BLEU-1 through BLEU-4 (BLEU-4 is the conventional default).
    results['bleu4'] = metric.bleu(trans, references, n=4)
    results['bleu1'] = metric.bleu(trans, references, n=1)
    results['bleu2'] = metric.bleu(trans, references, n=2)
    results['bleu3'] = metric.bleu(trans, references, n=3)
    # OTEM-2: Over-Translation Evaluation Metric (lower is better).
    results['otem2'] = metric.otem(trans, references, n=2)
    # UTEM-4: Under-Translation Evaluation Metric (lower is better).
    results['utem4'] = metric.utem(trans, references, n=4)
    return results


def eval_metrics_full(trans, target_file, indices=None, remove_bpe=False):
    """
    Full evaluation with all available metrics from eval/metrics.py.
    Requires sacrebleu and other dependencies.

    Returns:
        dict with comprehensive metrics
    """
    try:
        # Import eval metrics (may fail if dependencies are not installed).
        from eval import metrics as eval_metrics

        target_valid_files = util.fetch_valid_ref_files(target_file)
        if target_valid_files is None:
            return {}

        if indices is not None:
            trans = [data[1] for data in
                     sorted(zip(indices, trans), key=lambda x: x[0])]

        # Load references as plain strings.
        references = []
        for ref_file in target_valid_files:
            with tf.gfile.Open(ref_file) as f:
                references.append([line.strip() for line in f.readlines()])
        # These metrics expect a single reference per sentence, so use the
        # first reference file even when several are available.
        references = references[0]
        # Convert tokenized translations back to strings.
        hypotheses = [' '.join(t) if isinstance(t, list) else t for t in trans]
        if remove_bpe:
            hypotheses = [h.replace('@@ ', '') for h in hypotheses]
            references = [r.replace('@@ ', '') for r in references]

        results = {}
        # BLEU scores (BLEU-1 to BLEU-4).
        results.update(eval_metrics.bleu(references, hypotheses))
        # chrF score.
        results['chrf'] = eval_metrics.chrf(references, hypotheses)
        # ROUGE score.
        results['rouge'] = eval_metrics.rouge(references, hypotheses)
        # Token and sequence accuracy.
        results['token_accuracy'] = eval_metrics.token_accuracy(references, hypotheses)
        results['sequence_accuracy'] = eval_metrics.sequence_accuracy(references, hypotheses)
        # WER and related metrics (used for sign language recognition).
        results.update(eval_metrics.wer_list(references, hypotheses))
        return results
    except ImportError as e:
        print("Warning: Could not import eval.metrics module: {}".format(e))
        print("Falling back to basic metrics...")
        return eval_metrics_multi(trans, target_file, indices, remove_bpe)
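
For reference, a minimal usage sketch of the two entry points. The token lists and the data/dev.de path are made up for illustration; util.fetch_valid_ref_files is assumed to resolve the reference files exactly as in the module above.

# Hypothetical usage sketch, not part of the patch.
trans = [
    ['das', 'ist', 'ein', 'Test'],
    ['hallo', 'Welt'],
]
scores = eval_metrics_multi(trans, 'data/dev.de')
for name in ('bleu1', 'bleu2', 'bleu3', 'bleu4', 'otem2', 'utem4'):
    print('%s: %.4f' % (name, scores[name]))

# eval_metrics_full returns the richer metric set when eval/metrics.py and
# its dependencies (e.g. sacrebleu) are available, and otherwise falls back
# to eval_metrics_multi with the same arguments.
full_scores = eval_metrics_full(trans, 'data/dev.de')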
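The OTEM/UTEM names refer to over- and under-translation evaluation: OTEM counts hypothesis n-grams occurring more often than in the reference, UTEM counts reference n-grams the hypothesis misses. The toy unigram functions below only illustrate that counting idea; the actual metric.otem and metric.utem used above differ (higher-order n-grams, multiple references, penalty terms).

# Toy unigram illustration of over-/under-translation counting; this is
# NOT the metric.otem/metric.utem implementation used in the module above.
from collections import Counter

def toy_otem(hyp_tokens, ref_tokens):
    # Fraction of hypothesis tokens in excess of their reference counts
    # (over-translation; lower is better).
    hyp, ref = Counter(hyp_tokens), Counter(ref_tokens)
    excess = sum(max(c - ref[w], 0) for w, c in hyp.items())
    return float(excess) / max(len(hyp_tokens), 1)

def toy_utem(hyp_tokens, ref_tokens):
    # Fraction of reference tokens the hypothesis fails to cover
    # (under-translation; lower is better).
    hyp, ref = Counter(hyp_tokens), Counter(ref_tokens)
    missing = sum(max(c - hyp[w], 0) for w, c in ref.items())
    return float(missing) / max(len(ref_tokens), 1)

print(toy_otem('a a a b'.split(), 'a b'.split()))  # 0.5 (two extra 'a')
print(toy_utem('a'.split(), 'a b c'.split()))      # 0.666... ('b', 'c' missing)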