| { |
| "paper_id": "D11-1042", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:31:57.213380Z" |
| }, |
| "title": "Corroborating Text Evaluation Results with Heterogeneous Measures", |
| "authors": [ |
| { |
| "first": "Enrique", |
| "middle": [], |
| "last": "Amig\u00f3", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "UNED", |
| "location": { |
| "settlement": "Madrid" |
| } |
| }, |
| "email": "enrique@lsi.uned.es" |
| }, |
| { |
| "first": "Julio", |
| "middle": [], |
| "last": "Gonzalo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "UNED", |
| "location": { |
| "settlement": "Madrid" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Gim\u00e9nez", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "jgimenez@lsi.upc.edu" |
| }, |
| { |
| "first": "Felisa", |
| "middle": [], |
| "last": "Verdejo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "UNED", |
| "location": { |
| "settlement": "Madrid" |
| } |
| }, |
| "email": "felisa@lsi.uned.es" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Automatically produced texts (e.g. translations or summaries) are usually evaluated with n-gram based measures such as BLEU or ROUGE, while the wide set of more sophisticated measures that have been proposed in the last years remains largely ignored for practical purposes. In this paper we first present an indepth analysis of the state of the art in order to clarify this issue. After this, we formalize and verify empirically a set of properties that every text evaluation measure based on similarity to human-produced references satisfies. These properties imply that corroborating system improvements with additional measures always increases the overall reliability of the evaluation process. In addition, the greater the heterogeneity of the measures (which is measurable) the higher their combined reliability. These results support the use of heterogeneous measures in order to consolidate text evaluation results.", |
| "pdf_parse": { |
| "paper_id": "D11-1042", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Automatically produced texts (e.g. translations or summaries) are usually evaluated with n-gram based measures such as BLEU or ROUGE, while the wide set of more sophisticated measures that have been proposed in the last years remains largely ignored for practical purposes. In this paper we first present an indepth analysis of the state of the art in order to clarify this issue. After this, we formalize and verify empirically a set of properties that every text evaluation measure based on similarity to human-produced references satisfies. These properties imply that corroborating system improvements with additional measures always increases the overall reliability of the evaluation process. In addition, the greater the heterogeneity of the measures (which is measurable) the higher their combined reliability. These results support the use of heterogeneous measures in order to consolidate text evaluation results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The automatic evaluation of textual outputs is a core issue in many Natural Language Processing (NLP) tasks such as Natural Language Generation, Machine Translation (MT) and Automatic Summarization (AS). State-of-the-art automatic evaluation methods all operate by rewarding similarities between automatically-produced candidate outputs and manually-produced reference solutions, socalled human references or models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Over the last decade, a wide variety of measures, based on different quality assumptions, have been proposed. Recent work suggests exploiting external knowledge sources and/or deep linguistic annotation, and measure combination (see Section 2). However, original measures based on lexical matching, such as BLEU (Papineni et al., 2001a ) and ROUGE (Lin, 2004) are still preferred as de facto standards in MT and AS, respectively. There are, in our opinion, two main reasons behind this fact. First, the use of a common measure certainly allows researchers to carry out objective comparisons between their work and other published results. Second, the advantages of novel measures are not easy to demonstrate in terms of correlation with human judgements.", |
| "cite_spans": [ |
| { |
| "start": 312, |
| "end": 335, |
| "text": "(Papineni et al., 2001a", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 348, |
| "end": 359, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our goal is not to answer which is the most reliable metric or to propose yet another novel measure. Rather than this, we first analyze in depth the state of the art, concluding that it is not easy to determine the reliability of a measure. In absence of a clear proof of the advantages of novel measures, system developers naturally tend to prefer well-known standard measures. Second, we formalize and check empirically two intrinsic properties that any evaluation measure based on similarity to human-produced references satisfies. Assuming that a measure satisfies a set of basic formal constraints, these properties imply that corroborating a system comparison with additional measures always increases the overall reliability of the evaluation process, even when the added measures have a low correlation with human judgements. In most papers, evaluation results are corroborated with similar n-gram based measures (eg. BLEU and ROUGE). However, according to our second property, the greater the heterogeneity of the measures (which is measurable) the higher their reliability. The practical implication is that, corroborating evaluation results with measures based on higher linguistic levels increases the heterogeneity, and therefore, the reliability of evaluation results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2 State of the Art", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Among NLP disciplines, MT probably has the widest set of automatic evaluation measures. The dominant approach to automatic MT evaluation is, today, based on lexical metrics (also called n-gram based metrics). These metrics work by rewarding lexical similarity between candidate translations and a set of manually-produced reference translations. Lexical metrics can be classified according to how they compute similarity. Some are based on edit distance, e.g., WER (Nie\u00dfen et al., 2000) , PER (Tillmann et al., 1997) , and TER (Snover et al., 2006) . Other metrics are based on computing lexical precision, e.g., BLEU (Papineni et al., 2001b) and NIST (Doddington, 2002) , lexical recall, e.g., ROUGE (Lin and Och, 2004a) and CDER (Leusch et al., 2006) , or a balance between the two, e.g., GTM (Melamed et al., 2003; Turian et al., 2003b) , ME-TEOR (Banerjee and Lavie, 2005) , BLANC (Lita et al., 2005) , SIA (Liu and Gildea, 2006) , MAXSIM (Chan and Ng, 2008) , and O l (Gim\u00e9nez, 2008) .", |
| "cite_spans": [ |
| { |
| "start": 465, |
| "end": 486, |
| "text": "(Nie\u00dfen et al., 2000)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 493, |
| "end": 516, |
| "text": "(Tillmann et al., 1997)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 527, |
| "end": 548, |
| "text": "(Snover et al., 2006)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 618, |
| "end": 642, |
| "text": "(Papineni et al., 2001b)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 652, |
| "end": 670, |
| "text": "(Doddington, 2002)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 701, |
| "end": 721, |
| "text": "(Lin and Och, 2004a)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 731, |
| "end": 752, |
| "text": "(Leusch et al., 2006)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 795, |
| "end": 817, |
| "text": "(Melamed et al., 2003;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 818, |
| "end": 839, |
| "text": "Turian et al., 2003b)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 850, |
| "end": 876, |
| "text": "(Banerjee and Lavie, 2005)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 879, |
| "end": 904, |
| "text": "BLANC (Lita et al., 2005)", |
| "ref_id": null |
| }, |
| { |
| "start": 911, |
| "end": 933, |
| "text": "(Liu and Gildea, 2006)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 936, |
| "end": 962, |
| "text": "MAXSIM (Chan and Ng, 2008)", |
| "ref_id": null |
| }, |
| { |
| "start": 973, |
| "end": 988, |
| "text": "(Gim\u00e9nez, 2008)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Individual measures", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The lexical measure BLEU has been criticized in many ways. Some drawbacks of BLEU are the lack of interpretability (Turian et al., 2003a) , the fact that it is not necessary to increase BLEU to improve systems (Callison-burch and Osborne, 2006) , the overscoring of statistical MT systems (Le and Przybocki, 2005) , the low reliability over rich morphology languages (Homola et al., 2009) , or even the fact that a poor system translation of a book can obtain higher BLEU results than a manually produced translation (Culy and Riehemann, 2003) .", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 137, |
| "text": "(Turian et al., 2003a)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 210, |
| "end": 244, |
| "text": "(Callison-burch and Osborne, 2006)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 289, |
| "end": 313, |
| "text": "(Le and Przybocki, 2005)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 367, |
| "end": 388, |
| "text": "(Homola et al., 2009)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 517, |
| "end": 543, |
| "text": "(Culy and Riehemann, 2003)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Individual measures", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The reaction to these criticisms has been focused on the development of more sophisticated measures in which candidate and reference translations are automatically annotated and compared at different linguistic levels. Some of the features employed include parts of speech (Popovic and Ney, 2007; Gim\u00e9nez and M\u00e0rquez, 2007) , syntactic dependencies (Liu and Gildea, 2005; Gim\u00e9nez and M\u00e0rquez, 2007; Owczarzak et al., 2007a; Owczarzak et al., 2007b; Owczarzak et al., 2008; Chan and Ng, 2008; Kahn et al., 2009) , CCG parsing (Mehay and Brew, 2007) , syntactic constituents (Liu and Gildea, 2005; Gim\u00e9nez and M\u00e0rquez, 2007) , named entities (Reeder et al., 2001; Gim\u00e9nez and M\u00e0rquez, 2007) , semantic roles (Gim\u00e9nez and M\u00e0rquez, 2007) , discourse representations (Gim\u00e9nez, 2008) , and textual entailment features (Pad\u00f3 et al., 2009) . In general, when a higher linguistic level is incorporated, linguistic features at lower levels are preserved.", |
| "cite_spans": [ |
| { |
| "start": 273, |
| "end": 296, |
| "text": "(Popovic and Ney, 2007;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 297, |
| "end": 323, |
| "text": "Gim\u00e9nez and M\u00e0rquez, 2007)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 349, |
| "end": 371, |
| "text": "(Liu and Gildea, 2005;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 372, |
| "end": 398, |
| "text": "Gim\u00e9nez and M\u00e0rquez, 2007;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 399, |
| "end": 423, |
| "text": "Owczarzak et al., 2007a;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 424, |
| "end": 448, |
| "text": "Owczarzak et al., 2007b;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 449, |
| "end": 472, |
| "text": "Owczarzak et al., 2008;", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 473, |
| "end": 491, |
| "text": "Chan and Ng, 2008;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 492, |
| "end": 510, |
| "text": "Kahn et al., 2009)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 525, |
| "end": 547, |
| "text": "(Mehay and Brew, 2007)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 573, |
| "end": 595, |
| "text": "(Liu and Gildea, 2005;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 596, |
| "end": 622, |
| "text": "Gim\u00e9nez and M\u00e0rquez, 2007)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 640, |
| "end": 661, |
| "text": "(Reeder et al., 2001;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 662, |
| "end": 688, |
| "text": "Gim\u00e9nez and M\u00e0rquez, 2007)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 706, |
| "end": 733, |
| "text": "(Gim\u00e9nez and M\u00e0rquez, 2007)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 762, |
| "end": 777, |
| "text": "(Gim\u00e9nez, 2008)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 812, |
| "end": 831, |
| "text": "(Pad\u00f3 et al., 2009)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Individual measures", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The proposals for summarization evaluation are less numerous. Some proposals for AS tasks are based on syntactic units (Tratz and Hovy, 2008) , dependency triples (Owczarzak, 2009) or convolution kernels (Hirao et al., 2005) which reported some reliability improvement over ROUGE in terms of correlation with human judgements.", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 141, |
| "text": "(Tratz and Hovy, 2008)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 163, |
| "end": 180, |
| "text": "(Owczarzak, 2009)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 204, |
| "end": 224, |
| "text": "(Hirao et al., 2005)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Individual measures", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In general, however, it is not easy to determine clearly the contribution of deeper linguistic knowledge in those proposals. In the case of MT, improvements versus BLEU have been reported (Liu and Gildea, 2005; Kahn et al., 2009) , but not over a more elaborated metric such as METEOR (Mehay and Brew, 2007; Chan and Ng, 2008) . Besides, controversial results on their performance at sentence vs system level have been reported in shared evaluation tasks (Callison-Burch et al., 2008; Callison-Burch et al., 2009; Callison-Burch et al., 2010) .", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 210, |
| "text": "(Liu and Gildea, 2005;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 211, |
| "end": 229, |
| "text": "Kahn et al., 2009)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 285, |
| "end": 307, |
| "text": "(Mehay and Brew, 2007;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 308, |
| "end": 326, |
| "text": "Chan and Ng, 2008)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 455, |
| "end": 484, |
| "text": "(Callison-Burch et al., 2008;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 485, |
| "end": 513, |
| "text": "Callison-Burch et al., 2009;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 514, |
| "end": 542, |
| "text": "Callison-Burch et al., 2010)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Individual measures", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Several researchers have suggested integrating heterogeneous measures. Some of them optimize the measure combination function according to the metric's ability to emulate the behavior of human assessors (i.e., correlation with human assessments). For instance, using linear combinations (Pad\u00f3 et al., 2009; Liu and Gildea, 2007; Gim\u00e9nez and M\u00e0rquez, 2008) , Decision Trees (Akiba et al., 2001; Quirk, 2004) , regression based algorithms (Paul et al., 2007; Albrecht and Hwa, 2007a; Albrecht and Hwa, 2007b) or a variety of supervised machine learning algorithms (Quirk et al., 2005; Corston-Oliver et al., 2001; Kulesza and Shieber, 2004; Gamon et al., 2005; Amig\u00f3 et al., 2005) .", |
| "cite_spans": [ |
| { |
| "start": 287, |
| "end": 306, |
| "text": "(Pad\u00f3 et al., 2009;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 307, |
| "end": 328, |
| "text": "Liu and Gildea, 2007;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 329, |
| "end": 355, |
| "text": "Gim\u00e9nez and M\u00e0rquez, 2008)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 373, |
| "end": 393, |
| "text": "(Akiba et al., 2001;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 394, |
| "end": 406, |
| "text": "Quirk, 2004)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 437, |
| "end": 456, |
| "text": "(Paul et al., 2007;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 457, |
| "end": 481, |
| "text": "Albrecht and Hwa, 2007a;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 482, |
| "end": 506, |
| "text": "Albrecht and Hwa, 2007b)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 562, |
| "end": 582, |
| "text": "(Quirk et al., 2005;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 583, |
| "end": 611, |
| "text": "Corston-Oliver et al., 2001;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 612, |
| "end": 638, |
| "text": "Kulesza and Shieber, 2004;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 639, |
| "end": 658, |
| "text": "Gamon et al., 2005;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 659, |
| "end": 678, |
| "text": "Amig\u00f3 et al., 2005)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combined measures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Some of these works report evidence on the contribution of combining heterogeneous measures. For instance, Albrecht and Hwa included syntax-based measures together with lexical measures, outperforming other combination schemes (Albrecht and Hwa, 2007a; Albrecht and Hwa, 2007b) . Liu and Gildea, after examining the contribution of each component metric, found that \"metrics showing different properties of a sentence are more likely to make a good combined metric\" (Liu and Gildea, 2007) . Akiba et al., which combined multiple editdistance features based on lexical, morphosyntactic and lexical semantic information, observed that their approach improved single editing distance for several data sets (Akiba et al., 2001 ). More evidence was provided by Corston and Oliver. They showed that results on the task of discriminating between manual and automatic translations improve when combining linguistic and n-gram based features. In addition, they showed that this mixed combination improved over the combination of linguistic or n-gram based measures alone (Corston-Oliver et al., 2001) . (Pad\u00f3 et al., 2009) reported a reliability improvement by including measures based on textual entailment in the set. In (Gim\u00e9nez and M\u00e0rquez, 2008) , a simple arithmetic mean of scores for combining measures at different linguistic levels was applied with remarkable results in recent shared evaluation tasks (Callison-Burch et al., 2010) .", |
| "cite_spans": [ |
| { |
| "start": 227, |
| "end": 252, |
| "text": "(Albrecht and Hwa, 2007a;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 253, |
| "end": 277, |
| "text": "Albrecht and Hwa, 2007b)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 466, |
| "end": 488, |
| "text": "(Liu and Gildea, 2007)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 703, |
| "end": 722, |
| "text": "(Akiba et al., 2001", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1062, |
| "end": 1091, |
| "text": "(Corston-Oliver et al., 2001)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1094, |
| "end": 1113, |
| "text": "(Pad\u00f3 et al., 2009)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 1214, |
| "end": 1241, |
| "text": "(Gim\u00e9nez and M\u00e0rquez, 2008)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1403, |
| "end": 1432, |
| "text": "(Callison-Burch et al., 2010)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combined measures", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Meta-evaluation methods have been gradually introduced together with evaluation measures. For instance, Papineni et al. (2001b) evaluated the reliability of the BLEU metric according to its ability to emulate human assessors, as measured in terms of Pearson correlation with human assessments of adequacy and fluency at the document level. The measure NIST (Doddington, 2002) was meta-evaluated also in terms of correlation with human assessments, but over different document sources and for a varying number of references and segment sizes. Melamed et al. (2003) argued, at the time of introducing the GTM metric, that Pearson correlation coefficients can be affected by scale properties. They suggested using the non-parametric Spearman correlation coefficients instead. Lin and Och meta-evaluated ROUGE over both Pearson and Spearman correlation over a wide set of metrics, including NIST, WER, PER, and variants of ROUGE, BLEU and GTM. They obtained similar results in both cases (Lin and Och, 2004a) . Banerjee and Lavie (2005) argued that the reliability of metrics at the document level can be due to averaging effects but might not be robust across sentence translations. In order to address this issue, they computed the translation-bytranslation correlation with human assessments (i.e., correlation at the sentence level).", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 127, |
| "text": "Papineni et al. (2001b)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 357, |
| "end": 375, |
| "text": "(Doddington, 2002)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 542, |
| "end": 563, |
| "text": "Melamed et al. (2003)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 984, |
| "end": 1004, |
| "text": "(Lin and Och, 2004a)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1007, |
| "end": 1032, |
| "text": "Banerjee and Lavie (2005)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Meta-evaluation criteria", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "However, correlation with human judgements is not enough to determine the reliability of measures. First, correlation at sentence level (unlike correlation at system level) tends to be low and difficult to interpret. Second, correlation at system and segment levels can produce contradictory results. In (Amig\u00f3 et al., 2009) it is observed that higher linguistic levels in measures increases the correlation with human judgements at the system level at the cost of correlation at the segment level. As far as we know, a clear explanation for these phenomena has not been provided yet.", |
| "cite_spans": [ |
| { |
| "start": 304, |
| "end": 324, |
| "text": "(Amig\u00f3 et al., 2009)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Meta-evaluation criteria", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Third, a high correlation at system level does not ensure a high reliability. Culy and Rieheman observed that, although BLEU can achieve a high correlation at system level in some test suites, it over-scores a poor automatic translation of \"Tom Sawyer\" against a human produced translation (Culy and Riehemann, 2003) . This meta-evaluation criterion based on the ability to discern between manual and automatic translations have been referred to as human likeness (Amig\u00f3 et al., 2006) , in contrast to correlation with human judgements which is referred to as human acceptability. Examples of metameasures based on this criterion are ORANGE (Lin and Och, 2004b) and KING (Amig\u00f3 et al., 2005) . In addition, many of the approaches to metric combination described in Section 2.2 take human likeness as the optimization criterion (Corston-Oliver et al., 2001; Kulesza and Shieber, 2004; Gamon et al., 2005) . The main advantage of meta-evaluation based on human likeness is that, since human assessments are not required, metrics can be evaluated over larger test beds. However, the meta-evaluation in terms of human likeness is difficult to interpret.", |
| "cite_spans": [ |
| { |
| "start": 290, |
| "end": 316, |
| "text": "(Culy and Riehemann, 2003)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 464, |
| "end": 484, |
| "text": "(Amig\u00f3 et al., 2006)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 641, |
| "end": 661, |
| "text": "(Lin and Och, 2004b)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 671, |
| "end": 691, |
| "text": "(Amig\u00f3 et al., 2005)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 827, |
| "end": 856, |
| "text": "(Corston-Oliver et al., 2001;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 857, |
| "end": 883, |
| "text": "Kulesza and Shieber, 2004;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 884, |
| "end": 903, |
| "text": "Gamon et al., 2005)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Meta-evaluation criteria", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "In general, the state of the art includes a wide set of results that show the drawbacks of n-gram based measures as BLEU, and a wide set of proposals for new single and combined measures which are meta-evaluated in terms of human acceptability (i.e., their ability to emulate human judges, typically measured in terms of correlation with human judgements) or human-likeness (i.e., their ability to discern between automatic and human translations) (Amig\u00f3 et al., 2006) . However, the original measures BLEU and ROUGE are still preferred.", |
| "cite_spans": [ |
| { |
| "start": 448, |
| "end": 468, |
| "text": "(Amig\u00f3 et al., 2006)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The use of evaluation measures", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "We believe that one of the reasons is the lack of an in-depth study on to what extent providing additional evaluation results with other metrics contributes to the reliability of such results. The state of the art suggests that the use of heterogeneous measures can improve the evaluation reliability. However, as far as we know, there is no comprehensive analysis on the contribution of novel measures when corroborating evaluation results with additional measures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The use of evaluation measures", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "In general, automatic evaluation measures applied in tasks like MT or AS are similarity measures between system outputs and human references. These measures are related with precision, recall or overlap over specific types of linguistic units. For instance, ROUGE measures n-gram recall. Other measures that work at higher linguistic levels apply precision, recall or overlap of linguistic components such as dependency relations, grammatical categories, semantic roles, etc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity Based Evaluation Measures", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In order to delimit our hypothesis, let us first define what is a similarity measure in this context. Unfortunately, as far as we know, there is no formal concept covering the properties of current evaluation similarity measures. A close concept is that of \"metric\" or \"distance function\". But, actually, measures such as ROUGE or BLEU are not proper \"metrics\", because they do not satisfy the symmetry and the triangle inequality properties. Therefore, we need a new definition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity Based Evaluation Measures", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Being \u2126 the universe of system outputs s and gold-standards g, we assume that a similarity measure, in our context, is a function x : \u2126 2 \u2212\u2192 such that there exists a decomposition function f : \u2126 \u2212\u2192 {e 1 ..e n } (e.g., words or other linguistic units or relationships) satisfying the following constraints: (i) maximum similarity is achieved only when the decomposition of the system output resembles exactly the gold-standard decomposition; and (ii) growing overlap or removing non overlapped elements implies growing x. Formally, if x ranges from 0 to 1:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity Based Evaluation Measures", |
| "sec_num": "3" |
| }, |
| { |
| "text": "f (s) = f (g) \u2194 x(s, g) = 1 (f (s) = f (s ) \u222a {e \u2208 f (g) \\ f (s )}) \u2192 x(s, g) > x(s , g) (f (s) = f (s ) \u2212 {e \u2208 f (s ) \\ f (g)}) \u2192 x(s, g) > x(s , g)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity Based Evaluation Measures", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For instance, a random function and the reversal of a similarity function (f (s) = 1 f (s) ) do not satisfy these constraints. While the F measure over Precision and Recall satisfies these constraints 1 , precision and recall in isolation do not satisfy all of them: maximum recall can be achieved without resembling the gold-standard text decomposition; and maximum precision can be achieved with only a few overlapped elements.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity Based Evaluation Measures", |
| "sec_num": "3" |
| }, |
| { |
| "text": "BLEU (Papineni et al., 2001a) computes the ngram precision while the metric ROUGE (Lin and Och, 2004a) computes the n-gram recall. However, in general, both metrics satisfy all the constraints, given that BLEU includes a brevity penalty and ROUGE penalizes or limits the system output length. The measure METEOR creates an alignment between the two strings (Banerjee and Lavie, 2005) . This overlap-based measure satisfies also the previous constraints. Measures based on edit distance over n-grams (Tillmann et al., 1997; Nie\u00dfen et al., 2000) or other linguistic units (Akiba et al., 2001; Popovic and Ney, 2007) match also our definition of similarity measure. The editing distance is minimum when the two compared text are equal. The more the evaluated text contains elements from the gold-standard the more the editing distance is reduced (higher similarity). The word ordering can be also expressed in terms of a decomposition function. A similar reasoning applies to every relevant measure in the state-of-the art.", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 29, |
| "text": "(Papineni et al., 2001a)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 82, |
| "end": 102, |
| "text": "(Lin and Och, 2004a)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 357, |
| "end": 383, |
| "text": "(Banerjee and Lavie, 2005)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 499, |
| "end": 522, |
| "text": "(Tillmann et al., 1997;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 523, |
| "end": 543, |
| "text": "Nie\u00dfen et al., 2000)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 570, |
| "end": 590, |
| "text": "(Akiba et al., 2001;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 591, |
| "end": 613, |
| "text": "Popovic and Ney, 2007)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Similarity Based Evaluation Measures", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this paper, we provide empirical results for MT and AS. For MT, we use the data sets from the Arabic-to-English (AE) and Chinese-to-English (CE) NIST MT Evaluation campaigns in 2004 and -references 5 5 5 4 #systems 5 10 7 10 #system-outputs-assessed 5 10 6 5 #system-outputs 1,353 1,788 1,056 1,082 #outputs-assessed per-system 347 447 266 272 2005 2 . Both include two translations exercises: for the 2005 campaign we contacted each participant individually and asked for permission to use their data 3 . In our experiments, we take the sum of adequacy and fluency, both in a 1-5 scale, as a global measure of quality (LDC, 2005) . Thus, human assessments are in a 2-10 scale. For AS, we have used the AS test suites developed in the DUC 2005 and DUC 2006 evaluation campaigns 4 . This AS task was to generate a question focused summary of 250 words from a set of 25-50 documents to a complex question. Summaries were evaluated according to several criteria. Here, we will consider the responsiveness judgements, in which the quality score was an integer between 1 and 5. See Tables 1 and 2 for a brief quantitative description of these test beds.", |
| "cite_spans": [ |
| { |
| "start": 640, |
| "end": 651, |
| "text": "(LDC, 2005)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 189, |
| "end": 313, |
| "text": "-references 5 5 5 4 #systems 5 10 7 10 #system-outputs-assessed 5 10 6 5 #system-outputs 1,353 1,788 1,056", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 1098, |
| "end": 1112, |
| "text": "Tables 1 and 2", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data sets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "AE 2004 CE 2004 AE 2005 CE 2005 #human", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data sets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "As for evaluation measures, for MT we have used a rich set of 64 measures provided within the ASIYA Toolkit (Gim\u00e9nez and M\u00e0rquez, 2010) 5 . This includes measures operating at different linguistic levels: lexical, syntactic, and semantic. At the lexical level this set includes variants of 8 measures employed in the state of the art: BLEU, NIST, GTM, METEOR, ROUGE, WER, PER and TER. In addition, we have included a basic measure O l that computes the lexical overlap without considering word ordering. All these measures have similar granularity. They use n-grams of a varying length as the basic unit with additional information provided by linguistic tools. The underlying similarity criteria include precision, recall, overlap, or edit rate, and the decomposition functions include words, dependency tree nodes (DP HWC, DP-Or, etc.), constituency parsing (CP-STM), discourse roles (DR-Or), semantic roles (SR-Or), named entities, etc. Further details on the measure set may be found in the ASIYA technical manual (Gim\u00e9nez and M\u00e0rquez, 2010) . According to our computations, our measures cover high and low correlations at both levels. Correlation at system level spans between 0.63 and 0.95. Correlations at sentence level ranges from 0.18 up to 0.54. We will discriminate between two subsets of measures. The first one includes those that decompose the text into words, n-grams, stems or lexical semantic tags. This set includes BLEU, ROUGE, NIST, GTM, PER and WER families. We will refer to them as \"lexical\" measures. The second set are those that consider deeper linguistic levels such as parts of speech, syntactic dependencies, syntactic constituents, etc. We will refer to them as \"linguistic\" measures.", |
| "cite_spans": [ |
| { |
| "start": 108, |
| "end": 135, |
| "text": "(Gim\u00e9nez and M\u00e0rquez, 2010)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1018, |
| "end": 1045, |
| "text": "(Gim\u00e9nez and M\u00e0rquez, 2010)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measures", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In the case of automatic summarization (AS), we have employed the standard variants of ROUGE (Lin, 2004) . These 7 measures are ROUGE-{1..4}, ROUGE-SU, ROUGE-L and ROUGE-W. In addition we have included the reversed precision version for each variant and the F measure of both. Notice that the original ROUGE measures are oriented to recall. In total, we have 21 measures for the summarization task. All of them are based on n-gram overlap.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 104, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measures", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As discussed in Section 2, a number of recent publications address the problem of measure combination with successful results, specially when heterogeneous measures are combined. The following property clarifies this issue and justifies the use of heterogeneous measures when corroborating evaluation results. It asserts that the reliability of system improvements always increases when the evaluation result is corroborated by an additional similarity measure, regardless of the correlation achieved by the additional measure in isolation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive reliability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For the sake of clarity, in the rest of the paper, we will denote the similarity x(s, g) between system output s and human reference g by x(s). The quality of a system output s will be referred to as Q(s). Let us define the reliability R(X) of a measure set as the probability of a real improvement (as measured by human judges) when a score improvement is observed simultaneously for all measures in the set X. :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive reliability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "R(X) \u2261 P (Q(s) \u2265 Q(s )|x(s) \u2265 x(s ) \u2200x \u2208 X)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive reliability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "According to this definition, we may not be able to predict the quality of any system output (i.e. a translation) with a highly reliable measure set, but we can ensure a system improvement when all measures corroborate the result. Then the additive reliability property can be stated as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive reliability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "R(X \u222a {x}) \u2265 R(X)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive reliability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We could think of violating this property by adding, for instance, a measure consisting of a random function (x (s) = rand(0..1)) or a reversal of the original measure (x (s) = 1/x(s)). These kind of measures, however, would not satisfy the constraints defined in Section 3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive reliability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This property is based on the idea that similarity with human references according to any aspect should not imply statistically a quality decrease. Although our test suites includes measures with low correlation at segment and system level, we can confirm empirically that all of them satisfy this property.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive reliability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We have developed the following experiment: taking all possible measure pairs in the test suites, we have compared their reliability as a set versus the maximal reliability of any of them (by computing the difference R(X) \u2212 max(R(x 1 ), R(x 2 )). Figure 1 shows the obtained distribution of this difference for our MT and AS test suites. Remarkably, in almost every case this difference is positive.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 247, |
| "end": 256, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Additive reliability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This result has a key implication: Corroborating evaluation results with a new measure, even when it has lower correlation with human judgements, increases the reliability of results. Therefore, if the correlation with judgements is not determinant, the question is now what factor determines the contribution of the new measures. According to the following property, this factor is the heterogeneity of measures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additive reliability", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This property states that the reliability of any measure combination is lower bounded by the heterogeneity of the measure set. In other words, a single measure can be more or less reliable, but a system improvement according to all measures in an heterogeneous set is reliable.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Let us define the heterogeneity H(X) of a set of measures X as, given two system outputs s and s such that g = s = s = g (g is the reference text), the probability that there exist two measures that contradict each other. That is: Thus, given a set X of measures, the property states that there exists a strict growing function F such that:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "H(X) \u2261 P (\u2203x, x \u2208 X.x(s) > x(s ) \u2227 x (s) < x (s ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "R(X) \u2265 F (H(X)) and H(X) = 1 \u2192 R(X) = 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In other words, the more the similarity measures tend to contradict each other, the more a unanimous improvement over all similarity measures is reliable. Clearly, the harder it is that measures agree, the more meaningful it is when they do.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The first part is derived from the Additive Reliability property. Intuitively, any individual measure has zero heterogeneity. Increasing the heterogeneity implies joining measures or measure sets progressively. According to the Additive Reliability property, this joining implies a reliability increase. Therefore, the higher the heterogeneity, the higher the minimum Reliability achieved by the corresponding measure sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The second part is derived from the Heterogeneity definition. If H(X) = 1 then, for any distinct pair of outputs that differ from the reference, there exist at least two measures in the set contradicting each other. That is, H(X) = 1 implies that:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2200s = s = g(\u2203x, x \u2208 X.x(s) > x(s ) \u2227 x (s) < x (s ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Therefore, if one output improves the other according to all measures, then the output must be equal than the reference. According to the first constraint of similarity measures, a text that is equal to the reference achieves the maximum score:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u00ac(\u2203x, x \u2208 X.x(s) > x(s ) \u2227 x (s) < x (s )) \u2192", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "g = s \u2192 f (g) = g(s) \u2192 \u2200x.x(s) \u2265 x(s )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Finally, if we assume that the reference (human produced texts) has a maximum quality, then it will have equal or higher quality than the other output.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "g = s \u2192 Q(s) \u2265 Q(s )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Therefore, the reliability of the measure set is maximal. In summary, if H(X) = 1 then:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "R(X) = P (Q(s) \u2265 Q(s )|x(s) \u2265 x(s ) \u2200x \u2208 X) = = P (Q(s) \u2265 Q(s )|s = g) = 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Figures 2 and 3 show the relationship between the heterogeneity of randomly selected measure sets and their reliability for the MT and summarization test suites. As the figures show, the higher the heterogeneity, the higher the reliability of the measure set. The results in AS are less pronounced due to the redundancy in ROUGE measure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Notice that the heterogeneity property does not necessarily imply a high correlation between reliability and heterogeneity. For instance, an ideal single measure would have zero heterogeneity and achieve maximum reliability, appearing in the top left area. The property rather brings us to the following situation: let us suppose that we have a set of single measures available which achieve a certain range of reliability. We can improve our system according to any of these measures. Without human assessments, we do not know what is the most reliable measure. But if we combine them, increasing the heterogeneity, the minimal reliability of the selected measures will be higher. This implies that combining heterogeneous measures (e.g. at high linguistic levels) that do not achieve high correlation in isolation, is better than corroborating results with any individual measure alone, such as ROUGE and BLEU, which is the common practice in the state of the art.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The main drawback of this property is that increasing the heterogeneity implies a sensitivity reduction. For instance, if H(X) = 0.9, then only for 10% of output pairs in the corpus there exists an improvement according to all measures. In other words, unanimous evaluation results from heterogeneous measures are reliable but harder to achieve for the system developer. The next section investigates on this issue.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Finally, Figure 4 shows that linguistic measures increase the heterogeneity of measure sets. We have generated sets of metrics of size 1 to 10 made up by lexical or lexical and linguistic metrics. As the figure shows, in the second case, the measure sets achieve a higher heterogeneity. 7 Score thresholds vs. Additive Reliability According to the previous properties, corroborating evaluation results with several measures increases the reliability of evaluation results at the cost of sensitivity. On the other hand, increasing the score threshold of a single measure should have a similar effect. Which is then the best methodology to improve reliability? In this section we provide experimental evidence on the relationship between both ways of increasing reliability: we have found that, corroborating evaluation results over single texts with additional measures is more reliable than requiring higher score differences according to any individual measure in the set. More specifically, we have found that the reliability of a measure set is higher than the reliability of each of the individual measures at a similar level of sensitivity.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 17, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Formally, we define the sensitivity S(X) of a metric set X as the probability of finding a score improvement within text pairs with a real (i.e. human assessed) quality improvement:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "S(X) = P (x(s) \u2265 x(s )\u2200x \u2208 X|Q(s) \u2265 Q(s ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Being R th (x) and S th (x) the reliability and sensitivity of a single measure x for a certain increase score threshold th: ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "R th (x) = P (Q(s) \u2265 Q(s )|x(s) \u2212 x(s ) \u2265 th) S th (x) = P (x(s) \u2212 x(s ) \u2265 th|Q(s) \u2265 Q(s ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The property that we want to check is that, at the same sensitivity level, combining measures is more reliable than increasing the score threshold of single measures:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "S(X) = S th (x).x \u2208 X \u2212\u2192 R(X) \u2265 R th (x)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Note that if we had a perfect measure x p such that R(x p ) = S(x p ) = 1, then combining this measure with a low reliability measure x l would produce a lower sensitivity, but the maximal reliability would be preserved.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In order to confirm empirically this property, we have developed the following experiment: (i) We compute the reliability and sensitivity of randomly chosen measure sets over single text pairs. We have generated sets of 2,3,5,10,20 and 40 measures. In the case of summarization corpora we have combined up to 20 measures. In addition, we compute also the heterogeneity H(X) of each measure set; (ii) Experimenting with different values for the threshold th, we compute the reliability of single measures for all potential sensitivity levels; (iii) For each measure set, we compare the reliability of the measure set versus the reliability of single measures at the same sensitivity level. We will refer to this as the Reliability Gain: Reliability Gain = R(X) \u2212 max{R th (x)/x \u2208 X \u2227 S th (x) = S(X)} If there are several reliability values with the same sensitivity for a given single measures, we choose the highest reliability value for the single measure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Figures 5 and 6 illustrate the results for the MT and AS corpora. The horizontal axis represents the Heterogeneity of measure sets, while the vertical axis represents the reliability gain. Remarkably, the reliability gain is positive for all cases in our test suites. The maximum reliability gain is 0.34 in the case of MT and 0.08 for AS (note that summarization measures are more redundant in our corpora). In both test suites, the largest information gains are obtained with highly heterogeneous measure sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In summary, given comparable measures in terms of reliability, corroborating evaluation results with several measures is more effective than optimizing systems according to the best measure in the set. This empirical property provides an additional evidence in favour of the use of heterogeneous measures and, in particular, of the use of linguistic measures in combination with standard lexical measures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "are not exploited by the community. Our first conclusion is that it is not easy to determine the reliability of measures, which is highly corpus-dependent and often contradictory when comparing correlation with human judgements at segment vs. system levels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In order to tackle this issue, we have studied a number of properties that suggest the convenience of using heterogeneous measures to corroborate evaluation results. According to these properties, we can ensure that, even when if we can not determine the reliability of individual measures, corroborating a system improvement with additional measures always increases the reliability of the results. In addition, the more heterogeneous the measures employed (which is measurable), the higher the reliability of the results. But perhaps the most important practical finding is that the reliability at similar sensitivity levels by corroborating evaluation results with several measures is always higher than improving systems according to any of the combined measures in isolation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "These properties point to the practical advantages of considering linguistic knowledge (beyond lexical information) in measures, even if they do not achieve a high correlation with human judgements. Our experiments show that linguistic knowledge increases the heterogeneity of measure sets, which in turn increases the reliability of evaluation results when corroborating system comparisons with several measures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Heterogeneity", |
| "sec_num": "6" |
| }, |
| { |
| "text": "There is an exception. In an extreme case, when recall is zero, removing non overlapped elements does not modify the F measure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.nist.gov/speech/tests/mt3 We are grateful to a number of groups and companies whoresponded positively: University of Southern California Information Sciences Institute (ISI), University of Maryland (UMD), Johns Hopkins University & University of Cambridge (JHU-CU), IBM, University of Edinburgh, University of Aachen (RWTH), National Research Council of Canada (NRC), Chinese Academy of Sciences Institute of Computing Technology (ICT), Instituto Trentino di Cultura -Centro per la Ricerca Scientifica e Tecnologica(ITC-IRST), MITRE. 4 http://duc.nist.gov/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.lsi.upc.edu/\u02dcnlp/Asiya", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "ConclusionsIn this paper, we have analyzed the state of the art in order to clarify why novel text evaluation measures", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work has been partially funded by the Spanish Government (Holopedia, TIN2010-21128-C02 and OpenMT-2, TIN2009-14675-C03) and the European Community's Seventh Framework Programme (FP7/2007-2013) under grant agreement number 247762 (FAUST project, FP7-ICT-2009-4-247762).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Using Multiple Edit Distances to Automatically Rank Machine Translation Output", |
| "authors": [ |
| { |
| "first": "Yasuhiro", |
| "middle": [], |
| "last": "Akiba", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Imamura", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of Machine Translation Summit VIII", |
| "volume": "", |
| "issue": "", |
| "pages": "15--20", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yasuhiro Akiba, Kenji Imamura, and Eiichiro Sumita. 2001. Using Multiple Edit Distances to Automatically Rank Machine Translation Output. In Proceedings of Machine Translation Summit VIII, pages 15-20.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A Reexamination of Machine Learning Approaches for Sentence-Level MT Evaluation", |
| "authors": [ |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Albrecht", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Hwa", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "880--887", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joshua Albrecht and Rebecca Hwa. 2007a. A Re- examination of Machine Learning Approaches for Sentence-Level MT Evaluation. In Proceedings of the 45th Annual Meeting of the Association for Computa- tional Linguistics (ACL), pages 880-887.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Regression for Sentence-Level MT Evaluation with Pseudo References", |
| "authors": [ |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Albrecht", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Hwa", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "296--303", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joshua Albrecht and Rebecca Hwa. 2007b. Regression for Sentence-Level MT Evaluation with Pseudo Refer- ences. In Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics (ACL), pages 296-303.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "QARLA: a Framework for the Evaluation of Automatic Summarization", |
| "authors": [ |
| { |
| "first": "Enrique", |
| "middle": [], |
| "last": "Amig\u00f3", |
| "suffix": "" |
| }, |
| { |
| "first": "Julio", |
| "middle": [], |
| "last": "Gonzalo", |
| "suffix": "" |
| }, |
| { |
| "first": "Anselmo", |
| "middle": [], |
| "last": "Pe", |
| "suffix": "" |
| }, |
| { |
| "first": "Felisa", |
| "middle": [], |
| "last": "Verdejo", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "280--289", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Enrique Amig\u00f3, Julio Gonzalo, Anselmo Pe nas, and Fe- lisa Verdejo. 2005. QARLA: a Framework for the Evaluation of Automatic Summarization. In Proceed- ings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL), pages 280-289.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "MT Evaluation: Human-Like vs. Human Acceptable", |
| "authors": [ |
| { |
| "first": "Enrique", |
| "middle": [], |
| "last": "Amig\u00f3", |
| "suffix": "" |
| }, |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Gim\u00e9nez", |
| "suffix": "" |
| }, |
| { |
| "first": "Julio", |
| "middle": [], |
| "last": "Gonzalo", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Joint 21st International Conference on Computational Linguistics and the 44th Annual Meeting of the Association for Computational Linguistics (COLING-ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "17--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Enrique Amig\u00f3, Jes\u00fas Gim\u00e9nez, Julio Gonzalo, and Llu\u00eds M\u00e0rquez. 2006. MT Evaluation: Human-Like vs. Hu- man Acceptable. In Proceedings of the Joint 21st In- ternational Conference on Computational Linguistics and the 44th Annual Meeting of the Association for Computational Linguistics (COLING-ACL), pages 17- 24.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The contribution of linguistic features to automatic machine translation evaluation", |
| "authors": [ |
| { |
| "first": "Enrique", |
| "middle": [], |
| "last": "Amig\u00f3", |
| "suffix": "" |
| }, |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Gim\u00e9nez", |
| "suffix": "" |
| }, |
| { |
| "first": "Julio", |
| "middle": [], |
| "last": "Gonzalo", |
| "suffix": "" |
| }, |
| { |
| "first": "Felisa", |
| "middle": [], |
| "last": "Verdejo", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "1", |
| "issue": "", |
| "pages": "306--314", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Enrique Amig\u00f3, Jes\u00fas Gim\u00e9nez, Julio Gonzalo, and Fe- lisa Verdejo. 2009. The contribution of linguis- tic features to automatic machine translation evalua- tion. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th Interna- tional Joint Conference on Natural Language Process- ing of the AFNLP: Volume 1 -Volume 1, ACL '09, pages 306-314, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "METEOR: An Automatic Metric for MT Evaluation with Improved Correlation with Human Judgments", |
| "authors": [ |
| { |
| "first": "Satanjeev", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for MT and/or Summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An Automatic Metric for MT Evaluation with Im- proved Correlation with Human Judgments. In Pro- ceedings of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for MT and/or Summarization.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Reevaluating the role of bleu in machine translation research", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Miles", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "249--256", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Callison-burch and Miles Osborne. 2006. Re- evaluating the role of bleu in machine translation re- search. In In EACL, pages 249-256.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Further meta-evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Cameron", |
| "middle": [], |
| "last": "Fordyce", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Josh", |
| "middle": [], |
| "last": "Schroeder", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Third Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "70--106", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Callison-Burch, Cameron Fordyce, Philipp Koehn, Christof Monz, and Josh Schroeder. 2008. Further meta-evaluation of machine translation. In Proceed- ings of the Third Workshop on Statistical Machine Translation, pages 70-106.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Findings of the 2009 workshop on statistical machine translation", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Josh", |
| "middle": [], |
| "last": "Schroeder", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Callison-Burch, Philipp Koehn, Christof Monz, and Josh Schroeder. 2009. Findings of the 2009 work- shop on statistical machine translation. In Proceedings of the Fourth Workshop on Statistical Machine Trans- lation.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Findings of the 2010 joint workshop on statistical machine translation and metrics for machine translation", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Kay", |
| "middle": [], |
| "last": "Peterson", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Przybocki", |
| "suffix": "" |
| }, |
| { |
| "first": "Omar", |
| "middle": [], |
| "last": "Zaidan", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Joint Fifth Workshop on Statistical Machine Translation and MetricsMATR", |
| "volume": "", |
| "issue": "", |
| "pages": "17--53", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Callison-Burch, Philipp Koehn, Christof Monz, Kay Peterson, Mark Przybocki, and Omar Zaidan. 2010. Findings of the 2010 joint workshop on sta- tistical machine translation and metrics for machine translation. In Proceedings of the Joint Fifth Workshop on Statistical Machine Translation and MetricsMATR, pages 17-53. Revised August 2010.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "MAXSIM: A maximum similarity metric for machine translation evaluation", |
| "authors": [ |
| { |
| "first": "Yee", |
| "middle": [], |
| "last": "Seng Chan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08: HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "55--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yee Seng Chan and Hwee Tou Ng. 2008. MAXSIM: A maximum similarity metric for machine translation evaluation. In Proceedings of ACL-08: HLT, pages 55-62.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A Machine Learning Approach to the Automatic Evaluation of Machine Translation", |
| "authors": [ |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Corston", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Oliver", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the 39th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "140--147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon Corston-Oliver, Michael Gamon, and Chris Brockett. 2001. A Machine Learning Approach to the Automatic Evaluation of Machine Translation. In Pro- ceedings of the 39th Annual Meeting of the Association for Computational Linguistics (ACL), pages 140-147.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The Limits of N-gram Translation Evaluation Metrics", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Culy", |
| "suffix": "" |
| }, |
| { |
| "first": "Susanne", |
| "middle": [ |
| "Z" |
| ], |
| "last": "Riehemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of MT-SUMMIT IX", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Culy and Susanne Z. Riehemann. 2003. The Limits of N-gram Translation Evaluation Metrics. In Proceedings of MT-SUMMIT IX, pages 1-8.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Automatic Evaluation of Machine Translation Quality Using N-gram Co-Occurrence Statistics", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Doddington", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 2nd International Conference on Human Language Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "138--145", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Doddington. 2002. Automatic Evaluation of Machine Translation Quality Using N-gram Co- Occurrence Statistics. In Proceedings of the 2nd Inter- national Conference on Human Language Technology, pages 138-145.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Sentence-Level MT evaluation without reference translations: beyond language modeling", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gamon", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Aue", |
| "suffix": "" |
| }, |
| { |
| "first": "Martine", |
| "middle": [], |
| "last": "Smets", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of EAMT", |
| "volume": "", |
| "issue": "", |
| "pages": "103--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Gamon, Anthony Aue, and Martine Smets. 2005. Sentence-Level MT evaluation without refer- ence translations: beyond language modeling. In Pro- ceedings of EAMT, pages 103-111.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Linguistic Features for Automatic Evaluation of Heterogeneous MT Systems", |
| "authors": [ |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Gim\u00e9nez", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the ACL Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "256--264", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jes\u00fas Gim\u00e9nez and Llu\u00eds M\u00e0rquez. 2007. Linguistic Features for Automatic Evaluation of Heterogeneous MT Systems. In Proceedings of the ACL Workshop on Statistical Machine Translation, pages 256-264.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Heterogeneous Automatic MT Evaluation Through Non-Parametric Metric Combinations", |
| "authors": [ |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Gim\u00e9nez", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Third International Joint Conference on Natural Language Processing (IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "319--326", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jes\u00fas Gim\u00e9nez and Llu\u00eds M\u00e0rquez. 2008. Hetero- geneous Automatic MT Evaluation Through Non- Parametric Metric Combinations. In Proceedings of the Third International Joint Conference on Natural Language Processing (IJCNLP), pages 319-326.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Asiya: An Open Toolkit for Automatic Machine Translation (Meta-)Evaluation", |
| "authors": [ |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Gim\u00e9nez", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "The Prague Bulletin of Mathematical Linguistics", |
| "volume": "1", |
| "issue": "94", |
| "pages": "77--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jes\u00fas Gim\u00e9nez and Llu\u00eds M\u00e0rquez. 2010. Asiya: An Open Toolkit for Automatic Machine Translation (Meta-)Evaluation. The Prague Bulletin of Mathemat- ical Linguistics, 1(94):77-86.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Empirical Machine Translation and its Evaluation", |
| "authors": [ |
| { |
| "first": "Jes\u00fas", |
| "middle": [], |
| "last": "Gim\u00e9nez", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jes\u00fas Gim\u00e9nez. 2008. Empirical Machine Transla- tion and its Evaluation. Ph.D. thesis, Universitat Polit\u00e8cnica de Catalunya.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Kernel-based approach for automatic evaluation of natural language generation technologies: Application to automatic summarization", |
| "authors": [ |
| { |
| "first": "Tsutomu", |
| "middle": [], |
| "last": "Hirao", |
| "suffix": "" |
| }, |
| { |
| "first": "Manabu", |
| "middle": [], |
| "last": "Okumura", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Isozaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "145--152", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsutomu Hirao, Manabu Okumura, and Hideki Isozaki. 2005. Kernel-based approach for automatic evaluation of natural language generation technologies: Applica- tion to automatic summarization. In Proceedings of Human Language Technology Conference and Confer- ence on Empirical Methods in Natural Language Pro- cessing, pages 145-152, Vancouver, British Columbia, Canada, October. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "A simple automatic mt evaluation metric", |
| "authors": [ |
| { |
| "first": "Petr", |
| "middle": [], |
| "last": "Homola", |
| "suffix": "" |
| }, |
| { |
| "first": "Vladislav", |
| "middle": [], |
| "last": "Kubo\u0148", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Pecina", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Fourth Workshop on Statistical Machine Translation, StatMT '09", |
| "volume": "", |
| "issue": "", |
| "pages": "33--36", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Petr Homola, Vladislav Kubo\u0148, and Pavel Pecina. 2009. A simple automatic mt evaluation metric. In Proceed- ings of the Fourth Workshop on Statistical Machine Translation, StatMT '09, pages 33-36, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Expected Dependency Pair Match: Predicting translation quality with expected syntactic structure", |
| "authors": [ |
| { |
| "first": "Jeremy", |
| "middle": [ |
| "G" |
| ], |
| "last": "Kahn", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Snover", |
| "suffix": "" |
| }, |
| { |
| "first": "Mari", |
| "middle": [], |
| "last": "Ostendorf", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeremy G. Kahn, Matthew Snover, and Mari Ostendorf. 2009. Expected Dependency Pair Match: Predicting translation quality with expected syntactic structure. Machine Translation.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "A learning approach to improving sentence-level MT evaluation", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Kulesza", |
| "suffix": "" |
| }, |
| { |
| "first": "Stuart", |
| "middle": [ |
| "M" |
| ], |
| "last": "Shieber", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 10th International Conference on Theoretical and Methodological Issues in Machine Translation (TMI)", |
| "volume": "", |
| "issue": "", |
| "pages": "75--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Kulesza and Stuart M. Shieber. 2004. A learning approach to improving sentence-level MT evaluation. In Proceedings of the 10th International Conference on Theoretical and Methodological Issues in Machine Translation (TMI), pages 75-84.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Linguistic Data Annotation Specification: Assessment of Adequacy and Fluency in Translations", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
"last": "LDC",
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "LDC. 2005. Linguistic Data Annotation Spec- ification: Assessment of Adequacy and Flu- ency in Translations. Revision 1.5. Tech- nical report, Linguistic Data Consortium.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "NIST 2005 machine translation evaluation official results", |
| "authors": [ |
| { |
| "first": "Audrey", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Przybocki", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Official release of automatic evaluation scores for all submissions", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Audrey Le and Mark Przybocki. 2005. NIST 2005 ma- chine translation evaluation official results. In Official release of automatic evaluation scores for all submis- sions, August.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "CDER: Efficient MT Evaluation Using Block Movements", |
| "authors": [ |
| { |
| "first": "Gregor", |
| "middle": [], |
| "last": "Leusch", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Ueffing", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of 11th Conference of the European Chapter of the Association for Computational Linguistics (EACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "241--248", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gregor Leusch, Nicola Ueffing, and Hermann Ney. 2006. CDER: Efficient MT Evaluation Using Block Move- ments. In Proceedings of 11th Conference of the Eu- ropean Chapter of the Association for Computational Linguistics (EACL), pages 241-248.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Automatic Evaluation of Machine Translation Quality Using Longest Common Subsequence and Skip-Bigram Statics", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Franz Josef", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin and Franz Josef Och. 2004a. Auto- matic Evaluation of Machine Translation Quality Us- ing Longest Common Subsequence and Skip-Bigram Statics. In Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "ORANGE: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Franz Josef", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 20th International Conference on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin and Franz Josef Och. 2004b. ORANGE: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation. In Proceedings of the 20th International Conference on Computational Linguis- tics (COLING).", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Rouge: A Package for Automatic Evaluation of Summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Text Summarization Branches Out: Proceedings of the ACL-04 Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "74--81", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. Rouge: A Package for Auto- matic Evaluation of Summaries. In Marie-Francine Moens and Stan Szpakowicz, editors, Text Summariza- tion Branches Out: Proceedings of the ACL-04 Work- shop, pages 74-81, Barcelona, Spain, July. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "BLANC: Learning Evaluation Metrics for MT", |
| "authors": [ |
| { |
| "first": "Lucian", |
| "middle": [], |
| "last": "Vlad Lita", |
| "suffix": "" |
| }, |
| { |
| "first": "Monica", |
| "middle": [], |
| "last": "Rogati", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Joint Conference on Human Language Technology and Empirical Methods in Natural Language Processing (HLT-EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "740--747", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lucian Vlad Lita, Monica Rogati, and Alon Lavie. 2005. BLANC: Learning Evaluation Metrics for MT. In Proceedings of the Joint Conference on Human Lan- guage Technology and Empirical Methods in Natural Language Processing (HLT-EMNLP), pages 740-747.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Syntactic Features for Evaluation of Machine Translation", |
| "authors": [ |
| { |
| "first": "Ding", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for MT and/or Summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "25--32", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ding Liu and Daniel Gildea. 2005. Syntactic Features for Evaluation of Machine Translation. In Proceed- ings of ACL Workshop on Intrinsic and Extrinsic Eval- uation Measures for MT and/or Summarization, pages 25-32.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Stochastic Iterative Alignment for Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Ding", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Joint 21st International Conference on Computational Linguistics and the 44th Annual Meeting of the Association for Computational Linguistics (COLING-ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "539--546", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ding Liu and Daniel Gildea. 2006. Stochastic Iter- ative Alignment for Machine Translation Evaluation. In Proceedings of the Joint 21st International Confer- ence on Computational Linguistics and the 44th An- nual Meeting of the Association for Computational Linguistics (COLING-ACL), pages 539-546.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Source-Language Features and Maximum Correlation Training for Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Ding", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Meeting of the North American Chapter of the Association for Computational Linguistics (NAACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "41--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ding Liu and Daniel Gildea. 2007. Source-Language Features and Maximum Correlation Training for Ma- chine Translation Evaluation. In Proceedings of the 2007 Meeting of the North American Chapter of the Association for Computational Linguistics (NAACL), pages 41-48.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "BLEUATRE: Flattening Syntactic Dependencies for MT Evaluation", |
| "authors": [ |
| { |
| "first": "Dennis", |
| "middle": [], |
| "last": "Mehay", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brew", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 11th Conference on Theoretical and Methodological Issues in Machine Translation (TMI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dennis Mehay and Chris Brew. 2007. BLEUATRE: Flattening Syntactic Dependencies for MT Evaluation. In Proceedings of the 11th Conference on Theoreti- cal and Methodological Issues in Machine Translation (TMI).", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Precision and Recall of Machine Translation", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Melamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Green", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [ |
| "P" |
| ], |
| "last": "Turian", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the Joint Conference on Human Language Technology and the North American Chapter of the Association for Computational Linguistics (HLT-NAACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Melamed, Ryan Green, and Joseph P. Turian. 2003. Precision and Recall of Machine Translation. In Proceedings of the Joint Conference on Human Lan- guage Technology and the North American Chapter of the Association for Computational Linguistics (HLT- NAACL).", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "An Evaluation Tool for Machine Translation: Fast Evaluation for MT Research", |
| "authors": [ |
| { |
| "first": "Sonja", |
| "middle": [], |
| "last": "Nie\u00dfen", |
| "suffix": "" |
| }, |
| { |
| "first": "Franz", |
| "middle": [ |
| "Josef" |
| ], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregor", |
| "middle": [], |
| "last": "Leusch", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the 2nd International Conference on Language Resources and Evaluation (LREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sonja Nie\u00dfen, Franz Josef Och, Gregor Leusch, and Her- mann Ney. 2000. An Evaluation Tool for Machine Translation: Fast Evaluation for MT Research. In Pro- ceedings of the 2nd International Conference on Lan- guage Resources and Evaluation (LREC).", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Dependency-Based Automatic Evaluation for Machine Translation", |
| "authors": [ |
| { |
| "first": "Karolina", |
| "middle": [], |
| "last": "Owczarzak", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Van Genabith", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Way", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of SSST, NAACL-HLT/AMTA Workshop on Syntax and Structure in Statistical Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "80--87", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karolina Owczarzak, Josef van Genabith, and Andy Way. 2007a. Dependency-Based Automatic Evalua- tion for Machine Translation. In Proceedings of SSST, NAACL-HLT/AMTA Workshop on Syntax and Struc- ture in Statistical Translation, pages 80-87.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Labelled Dependencies in Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Karolina", |
| "middle": [], |
| "last": "Owczarzak", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Van Genabith", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Way", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the ACL Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "104--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karolina Owczarzak, Josef van Genabith, and Andy Way. 2007b. Labelled Dependencies in Machine Transla- tion Evaluation. In Proceedings of the ACL Workshop on Statistical Machine Translation, pages 104-111.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Evaluating machine translation with lfg dependencies", |
| "authors": [ |
| { |
| "first": "Karolina", |
| "middle": [], |
| "last": "Owczarzak", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Van Genabith", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Way", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Machine Translation", |
| "volume": "21", |
| "issue": "", |
| "pages": "95--119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karolina Owczarzak, Josef van Genabith, and Andy Way. 2008. Evaluating machine translation with lfg depen- dencies. Machine Translation, 21(2):95-119.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Depeval(summ): dependency-based evaluation for automatic summaries", |
| "authors": [ |
| { |
| "first": "Karolina", |
| "middle": [], |
| "last": "Owczarzak", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ACL-IJCNLP '09: Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "1", |
| "issue": "", |
| "pages": "190--198", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karolina Owczarzak. 2009. Depeval(summ): dependency-based evaluation for automatic sum- maries. In ACL-IJCNLP '09: Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP: Volume 1, pages 190-198, Morristown, NJ, USA. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Robust machine translation evaluation with entailment features", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Pad\u00f3", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "297--305", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Pad\u00f3, Michael Galley, Dan Jurafsky, and Christopher D. Manning. 2009. Robust machine translation evaluation with entailment features. In Proceedings of the Joint Conference of the 47th An- nual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP, pages 297-305.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Papineni, S. Roukos, T. Ward, and W. Zhu. 2001a. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meet- ing of the Association for Computational Linguistics (ACL), pages 311-318, Philadelphia, jul.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Bleu: a method for automatic evaluation of machine translation, RC22176. Technical report", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2001b. Bleu: a method for automatic evalu- ation of machine translation, RC22176. Technical re- port, IBM T.J. Watson Research Center.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Reducing Human Assessments of Machine Translation Quality to Binary Classifiers", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Finch", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 11th Conference on Theoretical and Methodological Issues in Machine Translation (TMI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Paul, Andrew Finch, and Eiichiro Sumita. 2007. Reducing Human Assessments of Machine Transla- tion Quality to Binary Classifiers. In Proceedings of the 11th Conference on Theoretical and Methodologi- cal Issues in Machine Translation (TMI).", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Word Error Rates: Decomposition over POS classes and Applications for Error Analysis", |
| "authors": [ |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Popovic", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Second Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "48--55", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maja Popovic and Hermann Ney. 2007. Word Error Rates: Decomposition over POS classes and Applica- tions for Error Analysis. In Proceedings of the Second Workshop on Statistical Machine Translation, pages 48-55, Prague, Czech Republic, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Dependency Treelet Translation: Syntactically Informed Phrasal SMT", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Quirk", |
| "suffix": "" |
| }, |
| { |
| "first": "Arul", |
| "middle": [], |
| "last": "Menezes", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Cherry", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "271--279", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Quirk, Arul Menezes, and Colin Cherry. 2005. De- pendency Treelet Translation: Syntactically Informed Phrasal SMT. In Proceedings of the 43rd Annual Meeting of the Association for Computational Linguis- tics (ACL), pages 271-279.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Training a Sentence-Level Machine Translation Confidence Metric", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Quirk", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 4th International Conference on Language Resources and Evaluation (LREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "825--828", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Quirk. 2004. Training a Sentence-Level Machine Translation Confidence Metric. In Proceedings of the 4th International Conference on Language Resources and Evaluation (LREC), pages 825-828.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "The Naming of Things and the Confusion of Tongues: an MT Metric", |
| "authors": [ |
| { |
| "first": "Florence", |
| "middle": [], |
| "last": "Reeder", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [], |
| "last": "Doyon", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "White", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the Workshop on MT Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "55--59", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Florence Reeder, Keith Miller, Jennifer Doyon, and John White. 2001. The Naming of Things and the Confu- sion of Tongues: an MT Metric. In Proceedings of the Workshop on MT Evaluation \"Who did what to whom?\" at Machine Translation Summit VIII, pages 55-59.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "A Study of Translation Edit Rate with Targeted Human Annotation", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Snover", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Dorr", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Linnea", |
| "middle": [], |
| "last": "Micciulla", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Makhoul", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas (AMTA)", |
| "volume": "", |
| "issue": "", |
| "pages": "223--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Snover, Bonnie Dorr, Richard Schwartz, Lin- nea Micciulla, and John Makhoul. 2006. A Study of Translation Edit Rate with Targeted Human Anno- tation. In Proceedings of the 7th Conference of the Association for Machine Translation in the Americas (AMTA), pages 223-231.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Accelerated DP based Search for Statistical Translation", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Tillmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Vogel", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Zubiaga", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Sawaf", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proceedings of European Conference on Speech Communication and Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph Tillmann, Stefan Vogel, Hermann Ney, A. Zu- biaga, and H. Sawaf. 1997. Accelerated DP based Search for Statistical Translation. In Proceedings of European Conference on Speech Communication and Technology.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Summarization evaluation using transformed basic elements", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Tratz", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of TAC-08", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Tratz and Eduard Hovy. 2008. Summarization evaluation using transformed basic elements. In In Proceedings of TAC-08. Gaithersburg, Maryland.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Evaluation of machine translation and its evaluation", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Turian", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [ |
| "Dan" |
| ], |
| "last": "Melamed", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of MT Summit IX", |
| "volume": "", |
| "issue": "", |
| "pages": "386--393", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph Turian, Luke Shen, and I. Dan Melamed. 2003a. Evaluation of machine translation and its evaluation. In In Proceedings of MT Summit IX, pages 386-393.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Evaluation of Machine Translation and its Evaluation", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [ |
| "P" |
| ], |
| "last": "Turian", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [ |
| "Dan" |
| ], |
| "last": "Melamed", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of MT SUMMIT IX", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph P. Turian, Luke Shen, and I. Dan Melamed. 2003b. Evaluation of Machine Translation and its Evaluation. In Proceedings of MT SUMMIT IX.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Additive reliability for metric pairs.", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Heterogeneity vs. reliability in MT test suites.\u00ac(g = s = s = g) \u2192 g = s \u2228 g = s", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Heterogeneity vs. reliability in summarization test suites.", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Heterogeneity of lexical measures vs. lexical and linguistic measures.", |
| "uris": null |
| }, |
| "FIGREF4": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Heterogeneity vs. reliability Gain for MT test suites.", |
| "uris": null |
| }, |
| "FIGREF5": { |
| "num": null, |
| "type_str": "figure", |
| "text": "Heterogeneity vs. reliability Gain for MT test suites.", |
| "uris": null |
| }, |
| "TABREF0": { |
| "text": "Description of the test beds from 2004 and 2005 NIST MT evaluation campaigns used in the experiments throughout the paper.", |
| "content": "<table><tr><td>DUC 2005 DUC 2006</td></tr></table>", |
| "html": null, |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF1": { |
| "text": "", |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table", |
| "num": null |
| } |
| } |
| } |
| } |