| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:42:22.718495Z" |
| }, |
| "title": "Learning to Evaluate Translation Beyond English BLEURT Submissions to the WMT Metrics 2020 Shared Task", |
| "authors": [ |
| { |
| "first": "Thibault", |
| "middle": [], |
| "last": "Sellam", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "tsellam@google.com" |
| }, |
| { |
| "first": "Amy", |
| "middle": [], |
| "last": "Pu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "puamy@google.com" |
| }, |
| { |
| "first": "Hyung", |
| "middle": [], |
| "last": "Won", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Gehrmann", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "gehrmann@google.com" |
| }, |
| { |
| "first": "Qijun", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "qijuntan@google.com" |
| }, |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Freitag", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "freitag@google.com" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "dipanjand@google.com" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [ |
| "P" |
| ], |
| "last": "Parikh", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "aparikh@google.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The quality of machine translation systems has dramatically improved over the last decade, and as a result, evaluation has become an increasingly challenging problem. This paper describes our contribution to the WMT 2020 Metrics Shared Task, the main benchmark for automatic evaluation of translation. We make several submissions based on BLEURT, a previously published metric which uses transfer learning. We extend the metric beyond English and evaluate it on 14 language pairs for which fine-tuning data is available, as well as 4 \"zero-shot\" language pairs, for which we have no labelled examples. Additionally, we focus on English to German and demonstrate how to combine BLEURT's predictions with those of YISI and use alternative reference translations to enhance the performance. Empirical results show that the models achieve competitive results on the WMT Metrics 2019 Shared Task, indicating their promise for the 2020 edition.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The quality of machine translation systems has dramatically improved over the last decade, and as a result, evaluation has become an increasingly challenging problem. This paper describes our contribution to the WMT 2020 Metrics Shared Task, the main benchmark for automatic evaluation of translation. We make several submissions based on BLEURT, a previously published metric which uses transfer learning. We extend the metric beyond English and evaluate it on 14 language pairs for which fine-tuning data is available, as well as 4 \"zero-shot\" language pairs, for which we have no labelled examples. Additionally, we focus on English to German and demonstrate how to combine BLEURT's predictions with those of YISI and use alternative reference translations to enhance the performance. Empirical results show that the models achieve competitive results on the WMT Metrics 2019 Shared Task, indicating their promise for the 2020 edition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The recent progress in machine translation models has led researchers to question the use of ngram overlap metrics such as BLEU, which focus solely on surface-level aspects of the generated text, and thus may correlate poorly with human evaluation (Papineni et al., 2002; Lin, 2004; Ma et al., 2019; Mathur et al., 2020; Belz and Reiter, 2006; Callison-Burch et al., 2006) . This has led to a surge of interest for more flexible metrics that use machine learning to capture semantic-level information (Celikyilmaz et al., 2020) . Popular examples of such metrics include YISI-1 (Lo, 2019) , ESIM (Mathur et al., 2019) , BERTSCORE (Zhang et al., 2020) , the Sentence Mover's Similarity (Zhao et al., 2019; Clark et al., 2019) , and BLEURT (Sellam et al., 2020) . These metrics utilize contextual embeddings from large models such as BERT (Devlin et al., 2019) which have been shown to capture linguistic information beyond surface-level aspects (Tenney et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 248, |
| "end": 271, |
| "text": "(Papineni et al., 2002;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 272, |
| "end": 282, |
| "text": "Lin, 2004;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 283, |
| "end": 299, |
| "text": "Ma et al., 2019;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 300, |
| "end": 320, |
| "text": "Mathur et al., 2020;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 321, |
| "end": 343, |
| "text": "Belz and Reiter, 2006;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 344, |
| "end": 372, |
| "text": "Callison-Burch et al., 2006)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 501, |
| "end": 527, |
| "text": "(Celikyilmaz et al., 2020)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 578, |
| "end": 588, |
| "text": "(Lo, 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 596, |
| "end": 617, |
| "text": "(Mathur et al., 2019)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 630, |
| "end": 650, |
| "text": "(Zhang et al., 2020)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 685, |
| "end": 704, |
| "text": "(Zhao et al., 2019;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 705, |
| "end": 724, |
| "text": "Clark et al., 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 738, |
| "end": 759, |
| "text": "(Sellam et al., 2020)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 837, |
| "end": 858, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 944, |
| "end": 965, |
| "text": "(Tenney et al., 2019)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The WMT Metrics 2020 Shared Task is the reference benchmark for evaluating these metrics in the context of machine translation. It tests the evaluation of systems that are to-English (X \u2192 En) and to other languages (X \u2192 Y), which requires a multilingual approach. An additional challenge for learned metrics is that human ratings are not available for all language pairs, and therefore, the models must use unlabeled data and perform zero-shot generalization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We describe several learned metrics based on BLEURT (Sellam et al., 2020) , originally developed for English data. We first extend BLEURT to the multilingual setup, and show that our approach achieves competitive results on the WMT Metrics 2019 Shared Task. 1 We also present several simple BERT-based baselines, which we submit for analysis. Finally, we focus on English to German and enhance BLEURT's performance by combining its predictions with those of YISI (Lo, 2019) as well as by using alternative references.", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 73, |
| "text": "(Sellam et al., 2020)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 258, |
| "end": 259, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 463, |
| "end": 473, |
| "text": "(Lo, 2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Task Reference-based NLG evaluation seeks to assign a score to a triplet of sentences (input, reference, candidate) , where input is a sentence in the source language, reference is a reference translation kept secret at inference time, and candidate is a translation produced by an MT system. Similar to BLEU (Papineni et al., 2002) and the previous editions of the WMT Metrics shared task, we omit the input and treat the task as a regression problem : we aim to learn a function f : (x,x) \u2192 y that predicts a quality score y for a candidate sentencex = (x 1 , ..,x p ) given a reference sentence x = (x 1 , .., x q ). The function is supervised on a corpus of N human ratings", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 115, |
| "text": "(input, reference, candidate)", |
| "ref_id": null |
| }, |
| { |
| "start": 309, |
| "end": 332, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Notations", |
| "sec_num": "2" |
| }, |
| { |
| "text": "{(x i ,x i , y i )} N n=1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Notations", |
| "sec_num": "2" |
| }, |
| { |
| "text": "BLEURT Most experiments presented in this paper are based on BLEURT, a metric that leverages transfer learning to achieve high accuracy and increase robustness (Sellam et al., 2020) . BLEURT is a BERT-based regression model (Devlin et al., 2019) . It embeds sentence pairs into a fixed-width vector v BERT = BERT(x,x) with a pre-trained Transformer, and feeds this vector to a linear layer:", |
| "cite_spans": [ |
| { |
| "start": 160, |
| "end": 181, |
| "text": "(Sellam et al., 2020)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 224, |
| "end": 245, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Notations", |
| "sec_num": "2" |
| }, |
| { |
| "text": "y = f (x,x) = W v BERT + b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Notations", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where W and b are the weight matrix and bias vector respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Notations", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In its original (English) version, BLEURT is trained in three stages. (1) It is initialized from a publicly available BERT checkpoint. (2) The model is then \"warmed up\" by exposing it to millions of sentence pairs (x,x), obtained by randomly perturbing sentences from Wikipedia. During this phase, the model learns to predict a wide range of similarity scores that include existing metrics (BERTSCORE, BLEU, ROUGE), scores from an entailment model, and the likelihood thatx was generated from x with a roundtrip translation by a given translation model. We denote this stage as mid-training. (3) In the final stage, the model is fine-tuned on human ratings from WMT Metrics (Bojar et al., 2017; Ma et al., 2018 Ma et al., , 2019 , using a regression loss", |
| "cite_spans": [ |
| { |
| "start": 674, |
| "end": 694, |
| "text": "(Bojar et al., 2017;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 695, |
| "end": 710, |
| "text": "Ma et al., 2018", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 711, |
| "end": 728, |
| "text": "Ma et al., , 2019", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Notations", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2113 supervised = 1 N N n=1 y i \u2212\u0177 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Notations", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We found that English BLEURT achieved competitive performance on four academic datasets, WebNLG (Gardent et al., 2017) , and the WMT Metrics Shared Task years 2017 to 2019.", |
| "cite_spans": [ |
| { |
| "start": 96, |
| "end": 118, |
| "text": "(Gardent et al., 2017)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Notations", |
| "sec_num": "2" |
| }, |
| { |
| "text": "3 Extending BLEURT Beyond English", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Notations", |
| "sec_num": "2" |
| }, |
| { |
| "text": "An approach to extend BLEURT would be to use MBERT, the public version of BERT pre-trained on 104 languages, and \"mid-train\" with non-English signals as described above. Yet, the evidence we gathered from early experiments were inconclusive. On the other hand, we did observe that models trained on several languages were often more accurate than monolingual models, possibly due to the larger amount of fine-tuning data. Thus, we opted for a simpler approach where we start with a multilingual BERT model and finetune it on all the human ratings data available for all languages (X \u2192 Y and X \u2192 En). In most cases, we found that such models could perform zero-shot evaluation: if a language Y does not have human ratings data, the metric can still perform evaluation in this target language as long as the base multilingual BERT model contains unlabeled data for Y, as observed in the past literature (Karthikeyan et al., 2019; Pires et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 901, |
| "end": 927, |
| "text": "(Karthikeyan et al., 2019;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 928, |
| "end": 947, |
| "text": "Pires et al., 2019)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We experiment with two pre-trained multilingual models: MBERT and MBERT-WMT, a custom multilingual variant of BERT. The MBERT-WMT model is larger that MBERT (24 Transformer layers instead of 12), and it was pre-trained on 19 languages of the WMT Metrics shared task 2015 to 2020.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modeling", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We trained MBERT-WMT model with an MLM loss (Devlin et al., 2019) , using a combination of public datasets: Wikipedia, the WMT 2019 News Crawl (Barrault et al.) , the C4 variant of Common Crawl (Raffel et al., 2020) , OPUS (Tiedemann, 2012), Nunavut Hansard (Joanis et al., 2020) , WikiTitles 2 , and ParaCrawl (Espl\u00e0-Gomis et al., 2019). We trained a new WordPiece vocabulary (Schuster and Nakajima, 2012; Wu et al., 2016) , since the original vocabulary of mBERT does not support the alphabets of Pashto, Khmer and Inuktitut. The model was trained for 1 million steps with the LAMB optimizer (You et al., 2020) , using the learning rate 0.0018 and batch size 4096 on 64 TPU v3 chips.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 65, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 143, |
| "end": 160, |
| "text": "(Barrault et al.)", |
| "ref_id": null |
| }, |
| { |
| "start": 194, |
| "end": 215, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 258, |
| "end": 279, |
| "text": "(Joanis et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 377, |
| "end": 406, |
| "text": "(Schuster and Nakajima, 2012;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 407, |
| "end": 423, |
| "text": "Wu et al., 2016)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 594, |
| "end": 612, |
| "text": "(You et al., 2020)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Details of MBERT-WMT pre-training", |
| "sec_num": null |
| }, |
| { |
| "text": "Datasets At the time of writing, no human ratings data is available for WMT Metrics 2020. Therefore, we use the human ratings from WMT Metrics years 2015 to 2019 for both training and evaluation. We do so in two stages. In the first stage, we use 2015 to 2018 for training (216,541 sentence pairs in 8 languages), setting 10% aside for early stopping. We use 2019 as a development set, to choose hyper-parameters and to support high-level modeling decisions. In the second stage, we use 2015 to 2019, that is, all the data available, for training and uniformly sample 10% of the data for early stopping and hyper-parameter tuning. This adds 289,895 sentence pairs and 4 additional languages to our training set, approximately doubling the size of the training data. We report our results on the first setup, but submit our predictions to the shared task using the second setup.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Hyper-parameters We run grid search on the learning rate and export the best model, using values {5e-6, 8e-6, 9e-6, 1e-5, 2e-5, 3e-5}. We use batch size 32 and evaluate the model every 1,000 steps on a 10% held-out data set to prevent over-fitting. During preliminary experiments, we additionally experimented with the batch size, dropout rate, frequency of continuous evaluation, balance of languages, pre-training schemes, Word-Piece vocabularies, and model architecture.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "English BLEURT We fine-tune a new BLEURT checkpoint, following the methodology described above. The main difference with Sellam et al. (2020) is that we incorporate the to-English ratings of year 2019, which were not previously available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additional Models and Baselines", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Monolingual baselines based on BERT We experiment with three baselines and submit the results to the WMT Metrics Shared Task for analysis. BERT-L2-BASE and BERT-L2-LARGE are two regression models based on BERT and trained on to-English ratings. We use the same setup as English BLEURT, but we omit the mid-training phase. A similar approach was described in Shimanaka et al. (2019) . BERT-CHINESE-L2 is similar to BERT-L2-BASE, but it uses BERT-CHINESE and it is fine-tuned on to-Chinese ratings.", |
| "cite_spans": [ |
| { |
| "start": 358, |
| "end": 381, |
| "text": "Shimanaka et al. (2019)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additional Models and Baselines", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Other Systems We compare our setups to other state-of-the-art learned metrics: BERTSCORE (Zhang et al., 2020) , and Yisi (Lo, 2019) all apply rules on top of BERT embeddings while ESIM (Mathur et al., 2019 ) is a neural sentence similarity model. PRISM (Thompson and Post, 2020) trains a multilingual translation model that is used as a zero-shot paraphrasing system. All the aforementioned systems take sentences pairs as input. Concurrent work has investigated incorporating the source with great success (Rei et al., 2020) . We leave this line of research for future work.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 109, |
| "text": "(Zhang et al., 2020)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 121, |
| "end": 131, |
| "text": "(Lo, 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 185, |
| "end": 205, |
| "text": "(Mathur et al., 2019", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 253, |
| "end": 278, |
| "text": "(Thompson and Post, 2020)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 507, |
| "end": 525, |
| "text": "(Rei et al., 2020)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additional Models and Baselines", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Tables 1 and 2 show the results in the X \u2192 En direction, at the segment-and system-level respectively. In the majority of cases, one of the BLEURT configurations yields the strongest results. The original BLEURT metric seems to perform better at the segment-level. At the system-level it may be dominated by PRISM (3 out of 7 language pairs) or by one of the simpler BERT-based models (4 out of 7 language pairs).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Tables 3 and 4 present the results for the other languages. MBERT-WMT yields solid results at the segment-level (it achieves the highest correlations for 7 out of 11 language pairs), in particular for the \"zero-shot\" setups, En \u2192 Gu, En \u2192 Kk, and En \u2192 Lt. It outperforms MBERT consistently, except for En \u2192 Ru and En \u2192 Zh where it lags behind the other metrics. The results are consistent at the system-level.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Based on these results, we make two \"competitive\" submissions. We present BLEURT as described above, which we ran on all the X \u2192 En sentence pairs. Additionally, we submitted a multilingual system that combines MBERT-WMT (for all languages except Chinese) and BERT-CHINESE-L2 (for Chinese). We ran the multilingual system for all language pairs including to-English, as the large amount of non-English fine-tuning data made available in 2019 may benefit this setup too. We also release the predictions of BERT-BASE-L2, BERT-LARGE-L2, and MBERT for analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Strategy for the WMT Metrics Shared Task", |
| "sec_num": null |
| }, |
| { |
| "text": "For English\u2192German, the organizers of WMT20 provide three different reference translations: two standard references and one additional paraphrased reference. Given this novel setup, we investigate how to combine our predictions. Moreover, we use a similar framework to ensemble the predictions of different metrics. In particular, we average the predictions of BLEURT, YISI-1 and YISI-2. All three metrics are different in their approaches. While BLEURT and YISI-1 are reference-based metrics, YISI-2 is reference- free and calculates its score by comparing translations only to the source sentence. BLEURT is fine-tuned on previous human ratings, while YISI-1 is based on the cosine similarity between BERT embeddings of the reference and the candidate. In the remainder of this section, we report BLEURT results using the MBERT-WMT setup unless specified otherwise. 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Additional Improvements on English\u2192German", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Before combining BLEURT and YISI, we perform a series of modifications to YISI-1 and evaluate their impact on English\u2192German.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modifications to YiSi-1", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Experimental Setup All experimental results are summarized in Table 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 62, |
| "end": 69, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Modifications to YiSi-1", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We report both segment-level (DARR) and system-level (Kendall \u03c4 ) correlations. To replicate the multireference setup of 2020, we compute correlations 3 We use a different checkpoint from the one described in Section 4. The model was trained for 880K steps instead of 1 million, and it uses a sequence length of 256 tokens instead of 128.", |
| "cite_spans": [ |
| { |
| "start": 151, |
| "end": 152, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modifications to YiSi-1", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "with the standard WMT references as well as the paraphrased reference from Freitag et al. (2020) . Improving YiSi's Predictions Our baseline is similar to the YISI-1 submission from WMT 2019 (Lo, 2019): we run YISI-1 with the public multilingual MBERT checkpoint. We then experiment with the underlying checkpoint. We continued pre-training MBERT on the in-domain German NewsCrawl dataset. The resulting model +pre-train NewsCrawl layer 9 increases the correlation for both reference translations. We improve the correlation further on the paraphrased reference by using the 8th instead of the 9th layer.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 96, |
| "text": "Freitag et al. (2020)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modifications to YiSi-1", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Other experiments We tried pre-training BERT on forward translated sentences from German NewsCrawl, to adapt the word embeddings to MT outputs. We also trained a BERT model from scratch on the German NewsCrawl data. These experiments did not result in higher correlations with human ratings. Table 5 : Agreement with human ratings on the WMT19 Metrics Shared Task for English\u2192German. The first set of results are generated by using the standard reference translations for WMT 2019. The second set of results is generated by using the paraphrased reference translations. YiSi-2 is reference free and only uses the source sentences.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 292, |
| "end": 299, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Modifications to YiSi-1", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We describe our two submissions to WMT 2020, YISI-COMB and ALL-COMB, which result from our efforts to use multiple references for automatic evaluation. YISI-COMB is a multi-reference version of the YISI score (Lo, 2019) aimed at achieving better system-level correlations. ALL-COMB leverages metrics from BLEURT, YISI-1, and YISI-2 on multiple references to achieve better segment-level correlation.", |
| "cite_spans": [ |
| { |
| "start": 209, |
| "end": 219, |
| "text": "(Lo, 2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combining BLEURT, YISI-1 and YISI-2 on Multiple References", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "YISI-COMB YISI scores are F 1 scores of YISI precision and YISI recall. For the YISI-COMB submission, we take the minimum of the YISI recalls for the three different references as the multireference recall, and the maximum of the YISI precision as the multi-reference precision. Using the same notations as in (Lo, 2019) , the final score is the F 1 of the recall and precision computed with \u03b1 = 0.7 (see Figure 1 ). This submission aims to maximize the system-level correlation.", |
| "cite_spans": [ |
| { |
| "start": 310, |
| "end": 320, |
| "text": "(Lo, 2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 405, |
| "end": 413, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Combining BLEURT, YISI-1 and YISI-2 on Multiple References", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "As shown in Table 5 , YISI-1 has the highest system-level correlation on paraphrased references. Given that we used \u03b1 = 0.7, YISI scores are quite similar to YISI recalls (when \u03b1 = 1.0, YISI scores are equal to YISI recalls). YISI-1 scores for paraphrased references are usually much lower than those of standard references, therefore taking the minimum recall is oftentimes equivalent to taking the YISI recall from the paraphrased references. Furthermore, we found that using the maximum precision, in combination with aggregating recalls, usually performs the best. Correlations with respect to different \u03b1 settings for Yisi-1. The system-level correlation is highest when \u03b1 = 0.7, which is the \u03b1 we use for the submission.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Combining BLEURT, YISI-1 and YISI-2 on Multiple References", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We combined the predictions of YISI-1 with those of BLEURT and YISI-2. YISI-2 usually performs worse than the reference-based metrics, but we found that incorporating its predictions can help. Having three different metrics (BLEURT, YISI-1, YISI-2) and three different reference translations, we take all seven predictions and average the scores for each segment. The combined prediction ALL-COMB outperforms every single metric at the segment level, though the system-level correlation drops in comparison to the best YISI-1 score on paraphrased references. This submission aims to maximize the segmentlevel correlation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ALL-COMB", |
| "sec_num": null |
| }, |
| { |
| "text": "We submit the following systems to the WMT Metrics shared task:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 BLEURT as previously published, fine-tuned on the human ratings of the WMT Metrics shared task 2015 to 2019, to-English.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 A multi-lingual extensions of BLEURT based on a 20 languages variant of MBERT and BERT-CHINESE.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Three baseline systems based on BERT-BASE, BERT-LARGE, and MBERT.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Two combination methods for English to German that use YiSi and alternative references, YISI-COMB and ALL-COMB.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://linguatools.org/tools/ corpora/wikipedia-parallel-titles-corpora/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": " 1 We use the following languages for fine-tuning and/or testing: Chinese, Czech, German, English, Estonian, Finnish, French, Gujarati, Kazakh, Lithuanian, Russian, and Turkish. In addition, we also pre-train on Inuktitut, Japanese, Khmer, Pastho, Polish, Romanian, and Tamil.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "acknowledgement", |
| "sec_num": null |
| }, |
| { |
| "text": "Thanks to Xavier Garcia and Ran Tian for advice and proof-reading.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "7" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Shervin Malmasi, et al. Findings of the 2019 conference on machine translation", |
| "authors": [ |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Marta", |
| "middle": [ |
| "R" |
| ], |
| "last": "Costa-Juss\u00e0", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Fishel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Huck", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of WMT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lo\u00efc Barrault, Ond\u0159ej Bojar, Marta R Costa-Juss\u00e0, Christian Federmann, Mark Fishel, Yvette Gra- ham, Barry Haddow, Matthias Huck, Philipp Koehn, Shervin Malmasi, et al. Findings of the 2019 con- ference on machine translation. In Proceedings of WMT.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Comparing automatic and human evaluation of nlg systems", |
| "authors": [ |
| { |
| "first": "Anja", |
| "middle": [], |
| "last": "Belz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anja Belz and Ehud Reiter. 2006. Comparing auto- matic and human evaluation of nlg systems. In Pro- ceedings of EACL.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Results of the wmt17 metrics shared task", |
| "authors": [ |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Kamran", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of WMT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ond\u0159ej Bojar, Yvette Graham, and Amir Kamran. 2017. Results of the wmt17 metrics shared task. In Proceedings of WMT.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Re-evaluation the role of bleu in machine translation research", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Miles", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Callison-Burch, Miles Osborne, and Philipp Koehn. 2006. Re-evaluation the role of bleu in ma- chine translation research. In Proceedings of EACL.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Evaluation of text generation: A survey", |
| "authors": [ |
| { |
| "first": "Asli", |
| "middle": [], |
| "last": "Celikyilmaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Asli Celikyilmaz, Elizabeth Clark, and Jianfeng Gao. 2020. Evaluation of text generation: A survey. arXiv.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Sentence mover's similarity: Automatic evaluation for multi-sentence texts", |
| "authors": [ |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Asli", |
| "middle": [], |
| "last": "Celikyilmaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elizabeth Clark, Asli Celikyilmaz, and Noah A Smith. 2019. Sentence mover's similarity: Automatic eval- uation for multi-sentence texts. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of NAACL HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing. In Proceedings of NAACL HLT.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Paracrawl: Web-scale parallel corpora for the languages of the eu", |
| "authors": [ |
| { |
| "first": "Miquel", |
| "middle": [], |
| "last": "Espl\u00e0-Gomis", |
| "suffix": "" |
| }, |
| { |
| "first": "Mikel", |
| "middle": [ |
| "L" |
| ], |
| "last": "Forcada", |
| "suffix": "" |
| }, |
| { |
| "first": "Gema", |
| "middle": [], |
| "last": "Ram\u00edrez-S\u00e1nchez", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of Machine Translation Summit XVII", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miquel Espl\u00e0-Gomis, Mikel L Forcada, Gema Ram\u00edrez-S\u00e1nchez, and Hieu Hoang. 2019. Paracrawl: Web-scale parallel corpora for the languages of the eu. In Proceedings of Machine Translation Summit XVII Volume 2: Translator, Project and User Tracks.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BLEU might be Guilty but References are not Innocent", |
| "authors": [ |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Freitag", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Isaac", |
| "middle": [], |
| "last": "Caswell", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Markus Freitag, David Grangier, and Isaac Caswell. 2020. BLEU might be Guilty but References are not Innocent. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "The webnlg challenge: Generating text from rdf data", |
| "authors": [ |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Gardent", |
| "suffix": "" |
| }, |
| { |
| "first": "Anastasia", |
| "middle": [], |
| "last": "Shimorina", |
| "suffix": "" |
| }, |
| { |
| "first": "Shashi", |
| "middle": [], |
| "last": "Narayan", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Perez-Beltrachini", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of INLG", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Claire Gardent, Anastasia Shimorina, Shashi Narayan, and Laura Perez-Beltrachini. 2017. The webnlg challenge: Generating text from rdf data. In Pro- ceedings of INLG.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Darlene Stewart, and Jeffrey Micher. 2020. The nunavut hansard inuktitut-english parallel corpus 3.0 with preliminary machine translation results", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Joanis", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Knowles", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Larkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Littell", |
| "suffix": "" |
| }, |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Joanis, Rebecca Knowles, Roland Kuhn, Samuel Larkin, Patrick Littell, Chi-kiu Lo, Darlene Stew- art, and Jeffrey Micher. 2020. The nunavut hansard inuktitut-english parallel corpus 3.0 with prelimi- nary machine translation results.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Cross-lingual ability of multilingual bert: An empirical study", |
| "authors": [ |
| { |
| "first": "Kaliyaperumal", |
| "middle": [], |
| "last": "Karthikeyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Mayhew", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaliyaperumal Karthikeyan, Zihan Wang, Stephen Mayhew, and Dan Roth. 2019. Cross-lingual abil- ity of multilingual bert: An empirical study. In Pro- ceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Rouge: A package for automatic evaluation of summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Workshop on Text Summarization Branches Out", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Workshop on Text Sum- marization Branches Out.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Yisi-a unified semantic mt quality evaluation and estimation metric for languages with different levels of available resources", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of WMT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi-kiu Lo. 2019. Yisi-a unified semantic mt quality evaluation and estimation metric for languages with different levels of available resources. In Proceed- ings of WMT.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Results of the wmt18 metrics shared task: Both characters and embeddings achieve good performance", |
| "authors": [ |
| { |
| "first": "Qingsong", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of WMT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qingsong Ma, Ond\u0159ej Bojar, and Yvette Graham. 2018. Results of the wmt18 metrics shared task: Both char- acters and embeddings achieve good performance. In Proceedings of WMT.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Results of the wmt19 metrics shared task: Segment-level and strong mt systems pose big challenges", |
| "authors": [ |
| { |
| "first": "Qingsong", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Johnny", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of WMT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qingsong Ma, Johnny Wei, Ond\u0159ej Bojar, and Yvette Graham. 2019. Results of the wmt19 metrics shared task: Segment-level and strong mt systems pose big challenges. In Proceedings of WMT.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Tangled up in bleu: Reevaluating the evaluation of automatic machine translation evaluation metrics", |
| "authors": [ |
| { |
| "first": "Nitika", |
| "middle": [], |
| "last": "Mathur", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitika Mathur, Tim Baldwin, and Trevor Cohn. 2020. Tangled up in bleu: Reevaluating the evaluation of automatic machine translation evaluation metrics. Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Putting evaluation in context: Contextual embeddings improve machine translation evaluation", |
| "authors": [ |
| { |
| "first": "Nitika", |
| "middle": [], |
| "last": "Mathur", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitika Mathur, Timothy Baldwin, and Trevor Cohn. 2019. Putting evaluation in context: Contextual em- beddings improve machine translation evaluation. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "How multilingual is multilingual bert? arXiv", |
| "authors": [ |
| { |
| "first": "Telmo", |
| "middle": [], |
| "last": "Pires", |
| "suffix": "" |
| }, |
| { |
| "first": "Eva", |
| "middle": [], |
| "last": "Schlinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual bert? arXiv.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharan", |
| "middle": [], |
| "last": "Narang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Matena", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanqi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter J", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "In Journal of Machine Learning Research", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text trans- former. In Journal of Machine Learning Research.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Comet: A neural framework for mt evaluation", |
| "authors": [ |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Rei", |
| "suffix": "" |
| }, |
| { |
| "first": "Craig", |
| "middle": [], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Ana", |
| "middle": [ |
| "C" |
| ], |
| "last": "Farinha", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020. Comet: A neural framework for mt evaluation. arXiv.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Japanese and Korean voice search", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaisuke", |
| "middle": [], |
| "last": "Nakajima", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of ICASSP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Schuster and Kaisuke Nakajima. 2012. Japanese and Korean voice search. In Proceedings of ICASSP.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Bleurt: Learning robust metrics for text generation", |
| "authors": [ |
| { |
| "first": "Thibault", |
| "middle": [], |
| "last": "Sellam", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur P", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thibault Sellam, Dipanjan Das, and Ankur P Parikh. 2020. Bleurt: Learning robust metrics for text gen- eration. Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Machine translation evaluation with bert regressor. arXiv", |
| "authors": [ |
| { |
| "first": "Hiroki", |
| "middle": [], |
| "last": "Shimanaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomoyuki", |
| "middle": [], |
| "last": "Kajiwara", |
| "suffix": "" |
| }, |
| { |
| "first": "Mamoru", |
| "middle": [], |
| "last": "Komachi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiroki Shimanaka, Tomoyuki Kajiwara, and Mamoru Komachi. 2019. Machine translation evaluation with bert regressor. arXiv.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Bert rediscovers the classical nlp pipeline", |
| "authors": [ |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Tenney", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Ellie", |
| "middle": [], |
| "last": "Pavlick", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019. Bert rediscovers the classical nlp pipeline. In Pro- ceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Automatic machine translation evaluation in many languages via zero-shot paraphrasing", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Thompson", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Thompson and Matt Post. 2020. Automatic ma- chine translation evaluation in many languages via zero-shot paraphrasing. Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Parallel data, tools and interfaces in opus", |
| "authors": [ |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of The 8th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J\u00f6rg Tiedemann. 2012. Parallel data, tools and inter- faces in opus. In Proceedings of The 8th Language Resources and Evaluation Conference.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation. arXiv", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Klingner", |
| "suffix": "" |
| }, |
| { |
| "first": "Apurva", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaobing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Gouws", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshikiyo", |
| "middle": [], |
| "last": "Kato", |
| "suffix": "" |
| }, |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideto", |
| "middle": [], |
| "last": "Kazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Stevens", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Kurian", |
| "suffix": "" |
| }, |
| { |
| "first": "Nishant", |
| "middle": [], |
| "last": "Patil", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, \u0141ukasz Kaiser, Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2016. Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation. arXiv.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Large batch optimization for deep learning: Training bert in 76 minutes", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "You", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sashank", |
| "middle": [], |
| "last": "Reddi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Hseu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjiv", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Srinadh", |
| "middle": [], |
| "last": "Bhojanapalli", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Demmel", |
| "suffix": "" |
| }, |
| { |
| "first": "Kurt", |
| "middle": [], |
| "last": "Keutzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Cho-Jui", |
| "middle": [], |
| "last": "Hsieh", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. 2020. Large batch optimization for deep learning: Training bert in 76 minutes. In Proceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Bertscore: Evaluating text generation with bert", |
| "authors": [ |
| { |
| "first": "Tianyi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Varsha", |
| "middle": [], |
| "last": "Kishore", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [], |
| "last": "Kilian", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Weinberger", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2020. Bertscore: Eval- uating text generation with bert. Proceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Moverscore: Text generation evaluating with contextualized embeddings and earth mover distance", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxime", |
| "middle": [], |
| "last": "Peyrard", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Christian", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Meyer", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Eger", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Zhao, Maxime Peyrard, Fei Liu, Yang Gao, Chris- tian M Meyer, and Steffen Eger. 2019. Moverscore: Text generation evaluating with contextualized em- beddings and earth mover distance. Proceedings of EMNLP.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "text": "Figure 1: Correlations with respect to different \u03b1 settings for Yisi-1. The system-level correlation is highest when \u03b1 = 0.7, which is the \u03b1 we use for the submission.", |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td/><td>de-en</td><td>fi-en</td><td>gu-en</td><td>kk-en</td><td>lt-en</td><td>ru-en</td><td>zh-en</td><td>avg</td></tr><tr><td>YISI</td><td>0.949</td><td>0.989</td><td>0.924</td><td>0.994</td><td>0.981</td><td>0.979</td><td>0.979</td><td>0.971</td></tr><tr><td>YISI1-SRL</td><td>0.950</td><td>0.989</td><td>0.918</td><td>0.994</td><td>0.983</td><td>0.978</td><td>0.977</td><td>0.969</td></tr><tr><td>ESIM</td><td>0.941</td><td>0.971</td><td>0.885</td><td>0.986</td><td>0.989</td><td>0.968</td><td>0.988</td><td>0.961</td></tr><tr><td>BERTSCORE</td><td>0.949</td><td>0.987</td><td>0.981</td><td>0.980</td><td>0.962</td><td>0.921</td><td>0.983</td><td>0.966</td></tr><tr><td>PRISM</td><td>0.954</td><td>0.983</td><td>0.764</td><td>0.998</td><td>0.995</td><td>0.914</td><td>0.992</td><td>0.943</td></tr><tr><td>BLEURT Configurations, English-only</td><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>BERT-L2-BASE</td><td>0.938</td><td>0.992</td><td>0.930</td><td>0.992</td><td>0.991</td><td>0.976</td><td>0.997</td><td>0.974</td></tr><tr><td>BERT-L2-LARGE</td><td>0.940</td><td>0.987</td><td>0.819</td><td>0.992</td><td>0.990</td><td>0.985</td><td>0.993</td><td>0.958</td></tr><tr><td>BLEURT</td><td>0.943</td><td>0.989</td><td>0.865</td><td>0.996</td><td>0.995</td><td>0.984</td><td>0.990</td><td>0.966</td></tr><tr><td>BLEURT Configurations, Multi-lingual</td><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>MBERT</td><td>0.937</td><td>0.976</td><td>0.863</td><td>0.984</td><td>0.978</td><td>0.959</td><td>0.978</td><td>0.954</td></tr><tr><td>MBERT-WMT</td><td>0.950</td><td>0.991</td><td>0.815</td><td>0.989</td><td>0.992</td><td>0.968</td><td>0.980</td><td>0.955</td></tr></table>", |
| "text": "Segment-level agreement with human ratings on the WMT19 Metrics Shared Task on the to-English language pairs. The metric is WMT's Direct Assessment metric, a robust variant of Kendall \u03c4 . The scores for YISI, YISI1-SRL, and ESIM come fromMa et al. (2019). The scores for BERTSCORE and PRISM come fromThompson and Post (2020).", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "content": "<table/>", |
| "text": "System-level agreement with human ratings on the WMT19 Metrics Shared Task on the to-English language pairs. The metric is Pearson's correlation. The scores for YISI, YISI1-SRL, and ESIM come fromMa et al. (2019). The scores for BERTSCORE and PRISM come fromThompson and Post (2020).", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td/><td>en-cs</td><td>en-de</td><td>en-fi</td><td>en-gu</td><td>en-kk</td><td>en-lt</td><td>en-ru</td><td>en-zh</td><td>de-cs</td><td>de-fr</td><td>fr-de</td><td>avg</td></tr><tr><td>YISI1</td><td>0.962</td><td>0.991</td><td>0.971</td><td>0.909</td><td>0.985</td><td>0.963</td><td>0.992</td><td>0.951</td><td>0.973</td><td>0.969</td><td>0.908</td><td>0.961</td></tr><tr><td>YISI1-SRL</td><td>-</td><td>0.991</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.948</td><td>-</td><td>-</td><td>0.912</td><td>-</td></tr><tr><td>ESIM</td><td>-</td><td>0.991</td><td>0.957</td><td>-</td><td>0.980</td><td>0.989</td><td>0.989</td><td>0.931</td><td>0.980</td><td>0.950</td><td>0.942</td><td>-</td></tr><tr><td>BERTSCORE</td><td>0.981</td><td>0.990</td><td>0.970</td><td>0.922</td><td>0.981</td><td>0.978</td><td>0.989</td><td>0.925</td><td>0.969</td><td>0.971</td><td>0.899</td><td>0.961</td></tr><tr><td>PRISM</td><td>0.958</td><td>0.988</td><td>0.949</td><td>0.624</td><td>0.978</td><td>0.937</td><td>0.918</td><td>0.898</td><td>0.976</td><td>0.936</td><td>0.911</td><td>0.916</td></tr><tr><td>BLEURT Configurations</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>BERT-CHINESE-L2</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.953</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>MBERT</td><td>0.942</td><td>0.987</td><td>0.953</td><td>0.949</td><td>0.982</td><td>0.950</td><td>0.947</td><td>0.949</td><td>0.972</td><td>0.970</td><td>0.924</td><td>0.957</td></tr><tr><td>MBERT-WMT</td><td>0.993</td><td>0.991</td><td>0.987</td><td>0.959</td><td>0.993</td><td>0.989</td><td>0.888</td><td>0.953</td><td>0.986</td><td>0.988</td><td>0.962</td><td>0.972</td></tr></table>", |
| "text": "Segment-level agreement with human ratings on the WMT19 Metrics Shared Task on non-English language pairs. The metric is WMT's Direct Assessment metric, a robust variant of Kendall \u03c4. Languages without fine-tuning data are denoted in italics. The scores for YISI, YISI1-SRL, and ESIM come from Ma et al. (2019). The scores for BERTSCORE and PRISM come from Thompson and Post (2020).", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td/><td/><td/><td/><td colspan=\"2\">sys-level seg-level</td></tr><tr><td>Ref</td><td>Metric</td><td>model</td><td/><td colspan=\"2\">Kendall \u03c4 DARR</td></tr><tr><td>std</td><td colspan=\"3\">BLEURT MBERT-WMT \u00b6</td><td>0.896</td><td>0.420</td></tr><tr><td/><td/><td colspan=\"2\">MBERT (WMT19 subm.)</td><td>0.810</td><td>0.351</td></tr><tr><td>std</td><td>YiSi-1</td><td colspan=\"2\">+pre-train NewsCrawl layer 9</td><td>0.870</td><td>0.373</td></tr><tr><td/><td/><td colspan=\"2\">+pre-train NewsCrawl layer 8 \u2020</td><td>0.853</td><td>0.376</td></tr><tr><td>para</td><td colspan=\"3\">BLEURT MBERT-WMT \u00b6</td><td>0.852</td><td>0.413</td></tr><tr><td/><td/><td colspan=\"2\">MBERT (WMT19 subm.)</td><td>0.844</td><td>0.316</td></tr><tr><td>para</td><td>YiSi-1</td><td colspan=\"2\">+pre-train NewsCrawl layer 9</td><td>0.887</td><td>0.365</td></tr><tr><td/><td/><td colspan=\"2\">+pre-train NewsCrawl layer 8 \u2020</td><td>0.896</td><td>0.373</td></tr><tr><td>src</td><td>YiSi-2</td><td>MBERT</td><td>\u00b6</td><td>0.307</td><td>0.106</td></tr><tr><td>2std+para</td><td colspan=\"3\">YiSi-comb comb of 3 ( \u2020 systems) all-comb avg of 7 ( \u2020 & \u00b6 systems)</td><td>0.905 0.878</td><td>0.399 0.454</td></tr></table>", |
| "text": "System-level agreement with human ratings on the WMT19 Metrics Shared Task on non-English language pairs. The metric is Pearson's correlation. Languages without finetuning data are denoted in italics. The scores for YISI, YISI1-SRL, and ESIM come from Ma et al. (2019). The scores for BERTSCORE and PRISM come from Thompson and Post (2020).", |
| "num": null, |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |