| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:42:40.075309Z" |
| }, |
| "title": "Machine Translation Reference-less Evaluation using YiSi-2 with Bilingual Mappings of Massive Multilingual Language Model", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Multilingual Text Processing Digital Technologies Research Centre National Research Council Canada (NRC-CNRC)", |
| "location": { |
| "addrLine": "1200 Montreal Road", |
| "postCode": "K1A 0R6", |
| "settlement": "Ottawa", |
| "region": "ON", |
| "country": "Canada" |
| } |
| }, |
| "email": "chikiu.lo@nrc-cnrc.gc.ca" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Larkin", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Multilingual Text Processing Digital Technologies Research Centre National Research Council Canada (NRC-CNRC)", |
| "location": { |
| "addrLine": "1200 Montreal Road", |
| "postCode": "K1A 0R6", |
| "settlement": "Ottawa", |
| "region": "ON", |
| "country": "Canada" |
| } |
| }, |
| "email": "samuel.larkin@nrc-cnrc.gc.ca" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present a study on using YiSi-2 with massive multilingual pretrained language models for machine translation (MT) reference-less evaluation. Aiming at finding better semantic representation for semantic MT evaluation, we first test YiSi-2 with contextual embeddings extracted from different layers of two different pretrained models, multilingual BERT and XLM-RoBERTa. We also experiment with learning bilingual mappings that transform the vector subspace of the source language to be closer to that of the target language in the pretrained model to obtain more accurate cross-lingual semantic similarity representations. Our results show that YiSi-2's correlation with human direct assessment on translation quality is greatly improved by replacing multilingual BERT with XLM-RoBERTa and projecting the source embeddings into the target embedding space using a cross-lingual linear projection (CLP) matrix learnt from a small development set.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present a study on using YiSi-2 with massive multilingual pretrained language models for machine translation (MT) reference-less evaluation. Aiming at finding better semantic representation for semantic MT evaluation, we first test YiSi-2 with contextual embeddings extracted from different layers of two different pretrained models, multilingual BERT and XLM-RoBERTa. We also experiment with learning bilingual mappings that transform the vector subspace of the source language to be closer to that of the target language in the pretrained model to obtain more accurate cross-lingual semantic similarity representations. Our results show that YiSi-2's correlation with human direct assessment on translation quality is greatly improved by replacing multilingual BERT with XLM-RoBERTa and projecting the source embeddings into the target embedding space using a cross-lingual linear projection (CLP) matrix learnt from a small development set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The machine translation quality estimation as a metric (QE as a metric) task was first introduced in WMT 2019 (Ma et al., 2019; Fonseca et al., 2019) to encourage the exploration of reference-less evaluation metrics. QE as a metric task shifts the use case of the QE systems from assisting professional translators to estimate post-editing efforts to assisting MT developers or general MT users to discriminate the translation quality of different MT systems without the presence of a human reference translation. YiSi-2, the reference-less variants of the YiSi metric (Lo, 2019), was the only metric that participated in evaluating all the translation directions in WMT 2019 QE as a metric shared task.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 127, |
| "text": "(Ma et al., 2019;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 128, |
| "end": 149, |
| "text": "Fonseca et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 569, |
| "end": 579, |
| "text": "(Lo, 2019)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The QE as a metric task is very similar to Task 1 (Sentence-level direct assessment) of WMT20's quality estimation shared task where metric performance is evaluated in terms of correlation at the sentence-level with human direct assessment scores on translation quality. The subtle but crucial difference between the WMT20 QE Task 1 and the QE as a metric task is that QE systems for the former task are trained specifically to estimate the quality of a single MT system whereas QE metrics for the latter task are generalized for multiple machine translation systems. The QE systems for WMT20's QE Task 1 have access to the MT system that generates the translations while the reference-less metrics for the latter task have no information on the MT systems being evaluated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In WMT 2019 metrics shared task, pretrained multilingual BERT (Devlin et al., 2018) was used in YiSi for both MT reference-based (YiSi-1) and reference-less (YiSi-2) evaluation in all tested translation directions where monolingual pretrained BERT model was not available for the target language (such as Czech, German, etc.). Since then, another massive multilingual pretrained language model, XLM-RoBERTa (Conneau et al., 2020) , has been published. We evaluate the use of contextual embeddings extracted from each of the intermediate layers of the two models in MT reference-less evaluation.", |
| "cite_spans": [ |
| { |
| "start": 62, |
| "end": 83, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 407, |
| "end": 429, |
| "text": "(Conneau et al., 2020)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In addition, despite using the same pretrained embedding model of last year, YiSi-2 showed a significant performance degradation when comparing to YiSi-1. For example, segment-level correlation with human direct assessment for evaluating English\u2192Czech drops from 0.475 (YiSi-1) to 0.069 (YiSi-2). This shows that the cross-lingual semantic representation in pretrained multilingual BERT is not as accurate as the monolingual semantic representation for each language. In other words, we observed the language clustering effect where a clear segregation of vector subspace among different languages in the multilingual contextual embedding model. Inspired by Zhao et al. (2020) , we employ a weakly-supervised bilingual mapping learnt from a small development set that transforms the contextual embeddings of the source sentence to the target subspace for better cross-lingual semantic similarity evaluation.", |
| "cite_spans": [ |
| { |
| "start": 658, |
| "end": 676, |
| "text": "Zhao et al. (2020)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we show that YiSi-2's correlation with human direct assessment on translation quality is greatly improved by replacing multilingual BERT with XLM-RoBERTa large using the optimal intermediate layer (7th layer counting from the last) and projecting the source embeddings into the target embedding space using a cross-lingual linear projection matrix learnt from a small development set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "YiSi (Lo, 2019 ) is a unified semantic MT quality evaluation and estimation metric for languages with different levels of available resources. YiSi-1 measures the similarity between a machine translation and human references by aggregating weighted distributional (lexical) semantic similarities, and optionally incorporating shallow semantic structures. Improvements in YiSi-1 for WMT 2020 metrics shared task is detailed in (Lo, 2020) .", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 14, |
| "text": "(Lo, 2019", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 426, |
| "end": 436, |
| "text": "(Lo, 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "YiSi-2", |
| "sec_num": "2" |
| }, |
| { |
| "text": "YiSi-2 is the bilingual, reference-less version, which uses bilingual word embeddings to evaluate cross-lingual lexical semantic similarity between the input and MT output.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "YiSi-2", |
| "sec_num": "2" |
| }, |
| { |
| "text": "YiSi-2 relies on a cross-lingual language representation to evaluate the cross-lingual lexical semantic similarity. Previously, it used pretrained multilingual BERT (Devlin et al., 2018) for this purpose. BERT captures the sentence context in the embeddings, such that the embedding of the same subword unit in different sentences would be different from each other and be better represented in the embedding space. Since multilingual BERT is trained on the concatenation of nonparallel data from each language, the circular dependency deadlock between parallel resource and cross-lingual semantic similarity is broken (Lo and Simard, 2019) . Multilingual BERT covers the 104 largest languages in Wikipedia. XLM-RoBERTa (Conneau et al., 2020) (XLM-R) is also a massive multilingual pretrained language model. Similar to BERT, XLM-R is also trained with a masked language model task on the concatenation of non-parallel data. The differences between XLM-R and BERT are 1) XLM-R is trained on the CommonCrawl corpus which is significantly larger than the Wikipedia training data used by BERT; 2) instead of a uniform data sampling rate used in BERT, XLM-R uses a language sampling rate that is proportional to the amount of data available in the training set. Because of these differences, XLM-R performs better on low resource languages than multilingual BERT. XLM-R covers 100 languages. In this work, we use XLM-R large for the best performance on crosslingual semantic similarity.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 186, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 619, |
| "end": 640, |
| "text": "(Lo and Simard, 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Massive Multilingual Pretrained Language Models", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "As suggested by Devlin et al. (2018) ; Peters et al. 2018; Zhang et al. 2020, we experimented using contextual embeddings extracted from different layers of the multilingual language encoder to find out the layer that best represents the semantic space of the language.", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 36, |
| "text": "Devlin et al. (2018)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Massive Multilingual Pretrained Language Models", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Since Inuktitut is neither covered by pretrained multilingual BERT nor XLM-RoBERTa, we trained our own Inuktitut-English XLM (Lample and Conneau, 2019) using the Nunavut Hansard 3.0 (NH) parallel corpus (Joanis et al., 2020) . The model was trained with masked language model and translation language model tasks. The Inuktitut-English XLM model has 12 layers with 8 heads and embedding size of 512.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 151, |
| "text": "(Lample and Conneau, 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 203, |
| "end": 224, |
| "text": "(Joanis et al., 2020)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inuktitut-English Cross-lingual Language Model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In the WMT 2019 metrics shared task (Ma et al., 2019) , we saw a very significant performance degradation between YiSi-1 and YiSi-2. This shows that current multilingual language models construct a shared multilingual space in an unsupervised manner without any direct bilingual signal, in which representations of context in the same language are likely to cluster together in part of the subspace and there is a language segregation in the shared multilingual space. Inspired by Artetxe et al. (2016) and Zhao et al. (2020) , we obtain subword token pairs from the news translation task development set for each language (each contains around 1k to 3k sentence pairs) aligned by maximum alignment of their semantic similarities. We then train a cross-lingual linear projection (Zhao et al., 2020) that transforms the source embeddings into the target embeddings subspace. ", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 53, |
| "text": "(Ma et al., 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 481, |
| "end": 502, |
| "text": "Artetxe et al. (2016)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 507, |
| "end": 525, |
| "text": "Zhao et al. (2020)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 779, |
| "end": 798, |
| "text": "(Zhao et al., 2020)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cross-lingual Linear Projection", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We use WMT 2019 metrics task evaluation set (Ma et al., 2019) for our development experiments. The official human judgments for translation quality of WMT 2019 were collected using reference-based direct assessment.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 61, |
| "text": "(Ma et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Since we use exactly the same correlation analysis as the official metrics shared evaluation and the 2019 version of YiSi performed consistently well among participants in WMT 2019, we only compare our results with the 2019 version of YiSi and BLEU. Our results are directly comparable with those reported in Ma et al. (2019) . direct assessment on WMT 2019 zh-en, en-cs, en-de, en-fi, en-gu and en-kk news translation test set of YiSi-2 using contextual embeddings extracted from different layers of the multilingual pretrained language models. On the x-axis, layer \u2212n means YiSi-2 based on the embeddings of the n th layer, counting from the last, of XLM-RoBERTa large (blue circles), multilingual BERT (red triangles) and layer \u22127 of of XLM-RoBERTa large with source embeddings projected to target language space using CLP (blue star). Figure 4 : Segment-level Kendall's \u03c4 correlation with human direct assessment on WMT 2019 en-lt, en-ru, en-zh, de-cs, de-fr and fr-de news translation test set of YiSi-2 using contextual embeddings extracted from different layers of the multilingual pretrained language models. On the x-axis, layer \u2212n means YiSi-2 based on the embeddings of the n th layer, counting from the last, of XLM-RoBERTa large (blue circles), multilingual BERT (red triangles) and layer \u22127 of of XLM-RoBERTa large with source embeddings projected to target language space using CLP (blue star). input de fi gu kk lt ru zh en en en en en en en en output en en en en en en en cs de fi gu kk lt ru zh Reference-based evaluation metric YiSi-1 (2019) . .901 QE as a metric YiSi-2 (2020 ) .898 .959 .739 .981 .935 .461 .980 .773 .963 .906 .890 .977 .761 .473 .449 YiSi-2 (2019 ", |
| "cite_spans": [ |
| { |
| "start": 309, |
| "end": 325, |
| "text": "Ma et al. (2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1615, |
| "end": 1627, |
| "text": "YiSi-2 (2020", |
| "ref_id": null |
| }, |
| { |
| "start": 1628, |
| "end": 1717, |
| "text": ") .898 .959 .739 .981 .935 .461 .980 .773 .963 .906 .890 .977 .761 .473 .449 YiSi-2 (2019", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 839, |
| "end": 847, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 1410, |
| "end": 1578, |
| "text": "input de fi gu kk lt ru zh en en en en en en en en output en en en en en en en cs de fi gu kk lt ru zh Reference-based evaluation metric", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In Figure 1 , 2, 3 and 4, we plot the change of segment-level Kendall's \u03c4 correlation for YiSi-2 across different layers of XLM-R and multilingual BERT models. We identify a common trend, YiSi-2 using embeddings extracted from XLM-R significantly outperforms YiSi-2 using embeddings extracted from multilingual BERT. From figure 1, we see that, on average, on all translation directions, the optimal layer of representation in XLM-R for YiSi-2 is layer \u22127. Learning the cross-lingual linear projection matrix to transform the source embeddings into the target language subspace shows a greater improvement overall. This is our \"YiSi-2 (2020)\" submission to the QE as a metric task. Table 1 and 2 show the Kendall's \u03c4 correlation with the segment-level human direct assessment relative ranking on the WMT 2019 evaluation set. YiSi-2 (2020) shows consistent and significant improvements when comparing to the previous version of YiSi-2 across all translation directions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 11, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 682, |
| "end": 689, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Segment-level correlation with human judgment", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Although YiSi-2 (2020) still performs worse than YiSi-1, YiSi-2 (2020) correlates better with human judgment than the reference-based metric, sentBLEU, and its performances are comparable to those of the character-based YiSi variant, YiSi-0, on evaluating translation quality for most of the translation directions. Table 3 and 4 show the Pearson's \u03c1 correlation with the system-level human direct assessment relative ranking on the WMT 2019 evaluation set. Similar to the segment-level results, although YiSi-2 (2020) still performs significantly worse than YiSi-1, we observe significant improvements, compared to the previous version of YiSi-2, consistently across all translation directions. We also show that by replacing the multilingual BERT with XLM-R and using bilingual mappings to better align the source and target language subspaces in XLM-R, YiSi-2 (2020) correlates better with human judgment than the reference-based metric, BLEU, on evaluating translation quality for most of the translation directions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 316, |
| "end": 323, |
| "text": "Table 3", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Segment-level correlation with human judgment", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We have presented an improved version of YiSi-2 that uses XLM-RoBERTa and a cross-lingual linear projection of the source embedding to the target language subspace to better capture the semantic representation across languages. Our results show that YiSi-2 correlates better with human judgement on evaluating translation quality than BLEU for most of the evaluation conditions. This improved version of YiSi-2 is submitted to the WMT 2020 Metrics shared task QE as a metric track. For evaluating Inuktitut\u2194English where one of the languages (Inuktitut) is not covered by XLM-R, we build our own XLM cross-lingual language model with the parallel training data. Potential research directions definitely include improving massive multilingual pretrained language model to close the performance gap between YiSi-1 and YiSi-2 and expanding the language coverage of these models in a post-hoc and unsupervised manner.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "4" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Learning principled bilingual mappings of word embeddings while preserving monolingual invariance", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2289--2294", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1250" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2016. Learning principled bilingual mappings of word em- beddings while preserving monolingual invariance. In Proceedings of the 2016 Conference on Empiri- cal Methods in Natural Language Processing, pages 2289-2294, Austin, Texas. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Unsupervised cross-lingual representation learning at scale", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartikay", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Wenzek", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8440--8451", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.747" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 8440- 8451, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: pre-training of deep bidirectional transformers for language under- standing. CoRR, abs/1810.04805.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Findings of the WMT 2019 shared tasks on quality estimation", |
| "authors": [ |
| { |
| "first": "Erick", |
| "middle": [], |
| "last": "Fonseca", |
| "suffix": "" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Yankovskaya", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "T" |
| ], |
| "last": "Andr\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Martins", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Fishel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "3", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5401" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erick Fonseca, Lisa Yankovskaya, Andr\u00e9 F. T. Martins, Mark Fishel, and Christian Federmann. 2019. Find- ings of the WMT 2019 shared tasks on quality es- timation. In Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Pa- pers, Day 2), pages 1-10, Florence, Italy. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The Nunavut Hansard Inuktitut-English parallel corpus 3.0 with preliminary machine translation results", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Joanis", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Knowles", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Larkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Littell", |
| "suffix": "" |
| }, |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Darlene", |
| "middle": [], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Micher", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "2562--2572", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Joanis, Rebecca Knowles, Roland Kuhn, Samuel Larkin, Patrick Littell, Chi-kiu Lo, Darlene Stewart, and Jeffrey Micher. 2020. The Nunavut Hansard Inuktitut-English parallel corpus 3.0 with prelimi- nary machine translation results. In Proceedings of The 12th Language Resources and Evaluation Con- ference, pages 2562-2572, Marseille, France. Euro- pean Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Crosslingual language model pretraining", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample and Alexis Conneau. 2019. Cross- lingual language model pretraining. Advances in Neural Information Processing Systems (NeurIPS).", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "YiSi -a unified semantic MT quality evaluation and estimation metric for languages with different levels of available resources", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "507--513", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5358" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi-kiu Lo. 2019. YiSi -a unified semantic MT quality evaluation and estimation metric for languages with different levels of available resources. In Proceed- ings of the Fourth Conference on Machine Transla- tion (Volume 2: Shared Task Papers, Day 1), pages 507-513, Florence, Italy. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Extended study on using pretrained language models and YiSi-1 for machine translation evaluation", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation: Shared Task Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi-kiu Lo. 2020. Extended study on using pretrained language models and YiSi-1 for machine translation evaluation. In Proceedings of the Fifth Conference on Machine Translation: Shared Task Papers.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Fully unsupervised crosslingual semantic textual similarity metric based on BERT for identifying parallel data", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Simard", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "206--215", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K19-1020" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi-kiu Lo and Michel Simard. 2019. Fully unsuper- vised crosslingual semantic textual similarity metric based on BERT for identifying parallel data. In Pro- ceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 206- 215, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Results of the wmt19 metrics shared task: Segment-level and strong mt systems pose big challenges", |
| "authors": [ |
| { |
| "first": "Qingsong", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Johnny", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "62--90", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qingsong Ma, Johnny Wei, Ond\u0159ej Bojar, and Yvette Graham. 2019. Results of the wmt19 metrics shared task: Segment-level and strong mt systems pose big challenges. In Proceedings of the Fourth Confer- ence on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 62-90, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1202" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 2227- 2237, New Orleans, Louisiana. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Bertscore: Evaluating text generation with bert", |
| "authors": [ |
| { |
| "first": "Tianyi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Varsha", |
| "middle": [], |
| "last": "Kishore", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [ |
| "Q" |
| ], |
| "last": "Weinberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. Bertscore: Eval- uating text generation with bert. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "On the limitations of cross-lingual encoders as exposed by reference-free machine translation evaluation", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxime", |
| "middle": [], |
| "last": "Peyrard", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "West", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Eger", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1656--1671", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.151" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Zhao, Goran Glava\u0161, Maxime Peyrard, Yang Gao, Robert West, and Steffen Eger. 2020. On the lim- itations of cross-lingual encoders as exposed by reference-free machine translation evaluation. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1656- 1671, Online. Association for Computational Lin- guistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
"text": "Segment-level Kendall's \u03c4 correlation with human direct assessment averaged over all WMT 2019 news translation test sets of YiSi-2 using contextual embeddings extracted from different layers of the multilingual pretrained language models. On the x-axis, layer \u2212n means YiSi-2 based on the embeddings of the n th layer, counting from the last, of XLM-RoBERTa large (blue circles), multilingual BERT (red triangles) and layer \u22127 of XLM-RoBERTa large with source embeddings projected to target language space using CLP (blue star).",
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
"text": "Segment-level Kendall's \u03c4 correlation with human direct assessment on WMT 2019 de-en, fi-en, gu-en, kk-en, lt-en and ru-en news translation test set of YiSi-2 using contextual embeddings extracted from different layers of the multilingual pretrained language models. On the x-axis, layer \u2212n means YiSi-2 based on the embeddings of the n th layer, counting from the last, of XLM-RoBERTa large (blue circles), multilingual BERT (red triangles) and layer \u22127 of XLM-RoBERTa large with source embeddings projected to target language space using CLP (blue star).",
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "Segment-level Kendall's \u03c4 correlation with human", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF0": { |
| "num": null, |
| "text": "Segment-level Kendall's \u03c4 correlation of metric scores with the WMT 2019 official human direct assessment judgments.(2020( ) .116 .271 .249 .370 .281 .121 .340 .299 .329 .459 .512 .459 .314 .078 .158 YiSi-2 (2019", |
| "content": "<table><tr><td>input</td><td>de</td><td>fi</td><td>gu</td><td>kk</td><td>lt</td><td>ru</td><td>zh</td><td>en</td><td>en</td><td>en</td><td>en</td><td>en</td><td>en</td><td>en</td><td>en</td></tr><tr><td>output</td><td>en</td><td>en</td><td>en</td><td>en</td><td>en</td><td>en</td><td>en</td><td>cs</td><td>de</td><td>fi</td><td>gu</td><td>kk</td><td>lt</td><td>ru</td><td>zh</td></tr><tr><td colspan=\"4\">Reference-based evaluation metric</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"8\">YiSi-1 (2019) .164 .347 .312 .440 .376 .217 .426</td><td colspan=\"8\">.475 .351 .537 .551 .546 .470 .585 .355</td></tr><tr><td>YiSi-0</td><td colspan=\"7\">.117 .271 .263 .402 .289 .178 .355</td><td colspan=\"8\">.406 .304 .483 .539 .494 .402 .535 .266</td></tr><tr><td>sentBLEU</td><td colspan=\"7\">.056 .233 .188 .377 .262 .125 .323</td><td colspan=\"8\">.367 .248 .396 .465 .392 .334 .469 .270</td></tr><tr><td>QE as a metric</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>YiSi-2</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr></table>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "num": null, |
| "text": "", |
| "content": "<table><tr><td colspan=\"4\">: Segment-level Kendall's \u03c4 correlation of met-</td></tr><tr><td colspan=\"4\">ric scores with the WMT 2019 official human direct as-</td></tr><tr><td>sessment judgments.</td><td/><td/><td/></tr><tr><td>input</td><td>de</td><td>de</td><td>fr</td></tr><tr><td>output</td><td>cs</td><td>fr</td><td>de</td></tr><tr><td colspan=\"4\">Reference-based evaluation metric</td></tr><tr><td colspan=\"4\">YiSi-1 (2019) .376 .349 .310</td></tr><tr><td>YiSi-0</td><td colspan=\"3\">.331 .296 .277</td></tr><tr><td>sentBLEU</td><td colspan=\"3\">.203 .235 .179</td></tr><tr><td>QE as a metric</td><td/><td/><td/></tr><tr><td colspan=\"4\">YiSi-2 (2020) .355 .294 .226</td></tr><tr><td colspan=\"4\">YiSi-2 (2019) .199 .186 .066</td></tr></table>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "num": null, |
| "text": "System-level Pearson's \u03c1 correlation of metric scores with the WMT 2019 official human direct assessment judgments.", |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "num": null, |
| "text": "", |
| "content": "<table><tr><td colspan=\"4\">: System-level Pearson's \u03c1 correlation of metric</td></tr><tr><td colspan=\"4\">scores with the WMT 2019 official human direct assess-</td></tr><tr><td>ment judgments.</td><td/><td/><td/></tr><tr><td>input</td><td>de</td><td>de</td><td>fr</td></tr><tr><td>output</td><td>cs</td><td>fr</td><td>de</td></tr><tr><td colspan=\"4\">Reference-based evaluation metric</td></tr><tr><td colspan=\"4\">YiSi-1 (2019) .973 .969 .908</td></tr><tr><td>YiSi-0</td><td colspan=\"3\">.978 .952 .820</td></tr><tr><td>BLEU</td><td colspan=\"3\">.941 .891 .864</td></tr><tr><td>QE as a metric</td><td/><td/><td/></tr><tr><td colspan=\"4\">YiSi-2 (2020) .860 .853 .461</td></tr><tr><td colspan=\"4\">YiSi-2 (2019) .606 .721 .530</td></tr></table>", |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |