| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:43:55.866664Z" |
| }, |
| "title": "Improving Parallel Data Identification using Iteratively Refined Sentence Alignments and Bilingual Mappings of Pre-trained Language Models", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Multilingual Text Processing Digital Technologies Research Centre National Research Council Canada (NRC-CNRC)", |
| "location": { |
| "addrLine": "1200 Montreal Road", |
| "postCode": "K1A 0R6", |
| "settlement": "Ottawa", |
| "region": "ON", |
| "country": "Canada" |
| } |
| }, |
| "email": "chikiu.lo@nrc-cnrc.gc.ca" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Joanis", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Multilingual Text Processing Digital Technologies Research Centre National Research Council Canada (NRC-CNRC)", |
| "location": { |
| "addrLine": "1200 Montreal Road", |
| "postCode": "K1A 0R6", |
| "settlement": "Ottawa", |
| "region": "ON", |
| "country": "Canada" |
| } |
| }, |
| "email": "eric.joanis@nrc-cnrc.gc.ca" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The National Research Council of Canada's team submissions to the parallel corpus filtering task at the Fifth Conference on Machine Translation are based on two key components: (1) iteratively refined statistical sentence alignments for extracting sentence pairs from document pairs and (2) a crosslingual semantic textual similarity metric based on a pretrained multilingual language model, XLM-RoBERTa, with bilingual mappings learnt from a minimal amount of clean parallel data for scoring the parallelism of the extracted sentence pairs. The translation quality of the neural machine translation systems trained and fine-tuned on the parallel data extracted by our submissions improved significantly when compared to the organizers' LASER-based baseline, a sentence-embedding method that worked well last year. For realigning the sentences in the document pairs (component 1), our statistical approach has outperformed the current state-of-the-art neural approach in this low-resource context.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The National Research Council of Canada's team submissions to the parallel corpus filtering task at the Fifth Conference on Machine Translation are based on two key components: (1) iteratively refined statistical sentence alignments for extracting sentence pairs from document pairs and (2) a crosslingual semantic textual similarity metric based on a pretrained multilingual language model, XLM-RoBERTa, with bilingual mappings learnt from a minimal amount of clean parallel data for scoring the parallelism of the extracted sentence pairs. The translation quality of the neural machine translation systems trained and fine-tuned on the parallel data extracted by our submissions improved significantly when compared to the organizers' LASER-based baseline, a sentence-embedding method that worked well last year. For realigning the sentences in the document pairs (component 1), our statistical approach has outperformed the current state-of-the-art neural approach in this low-resource context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The aim of the Fifth Conference on Machine Translation (WMT20) shared task on parallel corpus filtering (Koehn et al., 2020) is essentially the same as the two previous editions (Koehn et al., 2018b): identifying high-quality sentence pairs in a noisy corpus crawled from the web using ParaCrawl (Koehn et al., 2018a) , in order to train machine translation (MT) systems on the clean data.", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 124, |
| "text": "(Koehn et al., 2020)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 178, |
| "end": 199, |
| "text": "(Koehn et al., 2018b)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 296, |
| "end": 317, |
| "text": "(Koehn et al., 2018a)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This year, the low-resource language pairs being tested are Khmer-English (km-en) and Pashto-English (ps-en) . Specifically, participating systems must produce a score for each sentence pair in the test corpora indicating the quality of that pair. Then samples containing the top-scoring 5M words are used to train MT systems. While using the filtered parallel data to train a FAIRseq neural machine translation (NMT) system remains the same as last year, the organisers are no longer building statistical machine translation (SMT) systems as part of the task evaluation. Instead, as an alternative evaluation, the filtered parallel corpus is used to fine-tune an MBART (Liu et al., 2020) pretrained NMT system. Participants were ranked based on the performance of these MT systems on a test set of Wikipedia translations , as measured by BLEU (Papineni et al., 2002) . A few small sources of parallel data, covering different domains, were provided for each of the two low-resource languages. Much larger monolingual corpora were also provided for each language (en, km and ps). In addition to the task of computing quality scores for the purpose of filtering, there is also a sub-task of re-aligning the sentence pairs from the original crawled document pairs.", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 81, |
| "text": "Khmer-English (km-en)", |
| "ref_id": null |
| }, |
| { |
| "start": 86, |
| "end": 108, |
| "text": "Pashto-English (ps-en)", |
| "ref_id": null |
| }, |
| { |
| "start": 670, |
| "end": 688, |
| "text": "(Liu et al., 2020)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 844, |
| "end": 867, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Cleanliness or quality of parallel corpora for MT systems is affected by a wide range of factors, e.g., the parallelism of the sentence pairs, the fluency of the sentences in the output language, etc. Previous work (Goutte et al., 2012; Simard, 2014) showed that different types of errors in the parallel training data degrade MT quality in different ways. Crosslingual semantic textual similarity is one of the most important properties of high-quality sentence pairs. Lo et al. (2016) scored cross-lingual semantic textual similarity in two ways, either using a semantic MT quality estimation metric, or by first translating one of the sentences using MT, and then comparing the result to the other sentence, using a semantic MT evaluation metric. At the WMT18 parallel corpus filtering task, Lo et al. (2018) 's supervised submissions were developed for the same MT evaluation pipeline using a new semantic MT metric, YiSi-1 (Lo, 2019 ) (see also section 2.3). At the WMT19 parallel corpus filtering task, Bernier-Colborne and Lo (2019) exploited the quality estimation metric YiSi-2 using bilingual word embeddings learnt in a supervised manner (Luong et al., 2015) from clean parallel training data or a weakly supervised manner (Artetxe et al., 2016) from bilingual dictionary. Lo and Simard (2019) further showed that using YiSi-2 with multilingual BERT (Devlin et al., 2019) on fully unsupervised parallel corpus filtering (i.e. without access of any parallel training data) achieved similar results to those in Bernier-Colborne and Lo (2019) .", |
| "cite_spans": [ |
| { |
| "start": 215, |
| "end": 236, |
| "text": "(Goutte et al., 2012;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 237, |
| "end": 250, |
| "text": "Simard, 2014)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 470, |
| "end": 486, |
| "text": "Lo et al. (2016)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 795, |
| "end": 811, |
| "text": "Lo et al. (2018)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 928, |
| "end": 937, |
| "text": "(Lo, 2019", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1149, |
| "end": 1169, |
| "text": "(Luong et al., 2015)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1234, |
| "end": 1256, |
| "text": "(Artetxe et al., 2016)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1284, |
| "end": 1304, |
| "text": "Lo and Simard (2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1361, |
| "end": 1382, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1541, |
| "end": 1550, |
| "text": "Lo (2019)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This year, the National Research Council of Canada (NRC) team submitted one system to the parallel corpus filtering task and one to the alignment task. The two systems share the same components in scoring the parallelism of the noisy sentence pairs, i.e., the pre-filtering rules and the quality estimation metric YiSi-2. For the parallel corpus aligning task, we use an iterative statistical alignment method to align sentences from the given document pairs before passing the aligned sentences to the scoring pipeline.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our internal results show that MT systems trained on pre-aligned sentences filtered by our scoring pipeline outperform those trained on the organizers' LASER-based baseline by 0.2-1.4 BLEU. Training MT systems on re-aligned sentences using our iterative statistical alignment method achieve further gains of 0.3-1.8 BLEU.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There are a wide range of factors that determine whether a sentence pair is good for training MT systems. Some of the more important properties of a good training corpus include:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 High parallelism in the sentence pairs, which affects translation adequacy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 High fluency and grammaticality, especially for sentences in the output language, which affect translation fluency.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 High vocabulary coverage, especially in the input language, which helps make the translation system more robust.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 High variety of sentence lengths, which should also improve robustness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In previous years, we explicitly tried to maximize all four of these properties, but this year we focused only on the first two in the scoring presented in section 2.3 below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our iterative statistical sentence alignment method as detailed in Joanis et al. (2020) uses ssal, a reimplementation and extension of Moore (2002) which is part of the Portage statistical machine translation toolkit (Larkin et al., 2010) .", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 87, |
| "text": "Joanis et al. (2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 135, |
| "end": 147, |
| "text": "Moore (2002)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 217, |
| "end": 238, |
| "text": "(Larkin et al., 2010)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative statistical sentence alignment", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "First, we train an IBM-HMM model (Och and Ney, 2003) on the clean parallel training data and the subsampled noisy corpora (see Table 1 for statistics) and use it to align paragraphs in the given document pairs, as Moore (2002) does. The subsampled noisy corpora are those obtained by applying our filtering baseline as described in sections 2.2 and 2.3 (and denoted as \"nrc.baseline\" in table 2). Then, we segment the paragraphs in both languages into sentences using the Portage sentence splitter. Finally, we align sentences within aligned paragraphs using the IBM model again. In this process, both the data used in training the IBM-HMM model and the noisy document pairs for alignment are punctuation tokenized using the Portage tokenizer.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 127, |
| "end": 134, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Iterative statistical sentence alignment", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In past work on sentence alignment (Joanis et al. (2020) and other unpublished experiments), we have found that first aligning paragraphs and then aligning sentences within aligned paragraphs outperforms approaches that align sentences without paying attention to paragraph boundaries.", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 56, |
| "text": "(Joanis et al. (2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative statistical sentence alignment", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The pre-filtering steps of our submissions are mostly the same as those in Bernier-Colborne and Lo (2019). We remove: copying from the source Khmer or Pashto sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial filtering", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The core of our sentence pair scoring component is the semantic MT quality estimation metric, YiSi-2. YiSi (Lo, 2019 ) is a unified semantic MT quality evaluation and estimation metric for languages with different levels of available resources. YiSi-1 measures the similarity between a machine translation and human references by aggregating weighted distributional (lexical) semantic similarities, and optionally incorporating shallow semantic structures. YiSi-2 is the bilingual, referenceless version, which uses bilingual word embeddings to evaluate cross-lingual lexical semantic similarity between the input and MT output or, in this task, between the source and target sentences. YiSi-2 relies on a crosslingual language representation to evaluate the crosslingual lexical semantic similarity. Previously, it used pre-trained multilingual BERT (Devlin et al., 2019) for this purpose. In this work, we instead experiment with XLM-RoBERTa (Conneau et al., 2020) because (1) at the time this work was done, it was the only pretrained multilingual language encoder that covers both Khmer, Pashto and English; and (2) it shows better performance with lower-resource languages than BERT.", |
| "cite_spans": [ |
| { |
| "start": 107, |
| "end": 116, |
| "text": "(Lo, 2019", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 851, |
| "end": 872, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 944, |
| "end": 966, |
| "text": "(Conneau et al., 2020)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence pair scoring", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "As suggested by Devlin et al. (2019) ; Peters et al. (2018); Zhang et al. (2020), we experiment with using contextual embeddings extracted from different layers of the multilingual language encoder to find out the layer that best represents the semantic space of the language.", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 36, |
| "text": "Devlin et al. (2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence pair scoring", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "YiSi is semantic oriented. In the past, we noticed that YiSi-based scoring functions failed to filter out sentence pairs with disfluent target text. Following Lo and Larkin (2020), we use the left-to-right LM scores obtained from XLM-RoBERTa while computing the crosslingual lexical semantic similarity. The advantages of using the same pretrained model for computing the crosslingual lexical semantic similarity and the language model scores are 1) it costs less in both memory and computation; 2) it is more portable to languages other than English. We combined the LM scores in the probability domain linearly with the semantic similarity scores with a weight of 0.1 assigned to the LM scores.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence pair scoring", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "In the WMT19 metrics shared task (Ma et al., 2019) , we saw a very significant performance degradation between YiSi-1 and YiSi-2. This suggests that current multilingual language models construct a shared multilingual space in an unsupervised manner without any direct bilingual signal, in which representations of context in the same language are likely to cluster together in part of the subspace and there is a language segregation in the shared multilingual space. Inspired by Artetxe et al. (2016) and Zhao et al. (2020) , we sample 5k clean sentence pairs and use the token pairs aligned by maximum alignment of their semantic similarity to train a cross-lingual linear projection that would transform the source embeddings into the target embeddings subspace. Lo and Larkin (2020) provide a detailed correlation analysis of YiSi-2 with all the improvements mentioned above and human judgment on MT reference-less evaluation.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 50, |
| "text": "(Ma et al., 2019)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 481, |
| "end": 502, |
| "text": "Artetxe et al. (2016)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 507, |
| "end": 525, |
| "text": "Zhao et al. (2020)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 767, |
| "end": 787, |
| "text": "Lo and Larkin (2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence pair scoring", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We used the software provided by the task organizers to extract the 5M-word samples from the original test corpora according to the scores generated by each alignment and/or filtering system. We then trained a FAIRseq MT system or fine-tuned an MBART pretrained NMT using the extracted subsamples. The MT systems were then evaluated on the official dev set (\"dev-test\"). We exhaustively experimented with the last few layers of both XLM-RoBERTa base and XLM-RoBERTa large in order to find out the model and layer best representing crosslingual semantic similarity. Figure 1 shows the plots of the change in BLEU scores of each MT system using the embeddings extracted from the n th layer, counting from the last, of the multilingual LM for evaluating crosslingual lexical semantic similarity. In general, we see a trend of rising performance as we roll back from the last layer. The performance peaks at some point and starts to fall when we roll back too far from the end. For XLM-RoBERTa base , the peak performance of the MT systems is achieved by the 3 rd or 4 th last layer (out of 12 layers). For XLM-RoBERTa large , the peak performance of the MT systems is achieved by the 8 th last layer (out of 24 layers). The peak performance of MT systems trained on sentences filtered by XLM-RoBERTa large based YiSi-2 is better than that by XLM-RoBERTa base based YiSi-2. Table 2 shows the results of the experiments described in section 2.3. First, we show an improved version of the organizers' baseline by simply adding our initial filtering rules. This shows that our initial filtering rules are able to catch bad parallel sentences which are hard to filter by an embedding-based filtering system.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 565, |
| "end": 573, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1370, |
| "end": 1377, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Next, we see that using YiSi-2 with XLM-RoBERTa large 's 8th last layer as parallelism scoring function outperforms the LASER baseline by 0.1-0.9 BLEU in different translation directions and MT architectures. This is our \"nrc.baseline\" system, and the baseline used for filtering the noisy corpus in training the IBM-HMM alignment model for the \"nrc.alignment\" system. Adding the LM score to the scoring function shows small improvements. Learning the cross-lingual linear projection matrix to transform the source embeddings in the target language subspace shows more improvements overall. This is our \"nrc.filtering\" submission to the parallel corpus filtering task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "At last, we show that using our iterative statistical alignment method to redo the alignment of sentences from the given document pairs improves the translation quality of the resulting MT systems significantly. This is our \"nrc.alignment\" submission to the parallel corpus filtering task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this paper, we presented the NRC's two submissions to the WMT20 Parallel Corpus Filtering and Alignment for Low-Resource Conditions task. Our experiments show that YiSi-2 is a scoring function of parallelism that is very competitive, and that a statistical sentence alignment method is still able to provide better alignment results than neural ones in low resource situations. Further analysis is required to understand the characteristics of the sentence pairs aligned by the baseline vecalign and our iterative statistical sentence alignment and how the latter achieves better translation quality for the trained MT systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "It is worth highlighting that in this task, as well as in our Inuktitut-English corpus alignment work (Joanis et al., 2020) , a well-tuned statistical sentence-alignment system outperformed a stateof-the-art neural one. We hypothesise that this is a low-resource effect, but further work is still needed to explore the best low-resource corpus alignment methods. In particular, we intend to integrate YiSi-2 into our sentence aligner to test whether it's our iterative alignment methodology that makes the difference or the fact that the underlying scoring function is statistical (we use IBM-HMM models for sentence pair scoring in our aligner). It's possible that the statistical approach might continue to win here, because in the low-resource context there might not be enough training data to tune the orders of magnitude more parameters of the neural models; a counter-argument is that YiSi-2 did better on the scoring task than statistical scoring functions. Our future work will explore the tradeoffs between these two approaches, and consider hybrid methods.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 123, |
| "text": "(Joanis et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "https://github.com/aboSamoor/pycld2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Samuel Larkin and Marc Tessier for their help in setting up the FAIRseq and MBART baselines using the LASER scores; and Patrick Littell for discussion and feedback on the Pashto test set. We also thank the reviewers for their comments and suggestions, and Roland Kuhn for his comments and feedback on the paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Learning principled bilingual mappings of word embeddings while preserving monolingual invariance", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2289--2294", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1250" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2016. Learning principled bilingual mappings of word em- beddings while preserving monolingual invariance. In Proceedings of the 2016 Conference on Empiri- cal Methods in Natural Language Processing, pages 2289-2294, Austin, Texas. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "NRC parallel corpus filtering system for WMT 2019", |
| "authors": [ |
| { |
| "first": "Gabriel", |
| "middle": [], |
| "last": "Bernier-Colborne", |
| "suffix": "" |
| }, |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "3", |
| "issue": "", |
| "pages": "252--260", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5434" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gabriel Bernier-Colborne and Chi-kiu Lo. 2019. NRC parallel corpus filtering system for WMT 2019. In Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 252-260, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Lowresource corpus filtering using multilingual sentence embeddings", |
| "authors": [ |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuqing", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "3", |
| "issue": "", |
| "pages": "261--266", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5435" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vishrav Chaudhary, Yuqing Tang, Francisco Guzm\u00e1n, Holger Schwenk, and Philipp Koehn. 2019. Low- resource corpus filtering using multilingual sentence embeddings. In Proceedings of the Fourth Confer- ence on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 261-266, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Unsupervised cross-lingual representation learning at scale", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartikay", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Wenzek", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8440--8451", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.747" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 8440- 8451, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The impact of sentence alignment errors on phrasebased machine translation performance", |
| "authors": [ |
| { |
| "first": "Cyril", |
| "middle": [], |
| "last": "Goutte", |
| "suffix": "" |
| }, |
| { |
| "first": "Marine", |
| "middle": [], |
| "last": "Carpuat", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Tenth Conference of the Association for Machine Translation in the Americas", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cyril Goutte, Marine Carpuat, and George Foster. 2012. The impact of sentence alignment errors on phrase- based machine translation performance. In Proceed- ings of the Tenth Conference of the Association for Machine Translation in the Americas.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "The FLORES evaluation datasets for low-resource machine translation: Nepali-English and Sinhala-English", |
| "authors": [ |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng-Jen", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Pino", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "6098--6111", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1632" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francisco Guzm\u00e1n, Peng-Jen Chen, Myle Ott, Juan Pino, Guillaume Lample, Philipp Koehn, Vishrav Chaudhary, and Marc'Aurelio Ranzato. 2019. The FLORES evaluation datasets for low-resource ma- chine translation: Nepali-English and Sinhala- English. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Process- ing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 6098-6111, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "The Nunavut Hansard Inuktitut-English parallel corpus 3.0 with preliminary machine translation results", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Joanis", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Knowles", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Larkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Littell", |
| "suffix": "" |
| }, |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Darlene", |
| "middle": [], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Micher", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "2562--2572", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Joanis, Rebecca Knowles, Roland Kuhn, Samuel Larkin, Patrick Littell, Chi-kiu Lo, Darlene Stewart, and Jeffrey Micher. 2020. The Nunavut Hansard Inuktitut-English parallel corpus 3.0 with prelimi- nary machine translation results. In Proceedings of The 12th Language Resources and Evaluation Con- ference, pages 2562-2572, Marseille, France. Euro- pean Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Findings of the WMT 2020 shared task on parallel corpus filtering and alignment", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "El-Kishky", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng-Jen", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation: Shared Task Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Vishrav Chaudhary, Ahmed El-Kishky, Naman Goyal, Peng-Jen Chen, and Francisco Guzm\u00e1n. 2020. Findings of the WMT 2020 shared task on parallel corpus filtering and alignment. In Proceedings of the Fifth Conference on Machine Translation: Shared Task Papers.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Findings of the WMT 2019 shared task on parallel corpus filtering for low-resource conditions", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Guzm\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Pino", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "3", |
| "issue": "", |
| "pages": "54--72", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5404" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Francisco Guzm\u00e1n, Vishrav Chaud- hary, and Juan Pino. 2019. Findings of the WMT 2019 shared task on parallel corpus filtering for low-resource conditions. In Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Papers, Day 2), pages 54-72, Flo- rence, Italy. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
"title": "ParaCrawl corpus version 1.0",
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "" |
| }, |
| { |
| "first": "Mikel", |
| "middle": [ |
| "L" |
| ], |
| "last": "Forcada", |
| "suffix": "" |
| }, |
| { |
| "first": "Miquel", |
| "middle": [], |
| "last": "Espl\u00e0-Gomis", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergio", |
| "middle": [], |
| "last": "Ortiz-Rojas", |
| "suffix": "" |
| }, |
| { |
| "first": "Gema", |
| "middle": [ |
| "Ram\u00edrez" |
| ], |
| "last": "S\u00e1nchez", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "V\u00edctor", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "S\u00e1nchez Cartagena", |
| "suffix": "" |
| }, |
| { |
| "first": "Marta", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Marek", |
| "middle": [], |
| "last": "Ba\u00f1\u00f3n", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "St\u0159elec", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Samiotou", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kamran", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Kenneth Heafield, Mikel L. For- cada, Miquel Espl\u00e0-Gomis, Sergio Ortiz-Rojas, Gema Ram\u00edrez S\u00e1nchez, V\u00edctor M. S\u00e1nchez Cartagena, Barry Haddow, Marta Ba\u00f1\u00f3n, Marek St\u0159elec, Anna Samiotou, and Amir Kamran. 2018a. ParaCrawl corpus version 1.0. LINDAT/CLARIN digital library at the Institute of Formal and Applied Linguistics (\u00daFAL), Faculty of Mathematics and Physics, Charles University.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Findings of the WMT 2018 shared task on parallel corpus filtering", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Huda", |
| "middle": [], |
| "last": "Khayrallah", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Heafield", |
| "suffix": "" |
| }, |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Forcada", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Huda Khayrallah, Kenneth Heafield, and Mikel Forcada. 2018b. Findings of the WMT 2018 shared task on parallel corpus filtering. In Pro- ceedings of the Third Conference on Machine Trans- lation, Volume 2: Shared Task Papers, Brussels, Bel- gium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
"title": "Lessons from NRC's Portage system at WMT 2010",
| "authors": [ |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Larkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Boxing", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulrich", |
| "middle": [], |
| "last": "Germann", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Joanis", |
| "suffix": "" |
| }, |
| { |
| "first": "Howard", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Joint Fifth Workshop on Statistical Machine Translation and MetricsMATR", |
| "volume": "", |
| "issue": "", |
| "pages": "127--132", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel Larkin, Boxing Chen, George Foster, Ulrich Germann, Eric Joanis, Howard Johnson, and Roland Kuhn. 2010. Lessons from NRC's Portage system at WMT 2010. In Proceedings of the Joint Fifth Workshop on Statistical Machine Translation and MetricsMATR, pages 127-132, Uppsala, Sweden. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Multilingual denoising pretraining for neural machine translation", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiatao", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre- training for neural machine translation.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "YiSi -a unified semantic MT quality evaluation and estimation metric for languages with different levels of available resources", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "507--513", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-5358" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi-kiu Lo. 2019. YiSi -a unified semantic MT quality evaluation and estimation metric for languages with different levels of available resources. In Proceed- ings of the Fourth Conference on Machine Transla- tion (Volume 2: Shared Task Papers, Day 1), pages 507-513, Florence, Italy. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "CNRC at SemEval-2016 task 1: Experiments in crosslingual semantic textual similarity", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Cyril", |
| "middle": [], |
| "last": "Goutte", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Simard", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "668--673", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S16-1102" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
"raw_text": "Chi-kiu Lo, Cyril Goutte, and Michel Simard. 2016. CNRC at SemEval-2016 task 1: Experiments in crosslingual semantic textual similarity. In Proceed- ings of the 10th International Workshop on Seman- tic Evaluation (SemEval-2016), pages 668-673, San Diego, California. Association for Computational Linguistics.",
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "2020. MT referenceless evaluation using YiSi-2 with bilingual mappings of massive multilingual language model", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Larkin", |
| "suffix": "" |
| } |
| ], |
"year": 2020,
| "venue": "Proceedings of the Fifth Conference on Machine Translation: Shared Task Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi-kiu Lo and Samuel Larkin. 2020. MT reference- less evaluation using YiSi-2 with bilingual mappings of massive multilingual language model. In Proceed- ings of the Fifth Conference on Machine Transla- tion: Shared Task Papers.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Fully unsupervised crosslingual semantic textual similarity metric based on BERT for identifying parallel data", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Simard", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "206--215", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K19-1020" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi-kiu Lo and Michel Simard. 2019. Fully unsuper- vised crosslingual semantic textual similarity metric based on BERT for identifying parallel data. In Pro- ceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 206- 215, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Accurate semantic textual similarity for cleaning noisy parallel corpora using semantic machine translation evaluation metric: The NRC supervised submissions to the parallel corpus filtering task", |
| "authors": [ |
| { |
| "first": "Chi-Kiu", |
| "middle": [], |
| "last": "Lo", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Simard", |
| "suffix": "" |
| }, |
| { |
| "first": "Darlene", |
| "middle": [], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Larkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Cyril", |
| "middle": [], |
| "last": "Goutte", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Littell", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Shared Task Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "908--916", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chi-kiu Lo, Michel Simard, Darlene Stewart, Samuel Larkin, Cyril Goutte, and Patrick Littell. 2018. Ac- curate semantic textual similarity for cleaning noisy parallel corpora using semantic machine translation evaluation metric: The NRC supervised submissions to the parallel corpus filtering task. In Proceedings of the Third Conference on Machine Translation: Shared Task Papers, pages 908-916, Belgium, Brus- sels. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Bilingual word representations with monolingual quality in mind", |
| "authors": [ |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 1st Workshop on Vector Space Modeling for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "151--159", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/W15-1521" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015. Bilingual word representations with monolingual quality in mind. In Proceedings of the 1st Workshop on Vector Space Modeling for Natural Language Processing, pages 151-159, Denver, Col- orado. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Results of the WMT19 metrics shared task: Segment-level and strong MT systems pose big challenges", |
| "authors": [ |
| { |
| "first": "Qingsong", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Johnny", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation", |
| "volume": "2", |
| "issue": "", |
| "pages": "62--90", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qingsong Ma, Johnny Wei, Ond\u0159ej Bojar, and Yvette Graham. 2019. Results of the WMT19 metrics shared task: Segment-level and strong MT sys- tems pose big challenges. In Proceedings of the Fourth Conference on Machine Translation (Volume 2: Shared Task Papers, Day 1), pages 62-90, Flo- rence, Italy. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Fast and accurate sentence alignment of bilingual corpora", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Robert", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Conference of the Association for Machine Translation in the Americas", |
| "volume": "", |
| "issue": "", |
| "pages": "135--144", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert C. Moore. 2002. Fast and accurate sentence alignment of bilingual corpora. In Proceedings of the Conference of the Association for Machine Trans- lation in the Americas, pages 135-144.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "A systematic comparison of various statistical alignment models", |
| "authors": [ |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Franz", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "29", |
| "issue": "1", |
| "pages": "19--51", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/089120103321337421" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franz Josef Och and Hermann Ney. 2003. A systematic comparison of various statistical alignment models. Computational Linguistics, 29(1):19-51.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "fairseq: A fast, extensible toolkit for sequence modeling", |
| "authors": [ |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [], |
| "last": "Baevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Angela", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
| "volume": "", |
| "issue": "", |
| "pages": "48--53", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-4009" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics (Demonstrations), pages 48-53, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computa- tional Linguistics, Philadelphia, Pennsylvania, USA.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1202" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 2227- 2237, New Orleans, Louisiana. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI Blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Clean data for training statistical MT: the case of MT contamination", |
| "authors": [ |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Simard", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Eleventh Conference of the Association for Machine Translation in the Americas", |
| "volume": "", |
| "issue": "", |
| "pages": "69--82", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michel Simard. 2014. Clean data for training statisti- cal MT: the case of MT contamination. In Proceed- ings of the Eleventh Conference of the Association for Machine Translation in the Americas, pages 69- 82, Vancouver, BC, Canada.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Bertscore: Evaluating text generation with BERT", |
| "authors": [ |
| { |
| "first": "Tianyi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Varsha", |
| "middle": [], |
| "last": "Kishore", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [ |
| "Q" |
| ], |
| "last": "Weinberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. Bertscore: Eval- uating text generation with BERT. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "On the limitations of cross-lingual encoders as exposed by reference-free machine translation evaluation", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxime", |
| "middle": [], |
| "last": "Peyrard", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "West", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Eger", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1656--1671", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.151" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Zhao, Goran Glava\u0161, Maxime Peyrard, Yang Gao, Robert West, and Steffen Eger. 2020. On the lim- itations of cross-lingual encoders as exposed by reference-free machine translation evaluation. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 1656- 1671, Online. Association for Computational Lin- guistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Zhao et al. (2020), we experiment with improving the sentence pair scoring function by linearly combining YiSi score with the language model (LM) scores of the target text obtained from the multilingual language model used in YiSi. However, instead of using an additional pretrained language model-GPT-2 (Radford et al., 2019)as inZhao et al." |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "BLEU scores on the Khmer-English dev set for (a) FAIRseq and (b) MBART and the Pashto-English dev set for (c) FAIRseq and (d) MBART trained on 5M-word parallel subsample extracted according to the scoring functions as shown: on the x-axis, layer = \u2212n means YiSi-2 based on the embeddings of the n th layer, counting from the last, of XLM-RoBERTa base (blue circles) or XLM-RoBERTa large (red triangles)." |
| }, |
| "TABREF1": { |
| "html": null, |
| "text": "Data used to train the IBM-HMM model used in the iterative statistical sentence alignment.", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "text": "BLEU scores of selected systems. The two final submitted systems are labelled nrc.filtering and nrc.alignment.", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |