| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T04:34:09.790866Z" |
| }, |
| "title": "NICT-5's Submission To WAT 2021: MBART Pre-training And In-Domain Fine Tuning For Indic Languages", |
| "authors": [ |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Information and Communications Technology", |
| "location": { |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "raj.dabre@nict.go.jp" |
| }, |
| { |
| "first": "Abhisek", |
| "middle": [], |
| "last": "Chakrabarty", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "National Institute of Information and Communications Technology", |
| "location": { |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "abhisek.chakra@nict.go.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper we describe our submission to the multilingual Indic language translation task \"MultiIndicMT\" under the team name \"NICT-5\". This task involves translation from 10 Indic languages into English and vice-versa. The objective of the task was to explore the utility of multilingual approaches using a variety of in-domain and out-of-domain parallel and monolingual corpora. Given the recent success of multilingual NMT pre-training we decided to explore pre-training an MBART model on a large monolingual corpus collection covering all languages in this task followed by multilingual fine-tuning on small in-domain corpora. Firstly, we observed that a small amount of pretraining followed by fine-tuning on small bilingual corpora can yield large gains over when pre-training is not used. Furthermore, multilingual fine-tuning leads to further gains in translation quality which significantly outperforms a very strong multilingual baseline that does not rely on any pre-training.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper we describe our submission to the multilingual Indic language translation task \"MultiIndicMT\" under the team name \"NICT-5\". This task involves translation from 10 Indic languages into English and vice-versa. The objective of the task was to explore the utility of multilingual approaches using a variety of in-domain and out-of-domain parallel and monolingual corpora. Given the recent success of multilingual NMT pre-training we decided to explore pre-training an MBART model on a large monolingual corpus collection covering all languages in this task followed by multilingual fine-tuning on small in-domain corpora. Firstly, we observed that a small amount of pretraining followed by fine-tuning on small bilingual corpora can yield large gains over when pre-training is not used. Furthermore, multilingual fine-tuning leads to further gains in translation quality which significantly outperforms a very strong multilingual baseline that does not rely on any pre-training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Neural machine translation (NMT) (Bahdanau et al., 2014) is known to give state-of-the-art translations for a variety of language pairs. NMT is known to perform poorly for language pairs for which parallel corpora are scarce. This happens due to lack of translation knowledge as well as due to overfitting which is inevitable in a low-resource setting. Fortunately, transfer learning via cross-lingual transfer (Zoph et al., 2016; , multilingualism (Firat et al., 2016; , back-translation (Sennrich et al., 2016) or monolingual pre-training Mao et al., 2020) can significantly improve translation quality in a low-resource situation.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 56, |
| "text": "(Bahdanau et al., 2014)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 411, |
| "end": 430, |
| "text": "(Zoph et al., 2016;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 449, |
| "end": 469, |
| "text": "(Firat et al., 2016;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 489, |
| "end": 512, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 541, |
| "end": 558, |
| "text": "Mao et al., 2020)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Cross-lingual transfer learning involves pretraining a model using a parallel corpus for a resource-rich language pair XX \u2212 Y Y and then fine-tuning on a parallel corpus for a resource-poor language pair AA \u2212 BB. Naturally the improvements in translation quality will be impacted by if XX = AA or Y Y = BB 1 and it is often better to have a shared target language. Cross-lingual transfer despite its simplicity and effectiveness relies on shared source or target languages for effective transfer and thus depending on methods that use monolingual corpora are preferable. This also applies to vanilla multilingual training which does not rely on monolingual corpora. Another reason for focusing on utilizing monolingual corpora is that they are extremely abundant when compared to parallel corpora and they contain a large amount of language modeling information. In this regard, back-translation and multilingual pre-training are two of the most reliable methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While back-translation is easy to use, it involves the translation of millions of monolingual sentences and quite often it is necessary to perform multiple iterations of the back-translation process to yield the best results (Hoang et al., 2018) which means that it is quite resource intensive. This leaves us with multilingual pre-training using methods such as BART/MBART which we use for developing our translation system. The advantage of BART/MBART is that we need to pre-train these models once and then fine-tune not only for machine translation but also for any natural language generation task such as summarization (Shi et al., 2021) . These models can be upgraded to include additional language pairs in the future by simply resuming pre-training (Tang et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 225, |
| "end": 245, |
| "text": "(Hoang et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 625, |
| "end": 643, |
| "text": "(Shi et al., 2021)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 758, |
| "end": 777, |
| "text": "(Tang et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we describe our simple approach involving MBART pre-training and fine-tuning. First, we use the official monolingual corpora to train an MBART model spanning all 11 languages in the shared task. Following this we fine-tune the MBART model using the officially provided indomain corpora in two different ways: bilingual fine-tuning and multilingual fine-tuning. Additionally we also train multilingual models without any pre-training. The multilingual models are one-tomany (English to Indic) and many-to-one (Indic to English) in nature. The bilingual fine-tuning and non pre-trained multilingual model serve as strong baselines which significantly outperform the organizers weak bilingual baselines. Our multilingual fine-tuning models exhibit the best translation quality out of all our models which shows the power of effectively combining monolingual corpora with multilingualism.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We refer readers to the workshop overview paper (Nakazawa et al., 2021) for a better understanding of the task and the comparison of our results with those of other participants.", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 71, |
| "text": "(Nakazawa et al., 2021)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The techniques used in this paper revolve around multilingualism, sequence-to-sequence pretraining and transfer learning. Firat et al. (2016) proposed multilingual neural translation using multiple encoders and decoders which was then simplified by Johnson et al. (2017) to require a single encoder and decoder to be shared among multiple language pairs. Due to the simplicity of the latter approach, most modern multilingual models are based on it and in this paper we also use the same approach. Multilingualism involves implicit transfer learning but a more explicit way to do the same is to use fine-tuning (Zoph et al., 2016) . However all these aforementioned approaches rely on bilingual data which is not always readily available. This can be remedied by the use of monolingual corpora for backtranslation (Sennrich et al., 2016) or for pre-training Mao et al., 2020) . As backtranslation is resource intensive, given that it involves translation of a large amount of monolingual corpora, pre-training is more attractive as a pre-trained model can be used for a variety of natural language generation tasks. In this paper we combine sequence-to-sequence pre-training followed by multilingual fine-tuning. For an overview of multilingual NMT we refer readers to a survey paper on multilingualism and low-resource NMT in general .", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 141, |
| "text": "Firat et al. (2016)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 249, |
| "end": 270, |
| "text": "Johnson et al. (2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 611, |
| "end": 630, |
| "text": "(Zoph et al., 2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 814, |
| "end": 837, |
| "text": "(Sennrich et al., 2016)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 858, |
| "end": 875, |
| "text": "Mao et al., 2020)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For our submissions we focused on combining multilingual denoising pre-training (MBART) and multilingual fine tuning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Approaches", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We follow the multilingual NMT training approach proposed by Johnson et al. (2017) . Consider a multilingual parallel corpora collection spanning corpora for", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 82, |
| "text": "Johnson et al. (2017)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual NMT Training", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "N language pairs L i src \u2212 L i tgt for i \u2208 [1, N ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual NMT Training", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The sizes of the parallel corpora are typically different, often radically different, in which case it is important to balance corpora sizes to prevent the model from focusing too much on some language pairs. Johnson et al. (2017) showed that training by oversampling smaller corpora to match the size of the largest corpus is the best approach. However, since then newer corpora balancing approaches have been proposed and the most recent effective method is known as the temperature based sampling approach (Aharoni et al., 2019) . Suppose that the size of the i th corpus is s i which means the probability of sampling a sentence pair from each corpus is p i = s i S where S = i s i . Using this default sampling probability is biased towards larger corpora so first the probability values are tempered using a temperature T . The resultant probabilities p t i are obtained as follows: Aharoni et al. (2019) showed that a value of T = 5 works well in practice which is what we use in our experiments. During training, sentence pairs are sampled from each corpus following which the source sentence is prepended with a token < 2L i tgt > which indicates that the source sentence should be translated into L i tgt . Thereafter, the pre-processed source sentence and target sentence are fed to the NMT model which learns how to translate between multiple language pairs.", |
| "cite_spans": [ |
| { |
| "start": 209, |
| "end": 230, |
| "text": "Johnson et al. (2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 509, |
| "end": 531, |
| "text": "(Aharoni et al., 2019)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 889, |
| "end": 910, |
| "text": "Aharoni et al. (2019)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual NMT Training", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p t i = p 1 T i j p 1 T j (1) When T = 1, p t i = p i and when T = \u221e, p t i = 1 N .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual NMT Training", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Liu et al. (2020) extended the BART model by denoising pre-training the BART model on 25 languages instead of 2 which leads to an MBART model. The main advantage of an MBART model is that it can be fine-tuned with corpora for a variety of language pairs which naturally includes many zero-shot pairs. The way to train an MBART model is by \"corrupting\" an input sentence, feeding it to the encoder and then training the model to predict the original sentence. Corruption can be done in a variety of ways and in this paper we use 'text infilling' approach which finds random spans of the source tokens and replaces them with a token such as < M ASK > till a certain percentage of the sentence is masked. The length of the span is sampled from a Poisson distribution with a mean of \u03bb. determined an optimal value of \u03bb = 3.5 which we also use. The denoising objective helps the MBART model learn about using context to translate and also helps it acquire language modeling information. After an MBART model is trained it is fine-tuned on a bilingual or multilingual parallel corpus which is then used for translation. The language modeling priors help account for missing translation knowledge in low-resource settings which leads to large improvements in translation quality over baselines which only use parallel corpora.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MBART Pre-training and Fine-Tuning", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our goal was to study how far the translation quality can be pushed via MBART pre-training and multilingual fine-tuning. To do so, we describe the datasets, implementation details, evaluation metrics and the models trained.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The languages involved in the task are: Bengali, Gujarati, Hindi, Kannada, Malayalam, Marathi, Oriya, Punjabi, Tamil, Telugu and English. We used the official parallel corpora 2 provided by the organizers. The 11-way evaluation development and test sets come from the PMI dataset 3 . Although the organizers provided corpora from other sources as well, we decided to restrict ourselves to the PMI part of the parallel corpora to avoid the need for data selection. Instead we relied on pre-training to compensate for using smaller amount of parallel corpora. For MBART pre-training we used the AI4Bharat's monolingual corpora known as In-dicCorp 4 . Note that MBART pre-training supposes the monolingual data is available as documents however since we only use the masking denoising approach, sentence level corpora 5 are sufficient. The IndicCorp covers an additional language Assamese which is not in this shared task. Nevertheless, we use the monolingual corpus for this language as well because it can potentially improve translation involving Bengali given their similarity. However, the small size of Assamese data (1.39M lines) relative to the Bengali data (39.9M lines) should not significantly affect the final outcome for translation involving Benglai 6 . The monolingual corpora stats are given in Table 1 and the bilingual corpora stats are given in Table 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1308, |
| "end": 1315, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1361, |
| "end": 1368, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets and Preprocessing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Regarding pre-processing, we do not perform anything specific and instead let our implementation handle everything via its internal mechanisms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Preprocessing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We implement the methods mentioned in Section 3 in our in-house toolkit which we make publicly available 7 . This toolkit is based on the Hugging-Face transformers library (Wolf et al., 2020) v4.3.2. Note that the MBART implementation in the library shares the encoder embedding, decoder embedding and decoder softmax projection layers. We implement denoising, temperature based data sampling and multilingual training ourselves. We also use the HuggingFace transformer tokenizer library to train tokenizers. These tokenizers are wrappers around Byte Pair Encoding (BPE) (Gage, 1994) or SentencePiece (SPM) (Kudo and Richardson, 2018 ) models and we choose 8 the latter as opposed to the former which is used by the original MBART implementation.", |
| "cite_spans": [ |
| { |
| "start": 172, |
| "end": 191, |
| "text": "(Wolf et al., 2020)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 571, |
| "end": 583, |
| "text": "(Gage, 1994)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 607, |
| "end": 633, |
| "text": "(Kudo and Richardson, 2018", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We first trained a tokenizer with a joint vocabulary size of 64,000 sub-words which is learned on the IndicCorp monolingual data. We consider this vocabulary size to be sufficient for all languages. For pre-training, we use hyperparameters corresponding to the \"transformer big\" (Vaswani et al., 2017) with a few exceptions such as dropout of 0.1, positional embeddings instead of positional encodings and a maximum learning rate of 0.001. When performing batching we truncate all sequences longer than 256 subwords. Our MBART model is pretrained on 48 NVIDIA V-100 GPUs using the distributed data parallel mechanism in PyTorch. Due to lack of time we only trained for 150,000 batches which corresponded to roughly 1 epoch over the entire monolingual data. After pre-training we train unidirectional models using the bilingual data on a single GPU. We train the one-to-many (English to Indic) and many-to-one (Indic to English) models on the multilingual data on 8 GPUs. For both cases we use a dropout of 0.3 and train till convergence on the development BLEU score and choose the model with the best development set BLEU score for decoding the test set. In our initial experiments we did additional exploration to choose the particular checkpoint which yields best average development BLEU score over all language pairs for decoding the test set. We found that the results are inferior compared to when the best model is chosen language pairwise. We use beam search for decoding with a beam size of 4 and a length penalty of 0.8 9 . For unidirectional models this is strightforward but for multilingual models train till convergence on the global development set BLEU score, an average of BLEU scores for each language pair. Different from most previous works, instead of decoding a single final model, we choose a particular model for a language pair with the highest development set BLEU score for that pair. 
Therefore, we treat multilingualism as a way to get a (potentially) different model per language pair leading to the best BLEU scores for that pair and not as a way to get a single model that gives the best performance for each language pair.", |
| "cite_spans": [ |
| { |
| "start": 279, |
| "end": 301, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training and Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "For evaluation, as we have mentioned before, we use BLEU (Papineni et al., 2002) as the primary evaluation metric. WAT also uses metrics such as RIBES (Isozaki et al., 2010) , AM-FM (Zhang et al., 2021) and human evaluation (Nakazawa et al., 2019 (Nakazawa et al., , 2020 (Nakazawa et al., , 2021 . All these metrics focus on different aspects of translations and may lead to different rankings for submissions, however this multi-metric evaluation helps us understand that there may not be one perfect model. To avoid confusing the reader with a clutter of scores, we only show BLEU scores and we refer the reader to the evaluation page where all scores and rankings 10 can be seen 11 .", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 80, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 151, |
| "end": 173, |
| "text": "(Isozaki et al., 2010)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 182, |
| "end": 202, |
| "text": "(Zhang et al., 2021)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 224, |
| "end": 246, |
| "text": "(Nakazawa et al., 2019", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 247, |
| "end": 271, |
| "text": "(Nakazawa et al., , 2020", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 272, |
| "end": 296, |
| "text": "(Nakazawa et al., , 2021", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training and Evaluation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We trained the following models:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models Trained", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "\u2022 A pre-trained MBART model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models Trained", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "\u2022 Unidirectional models for each language pair trained from scratch or via fine-tuning the MBART model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models Trained", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "\u2022 One-to-many (English to Indic) and many-toone (Indic to English) multilingual models trained from scratch or via fine-tuning the MBART model. Table 3 : Evaluation results of all language pairs. All scores are taken from the leaderboard. Our best results are in bold. Differences in BLEU smaller than 0.5 are not significant in most cases. Table 3 contains the results of the unidirectional 12 , and multilingual models. We also show the the best submissions for reference.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 144, |
| "end": 151, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 341, |
| "end": 348, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Models Trained", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "It is clear from the results that multilingual models are vastly superior than unidirectional models which shows that multilingualism is very helpful in a low-resource setting. Secondly, comparing with corpora sizes (see Table 2 ), it can be seen that the gains in BLEU are (roughly) inversely proportional to the size of the parallel corpora.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 221, |
| "end": 228, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Without Fine-tuning", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In the case of Indic to English translation, MBART+unidirectional models are significantly better than many-to-one models. We can attribute this phenomenon to the fact that the PMI corpus has a limited number of English sentences and even though combining all corpora might seem to increase the number of English sentences, most of them are redundant which causes some form of overfitting. This is remedied by the MBART model with incorporates additional language modeling information through the monolingual corpora.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non Fine-Tuned Multilingual Models vs Fine-Tuned Unidirectional Models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "On the other hand, for English to Indic translation, the one-to-many models are often comparable if not better than the fine-tuned unidirectional models. Fine-tuning significantly outperforms non fine-tuned unidirectional models which means pretraining is useful. However, given that multilingual training is better, this indicates that it may not be necessary to perform pre-training for one-to-many translation. Remember that the English side of the text contains a large number of redundant sentences and this may be one of the reasons for this kind of behavior. We think that this deserves some future investigation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non Fine-Tuned Multilingual Models vs Fine-Tuned Unidirectional Models", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Ultimately, multilingual fine-tuning of an MBART model leads to the best translation quality for all language pairs, except two (Gujarati to English and English to Telugu). This approach combines the best of both worlds and the outcome is not surprising. Our MBART models consisted of only 6 layers and was trained for only 1 epoch and this may not be enough to incorporate knowledge from the full monolingual corpus. We also did not perform any hyperparameter tuning with parameters such as dropout and learning rate 13 We expect that a larger model with more careful hyperparameter tuning should lead to even better results. However, we are confident that a multilingual fine-tuned model will reign supreme.", |
| "cite_spans": [ |
| { |
| "start": 518, |
| "end": 520, |
| "text": "13", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual Fine-tuning", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "For Indic to English translation the several submissions outperformed ours and we think that this is because the other participants have indicated that they have performed data selection, backtranslation and script mapping. In our case we only performed pre-training and fine-tuning with PMI data. Although MBART pre-training is helpful, it can never compare with the power of a large parallel corpus obtained via careful data selection and script manipulation. While for PMI, the largest parallel corpus, Hindi-English, contains roughly 50,000 lines, the full Hindi-English corpus is larger than 2M lines and most pairs have more than 500,000 lines. In the future we will try training with larger parallel corpora and script mapping to see what kind of results we get. On the other hand for English to Indic translation, the gap between the the best submissions and ours is much smaller than for the reverse direction. This also shows that, at least for this task, multilingualism benefits translation into English a lot more than it benefits translation from English.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison With Other Submissions", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "In this paper we have described our NMT systems and results for the MultiIndicMT task in WAT 2021. We worked on MBART pre-training and multilingual fine-tuning which we found to significantly outperform unidirectional models with and without pre-training and multilingual models without pre-training. We did not train our MBART models for more than 1 epoch and used only the PMI data for fine-tuning instead of the whole parallel corpus. We did not try any additional methods such as back-translation either. Despite this, our results are competitive and despite the simplicity of our methods our results do not lag far behind those of the best systems that use advanced methods such as data selection, domain adaptation, back-translation etc. This also means that we have a lot of room for improvement in the future.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "If XX \u2212 Y Y and AA \u2212 BB are the same pairs then it is known as domain adaptation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://lotus.kuee.kyoto-u.ac.jp/WAT/indic-multilingual/ index.html 3 http://data.statmt.org/pmindia 4 https://indicnlp.ai4bharat.org/corpora 5 The IndicCorp is supposed to be document level but the downloadable version is sentence level.6 However, this may significantly improve translation involving Assamese thanks to the Bengali data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/prajdabre/yanmtt8 We choose SPM because SPM can work with unsegmented, untokenized raw text for any language. Inside the transformers library, the AlbertTokenizer acts as a wrapper for the SPM model. Our implementation also allows the usage of the BPE model but we do not use it in this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We have not tuned these decoding hyperparameters and our BLEU scores may improve.10 As can be seen, the rankings of translation can change depending on the metric which indicates that multi-metric ranking is important 11 http://lotus.kuee.kyoto-u.ac.jp/WAT/evaluation/index. html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The unidirectional scores without fine-tuning are actually organizer baselines but we were the ones who actually developed them so we use the scores as is.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We used a high learning rate which may not have been a good idea in retrospect.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Massively multilingual neural machine translation", |
| "authors": [ |
| { |
| "first": "Roee", |
| "middle": [], |
| "last": "Aharoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "3874--3884", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1388" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roee Aharoni, Melvin Johnson, and Orhan Firat. 2019. Massively multilingual neural machine translation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Compu- tational Linguistics: Human Language Technolo- gies, Volume 1 (Long and Short Papers), pages 3874-3884, Minneapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Neural Machine Translation by Jointly Learning to Align and Translate. arXiv e-prints", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1409.0473" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural Machine Translation by Jointly Learning to Align and Translate. arXiv e-prints, page arXiv:1409.0473.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A survey of multilingual neural machine translation", |
| "authors": [ |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenhui", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Kunchukuttan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ACM Comput. Surv", |
| "volume": "", |
| "issue": "5", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3406095" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raj Dabre, Chenhui Chu, and Anoop Kunchukuttan. 2020. A survey of multilingual neural machine translation. ACM Comput. Surv., 53(5).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Exploiting multilingualism through multistage finetuning for low-resource neural machine translation", |
| "authors": [ |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Atsushi", |
| "middle": [], |
| "last": "Fujita", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenhui", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1410--1416", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1146" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raj Dabre, Atsushi Fujita, and Chenhui Chu. 2019. Exploiting multilingualism through multistage fine- tuning for low-resource neural machine translation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 1410- 1416, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Multi-way, multilingual neural machine translation with a shared attention mechanism", |
| "authors": [ |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "The 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "866--875", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/n16-1101" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Orhan Firat, Kyunghyun Cho, and Yoshua Bengio. 2016. Multi-way, multilingual neural machine trans- lation with a shared attention mechanism. In NAACL HLT 2016, The 2016 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, San Diego California, USA, June 12-17, 2016, pages 866-875. The Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A new algorithm for data compression", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Gage", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "C Users J", |
| "volume": "12", |
| "issue": "2", |
| "pages": "23--38", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Gage. 1994. A new algorithm for data compres- sion. C Users J., 12(2):23-38.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Iterative backtranslation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Duy", |
| "middle": [], |
| "last": "Vu Cong", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2nd Workshop on Neural Machine Translation and Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "18--24", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-2703" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vu Cong Duy Hoang, Philipp Koehn, Gholamreza Haffari, and Trevor Cohn. 2018. Iterative back- translation for neural machine translation. In Pro- ceedings of the 2nd Workshop on Neural Machine Translation and Generation, pages 18-24, Mel- bourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Automatic evaluation of translation quality for distant language pairs", |
| "authors": [ |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Isozaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsutomu", |
| "middle": [], |
| "last": "Hirao", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Katsuhito", |
| "middle": [], |
| "last": "Sudoh", |
| "suffix": "" |
| }, |
| { |
| "first": "Hajime", |
| "middle": [], |
| "last": "Tsukada", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "944--952", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hideki Isozaki, Tsutomu Hirao, Kevin Duh, Katsuhito Sudoh, and Hajime Tsukada. 2010. Automatic eval- uation of translation quality for distant language pairs. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Process- ing, pages 944-952, Cambridge, MA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Google's multilingual neural machine translation system: Enabling zero-shot translation", |
| "authors": [ |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Thorat", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernanda", |
| "middle": [], |
| "last": "Vi\u00e9gas", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Wattenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Macduff", |
| "middle": [], |
| "last": "Hughes", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "339--351", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00065" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Melvin Johnson, Mike Schuster, Quoc V. Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Vi\u00e9gas, Martin Wattenberg, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: En- abling zero-shot translation. Transactions of the As- sociation for Computational Linguistics, 5:339-351.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "SentencePiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "66--71", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-2012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taku Kudo and John Richardson. 2018. SentencePiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Ai4bharat-indicnlp corpus: Monolingual corpora and word embeddings for indic languages", |
| "authors": [ |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Kunchukuttan", |
| "suffix": "" |
| }, |
| { |
| "first": "Divyanshu", |
| "middle": [], |
| "last": "Kakwani", |
| "suffix": "" |
| }, |
| { |
| "first": "Satish", |
| "middle": [], |
| "last": "Golla", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "C" |
| ], |
| "last": "Gokul", |
| "suffix": "" |
| }, |
| { |
| "first": "Avik", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitesh", |
| "middle": [ |
| "M" |
| ], |
| "last": "Khapra", |
| "suffix": "" |
| }, |
| { |
| "first": "Pratyush", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anoop Kunchukuttan, Divyanshu Kakwani, Satish Golla, Gokul N. C., Avik Bhattacharyya, Mitesh M. Khapra, and Pratyush Kumar. 2020. Ai4bharat- indicnlp corpus: Monolingual corpora and word embeddings for indic languages. CoRR, abs/2005.00085.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdelrahman", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "7871--7880", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.703" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre- training for natural language generation, translation, and comprehension. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Multilingual denoising pre-training for neural machine translation", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiatao", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "726--742", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00343" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation. Transac- tions of the Association for Computational Linguis- tics, 8:726-742.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Jass: Japanesespecific sequence to sequence pre-training for neural machine translation", |
| "authors": [ |
| { |
| "first": "Zhuoyuan", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabien", |
| "middle": [], |
| "last": "Cromieres", |
| "suffix": "" |
| }, |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Haiyue", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "3683--3691", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuoyuan Mao, Fabien Cromieres, Raj Dabre, Haiyue Song, and Sadao Kurohashi. 2020. Jass: Japanese- specific sequence to sequence pre-training for neu- ral machine translation. In Proceedings of The 12th Language Resources and Evaluation Confer- ence, pages 3683-3691, Marseille, France. Euro- pean Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Proceedings of the 6th Workshop on Asian Translation", |
| "authors": [ |
| { |
| "first": "Toshiaki", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenchen", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Kunchukuttan", |
| "suffix": "" |
| }, |
| { |
| "first": "Nobushige", |
| "middle": [], |
| "last": "Doi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Oda", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Shantipriya", |
| "middle": [], |
| "last": "Parida", |
| "suffix": "" |
| }, |
| { |
| "first": "Isao", |
| "middle": [], |
| "last": "Goto", |
| "suffix": "" |
| }, |
| { |
| "first": "Hidaya", |
| "middle": [], |
| "last": "Mino", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toshiaki Nakazawa, Chenchen Ding, Raj Dabre, Anoop Kunchukuttan, Nobushige Doi, Yusuke Oda, Ond\u0159ej Bojar, Shantipriya Parida, Isao Goto, and Hidaya Mino, editors. 2019. Proceedings of the 6th Workshop on Asian Translation. Association for Computational Linguistics, Hong Kong, China.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Overview of the 8th workshop on Asian translation", |
| "authors": [ |
| { |
| "first": "Toshiaki", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Nakayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenchen", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Shohei", |
| "middle": [], |
| "last": "Higashiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideya", |
| "middle": [], |
| "last": "Mino", |
| "suffix": "" |
| }, |
| { |
| "first": "Isao", |
| "middle": [], |
| "last": "Goto", |
| "suffix": "" |
| }, |
| { |
| "first": "Win", |
| "middle": [ |
| "Pa" |
| ], |
| "last": "Pa", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Kunchukuttan", |
| "suffix": "" |
| }, |
| { |
| "first": "Shantipriya", |
| "middle": [], |
| "last": "Parida", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenhui", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Akiko", |
| "middle": [], |
| "last": "Eriguchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaori", |
| "middle": [], |
| "last": "Abe", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Oda", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 8th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toshiaki Nakazawa, Hideki Nakayama, Chenchen Ding, Raj Dabre, Shohei Higashiyama, Hideya Mino, Isao Goto, Win Pa Pa, Anoop Kunchukut- tan, Shantipriya Parida, Ond\u0159ej Bojar, Chenhui Chu, Akiko Eriguchi, Kaori Abe, and Sadao Oda, Yusuke Kurohashi. 2021. Overview of the 8th work- shop on Asian translation. In Proceedings of the 8th Workshop on Asian Translation, Bangkok, Thailand. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Overview of the 7th workshop on Asian translation", |
| "authors": [ |
| { |
| "first": "Toshiaki", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Nakayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenchen", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Shohei", |
| "middle": [], |
| "last": "Higashiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideya", |
| "middle": [], |
| "last": "Mino", |
| "suffix": "" |
| }, |
| { |
| "first": "Isao", |
| "middle": [], |
| "last": "Goto", |
| "suffix": "" |
| }, |
| { |
| "first": "Win", |
| "middle": [ |
| "Pa" |
| ], |
| "last": "Pa", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Kunchukuttan", |
| "suffix": "" |
| }, |
| { |
| "first": "Shantipriya", |
| "middle": [], |
| "last": "Parida", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 7th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "1--44", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toshiaki Nakazawa, Hideki Nakayama, Chenchen Ding, Raj Dabre, Shohei Higashiyama, Hideya Mino, Isao Goto, Win Pa Pa, Anoop Kunchukut- tan, Shantipriya Parida, Ond\u0159ej Bojar, and Sadao Kurohashi. 2020. Overview of the 7th workshop on Asian translation. In Proceedings of the 7th Work- shop on Asian Translation, pages 1-44, Suzhou, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073083.1073135" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Neural abstractive text summarization with sequence-to-sequence models", |
| "authors": [ |
| { |
| "first": "Tian", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaser", |
| "middle": [], |
| "last": "Keneshloo", |
| "suffix": "" |
| }, |
| { |
| "first": "Naren", |
| "middle": [], |
| "last": "Ramakrishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandan", |
| "middle": [ |
| "K" |
| ], |
| "last": "Reddy", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "ACM/IMS Trans. Data Sci", |
| "volume": "2", |
| "issue": "1", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3419106" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tian Shi, Yaser Keneshloo, Naren Ramakrishnan, and Chandan K. Reddy. 2021. Neural abstractive text summarization with sequence-to-sequence models. ACM/IMS Trans. Data Sci., 2(1).", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Jiatao Gu, and Angela Fan. 2020. Multilingual translation with extensible multilingual pretraining and finetuning", |
| "authors": [ |
| { |
| "first": "Yuqing", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chau", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng-Jen", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Vishrav", |
| "middle": [], |
| "last": "Chaudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiatao", |
| "middle": [], |
| "last": "Gu", |
| "suffix": "" |
| }, |
| { |
| "first": "Angela", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Na- man Goyal, Vishrav Chaudhary, Jiatao Gu, and An- gela Fan. 2020. Multilingual translation with exten- sible multilingual pretraining and finetuning. CoRR, abs/2008.00401.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "Remi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Davison", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Shleifer", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "von Platen", |
| "suffix": "" |
| }, |
| { |
| "first": "Clara", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Yacine", |
| "middle": [], |
| "last": "Jernite", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Plu", |
| "suffix": "" |
| }, |
| { |
| "first": "Canwen", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Teven", |
| "middle": [ |
| "Le" |
| ], |
| "last": "Scao", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Gugger", |
| "suffix": "" |
| }, |
| { |
| "first": "Mariama", |
| "middle": [], |
| "last": "Drame", |
| "suffix": "" |
| }, |
| { |
| "first": "Quentin", |
| "middle": [], |
| "last": "Lhoest", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "38--45", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-demos.6" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-art natural language process- ing. In Proceedings of the 2020 Conference on Em- pirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Deep AM-FM: Toolkit for Automatic Dialogue Evaluation", |
| "authors": [ |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [ |
| "Fernando" |
| ], |
| "last": "D'Haro", |
| "suffix": "" |
| }, |
| { |
| "first": "Rafael", |
| "middle": [ |
| "E" |
| ], |
| "last": "Banchs", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Friedrichs", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "53--69", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/978-981-15-8395-7_5" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen Zhang, Luis Fernando D'Haro, Rafael E. Banchs, Thomas Friedrichs, and Haizhou Li. 2021. Deep AM-FM: Toolkit for Automatic Dialogue Evaluation, pages 53-69. Springer Singapore, Singapore.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Transfer learning for low-resource neural machine translation", |
| "authors": [ |
| { |
| "first": "Barret", |
| "middle": [], |
| "last": "Zoph", |
| "suffix": "" |
| }, |
| { |
| "first": "Deniz", |
| "middle": [], |
| "last": "Yuret", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "May", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1568--1575", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1163" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barret Zoph, Deniz Yuret, Jonathan May, and Kevin Knight. 2016. Transfer learning for low-resource neural machine translation. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 1568-1575, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "text": "Monolingual corpora statistics.", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "text": "Bilingual corpora statistics for the PMI dataset only.", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "text": "26.21 28.21 20.33 13.64 15.10 16.35 23.66 16.07 14.70 Many-to-one 20.06 27.72 30.86 24.66 21.79 22.66 23.04 27.61 21.90 23.39 MBART+ Unidirectional 21.37 33.65 35.80 29.29 26.55 25.45 25.81 34.34 24.72 27.76", |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Model</td><td>Bn</td><td>Gu</td><td>Hi</td><td>Kn</td><td colspan=\"2\">Source Language Ml Mr</td><td>Or</td><td>Pa</td><td>Ta</td><td>Te</td></tr><tr><td colspan=\"11\">Unidirectional 11.27 MBART+ Many-to-one 23.89 33.53 36.20 30.87 28.23 27.88 27.93 35.81 26.90 28.77</td></tr><tr><td colspan=\"11\">Official Best Submission 31.87 43.98 46.93 40.34 38.38 36.64 37.06 46.39 36.13 39.80</td></tr><tr><td>Model</td><td>Bn</td><td>Gu</td><td>Hi</td><td>Kn</td><td colspan=\"2\">Target Language Ml Mr</td><td>Or</td><td>Pa</td><td>Ta</td><td>Te</td></tr><tr><td>Unidirectional</td><td colspan=\"5\">5.58 16.38 23.31 10.11 3.34</td><td>8.82</td><td colspan=\"3\">9.08 21.77 6.38</td><td>2.80</td></tr><tr><td>One-to-many</td><td colspan=\"10\">11.56 23.49 29.12 17.53 6.22 15.01 16.43 28.37 10.82 3.81</td></tr><tr><td>MBART+ Unidirectional</td><td colspan=\"10\">10.59 23.04 29.59 16.13 5.98 14.69 15.01 26.94 10.33 4.59</td></tr><tr><td>MBART+ One-to-many</td><td colspan=\"10\">12.84 24.26 30.18 18.22 6.51 16.38 16.69 29.15 11.42 4.20</td></tr><tr><td colspan=\"11\">Official Best Submission 15.97 27.80 38.65 21.30 15.49 20.42 20.15 33.43 14.43 16.85</td></tr></table>" |
| } |
| } |
| } |
| } |