| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T04:34:16.302638Z" |
| }, |
| "title": "HW-TSC's Participation in the WAT 2020 Indic Languages Multilingual Task", |
| "authors": [ |
| { |
| "first": "Zhengzhe", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "yuzhengzhe@huawei.com" |
| }, |
| { |
| "first": "Zhanglin", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "wuzhanglin2@huawei.com" |
| }, |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "chenxiaoyu35@huawei.com" |
| }, |
| { |
| "first": "Daimeng", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "weidaimeng@huawei.com" |
| }, |
| { |
| "first": "Hengchao", |
| "middle": [], |
| "last": "Shang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "shanghengchao@huawei.com" |
| }, |
| { |
| "first": "Jiaxin", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "guojiaxin1@huawei.com" |
| }, |
| { |
| "first": "Zongyao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "lizongyao@huawei.com" |
| }, |
| { |
| "first": "Minghan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "wangminghan@huawei.com" |
| }, |
| { |
| "first": "Liangyou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "liliangyou@huawei.com" |
| }, |
| { |
| "first": "Lizhi", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "leilizhi@huawei.com" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "yanghao30@huawei.com" |
| }, |
| { |
| "first": "Ying", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "qinying@huawei.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes our work in the WAT 2020 Indic Multilingual Translation Task. We participated in all 7 language pairs (En\u2194Bn/Hi/Gu/Ml/Mr/Ta/Te) in both directions under the constrained condition-using only the officially provided data. Using transformer as a baseline, our Multi\u2192En and En\u2192Multi translation systems achieve the best performances. Detailed data filtering and data domain selection are the keys to performance enhancement in our experiment, with an average improvement of 2.6 BLEU scores for each language pair in the En\u2192Multi system and an average improvement of 4.6 BLEU scores regarding the Multi\u2192En. In addition, we employed language independent adapter to further improve the system performances. Our submission obtains competitive results in the final evaluation.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes our work in the WAT 2020 Indic Multilingual Translation Task. We participated in all 7 language pairs (En\u2194Bn/Hi/Gu/Ml/Mr/Ta/Te) in both directions under the constrained condition-using only the officially provided data. Using transformer as a baseline, our Multi\u2192En and En\u2192Multi translation systems achieve the best performances. Detailed data filtering and data domain selection are the keys to performance enhancement in our experiment, with an average improvement of 2.6 BLEU scores for each language pair in the En\u2192Multi system and an average improvement of 4.6 BLEU scores regarding the Multi\u2192En. In addition, we employed language independent adapter to further improve the system performances. Our submission obtains competitive results in the final evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "This paper describes our work in the WAT 2020 Indic Multilingual Translation Task (Nakazawa et al., 2020) . Our team (Team ID: HW-TSC) participated in all seven language pairs (En\u2194Bn/Hi/Gu/Ml/Mr/Ta/Te) by training Multi\u2192En and En\u2192Multi multilingual translation models. Based on previous works, we mainly focus on exploiting fine-grained data filtering and domain data selection techniques to enhance system performance.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 105, |
| "text": "(Nakazawa et al., 2020)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Multistep filtering is conducted to sort out the high-quality subset for training. Several other strategies, including Back-Translation (Edunov et al., 2018) , Tagged Back-Translation , Joint Training (Zhang et al., 2018) , Fine-Tuning (Sun et al., 2019) , Ensemble and Adapter Fine-Tuning are employed and tested in our experiments. sacreBLEU (Post, 2018) is used to evaluation the system performance.", |
| "cite_spans": [ |
| { |
| "start": 136, |
| "end": 157, |
| "text": "(Edunov et al., 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 201, |
| "end": 221, |
| "text": "(Zhang et al., 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 236, |
| "end": 254, |
| "text": "(Sun et al., 2019)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 344, |
| "end": 356, |
| "text": "(Post, 2018)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This section describes the size and source of the dataset as well as our data filtering techniques.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We use PM India and CVIT-PIB datasets for the training of the 7 language pairs. PM India (dataset size: 255k) is a high-quality alignment corpus already being filtered while CVIT-PIB (dataset size: 478k) contains mainly multilingual hybrid data that requires alignment. Table 1 shows the data distribution of 7 language pairs. Apart from the two multilingual datasets, 700k monolingual data provided by the organizer is also used in our experiments.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 270, |
| "end": 277, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Source", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Our data pre-processing procedures include:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 Convert full-width text to half-width text;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 De-duplicate the data;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 Remove text which the source or target side is empty;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 Perform language identification (Joulin et al., 2016b,a) on the dataset and remove texts with undesired tags;", |
| "cite_spans": [ |
| { |
| "start": 34, |
| "end": 58, |
| "text": "(Joulin et al., 2016b,a)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 Employ multilingual sentencepiece model (SPM) with regularization (Kudo and Richardson, 2018; Kudo, 2018) for all language pairs;", |
| "cite_spans": [ |
| { |
| "start": 68, |
| "end": 95, |
| "text": "(Kudo and Richardson, 2018;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 96, |
| "end": 107, |
| "text": "Kudo, 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 Filter the corpora with fast-align (Dyer et al., 2013) ;", |
| "cite_spans": [ |
| { |
| "start": 37, |
| "end": 56, |
| "text": "(Dyer et al., 2013)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 Delete extra-long sentences with more than 100 sub-tokens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "It should be noted that we trained the hybrid SPM in conjunction with English and 7 other indic languages. In order to ensure that each language has equivalent vocabulary size, we averaged the training data for each language when training SPM, namely, over-sampling low resource languages. In consideration of the small dataset size, we did not perform strict data cleansing strategy at the beginning but merely observed poor alignment results regarding the CVIT-PIB dataset compared with the PM India dataset. So we further use Fast-align on the dataset to improve the data quality, although a quite large amount of data was removed during this process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Pre-processing", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "During the experiment, we observed that the system trained only with the PM India dataset performed better than the system trained jointly with PM India and CVIT-PIB datasets. We believe the reason is that the domain of the PM India dataset is much more align with that of the test set. So we further filtered the CVIT-PIB dataset to select the \"in-domain\" data. Inspired by curriculum learning ideas , we exploited the Static Data Selection strategy. We regarded the PM India dataset and the dev set as \"in-domain\" and tried to sort out \"in-domain\" data in the CVIT-PIB dataset with a trained classifier. First we use PM India dataset combine CVIT-PIB dataset to train a base model. Then we sampled a fixed number of sentences (e.g. 30k) from the source side (EN) of the PM India dataset plus the dev sets and labeled them as IN-domain. Then we sampled the same amount of sentence from the CVIT-PIB dataset and labeled then as OUT-domain. We trained a Fasttext (Bojanowski et al., 2017) classifier on the sampled dataset to score sentences in the CVIT-PIB with the classification probability of P (y = InDomain|x) to retrieve the top-k bi-text pairs. Where k is set to 5k in our experiment. Not that even the probability score is lower than 0.5, we still kept the sentence pairs as long as their ranks are within the top-k. Then we used the \"in-domain\" CVIT-PIB data and PM India data to fine-tune the base model we trained and observed better performances.", |
| "cite_spans": [ |
| { |
| "start": 962, |
| "end": 987, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Selection", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "From the experiment, we find that data selection is quite effective compared to using entire CVIT-PIB dataset on both En\u2192Multi and Multi\u2192En.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Selection", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "This section describes our system used in the WAT 2020 Indic Multilingual Translation Task. The following introduced strategies are tested sequentially and our experimental results regarding each strategy is listed in each part.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Overview", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Language PM India CVIT-PIB Mono En-Bn 26K 48K 114k En-Gu 44K 29K 121k En-Hi 52K 195K 155k En-Ml 29K 31K 80k En-Mr 31K 80K 116k En-Ta 35K 87K 87k En-Te 35K 5K 109k Total 255K 478K", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Overview", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Transformer (Vaswani et al., 2017a) has been widely used for machine translation in recent years, which has achieved good performance even with the most primitive architecture without much modifications. Therefore, we choose to start from Transformer-Deep and consider it as a baseline, which is the model with deeper encoder version proposed in (Sun et al., 2019) , with 35 encoder layers and 3 docoder layers, 512 hidden size and 4096 batch size. We used the Adam optimizer (Kingma and Ba, 2014) with \u03b21 = 0.9, \u03b22 = 0.98. We used the same warmup and decay strategy for learning rate as (Vaswani et al., 2017b) , with 4,000 warmup steps. During training, we employed label smoothing value of 0.1 (Szegedy et al., 2016) . For evaluation, we used beam search with a beam size of 4 and length penalty \u03b1 = 0.6 (Wu et al., 2016) . Our models are implemented with THUMT (Zhang et al., 2017) , and trained on a platform with 8 V100 GPUs. We train models for 100k steps and average the last 6 checkpoints for evaluation.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 35, |
| "text": "(Vaswani et al., 2017a)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 346, |
| "end": 364, |
| "text": "(Sun et al., 2019)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 588, |
| "end": 611, |
| "text": "(Vaswani et al., 2017b)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 697, |
| "end": 719, |
| "text": "(Szegedy et al., 2016)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 807, |
| "end": 824, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 865, |
| "end": 885, |
| "text": "(Zhang et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For this Indic Multilingual Translation Task, we exploited different multilingual training strategies regarding multilingual training on the basis of transformer. We trained the hybrid SPM model in conjunction with English and 7 indic languages as the shared word segmentation system for all language pairs. We kept the vocabulary within 30k, which included all tokens of all 8 languages (En/Bn/Hi/Gu/Ml/Mr/Ta/Te Table 2 shows that Multi\u2192Multi performs worse than the other strategy and thus we only consider the separate En\u2192Multi and Multi\u2192En models in the following experiments. We believe that a Multi\u2192Multi model contains too many languages pairs (14 in this case) so conflicts and confusions may occur among language pairs in different directions. Regarding our En\u2192Multi model, we added tags \"2XX\" (XX indicates the target language, e.g. 2bn) at the beginning of the source sentence for each bilingual sentence pair, a strategy used in (Johnson et al., 2017 ). Then we mixed all data for training. Due to the limitations of the multilingual translation model, once the model is trained, other further training methods (fine-tuning, etc.) might be difficult to improve the performance of the model, so we will introduce the fine-tuning method we use below to improve each language pair without affecting the performance of others.", |
| "cite_spans": [ |
| { |
| "start": 941, |
| "end": 962, |
| "text": "(Johnson et al., 2017", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 413, |
| "end": 420, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multilingual Strategy", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our experiment demonstrates that simply combining all bilingual data altogether does not produce gains to model quality, as described in the previous section as well in Table 3 and Table 4 that adding the whole CVIT-PIB dataset negatively influenced the model performance with respect to most of the language pairs. Two strategies regarding data augmentation are leveraged:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 169, |
| "end": 188, |
| "text": "Table 3 and Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Augmentation", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Data filtering: To address the poor-quality CVIT-PIB data, as we introduced in the previous section, we used fast-align to further filter the dataset despite a significant reduction of the training data size. This strategy works as we can see from Table 3 and Table 4 that the BLEU scores of several languages achieve increases of more than 0.5 points.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 250, |
| "end": 257, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 262, |
| "end": 269, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Augmentation", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 Domain transfer: Static Data Selection is leveraged to filter \"in-domain\" data. As we introduced in the previous section, we regarded the domain of PM India dataset more align with the test set and CVIT-PIB more like \"out-of-domain\" data. We use the techniques described before to select more \"in-domain\" data in the CVIT-PIB dataset and combined the filtered CVIT-PIB data and PM India data to fine-tuning the models. Another key issue constraining the system performance is the imbalanced data sizes for each language.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Augmentation", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In the ideal setting, the amount of data in each language is supposed to be equal. En-Hi is regarded as a high-resource language in this experiment as the size of its training data far exceeding that of other language in this task. Therefore, we over-sampled the training data of other low-resource language data to ensure their training data size are balanced. This strategy led to a huge improvement in BLEU scores, as shown in Table 3 and Table 4 : an average improvement of 2.6 BLEU scores for En\u2192Multi model and an average improvement of 4.6 BLEU scores for Multi\u2192En model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 430, |
| "end": 449, |
| "text": "Table 3 and Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Augmentation", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We trained four models in each direction with different seeds and ensembed these models. Ensemble also contributed to the increase of BLEU scores in our experiment. Particularly, we observed an 2.7 improvement of BLEU with regard to En\u2192Gu.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ensemble", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Previous works demonstrate that fine-tuning a model with in-domain data could effectively improve the performance of model. However, due to the limitations of the multilingual translation model, once the model is trained, when fine tuning one of the language pairs, the performance of others will go worse. Thanks to the finding of Adapter , we are able to fine-tune each language pair without impacting the performance of others. In the experiment, we set the adapter size to 128 and fine-tuned the model on the dev set for each language pair in En\u2192Multi with 3,000 tokens per batch for one epoch, successfully achieving 1.02 of BLEU improvements on En\u2192Ta and 1.8 of BLEU improvements on En\u2192Te. However, we do not gain any improvement for other language pairs. Due to time restriction and heavy workload, we did not fine-tune the Multi\u2192En model. One should noticed that whether En\u2192Multi or Multi\u2192en are multilingual translation models, finetuning cannot be used usually, because the improvement of a one language pair and will hurt others' performance. Through adapter fine-tuning, we can guarantee that fine-tuning one language pair does not affect the quality of other language pairs in the model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language independence Adapter Fine-tuning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "This section presents the experimental results for each direction of all three language pairs in Table 3 and Table 4 , where the contribution of strategies introduced in previous sections are listed in each row. In this competition, among the 14 directions of the 7 Indic language pairs (En\u2194Bn/Hi/Gu/Ml/Mr/Ta/Te), our submission ranks the first place in 13 language directions while En\u2192Hi even achieve an improvement of 10.5 points in term of BLEU when comparing with the baseline.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 97, |
| "end": 116, |
| "text": "Table 3 and Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Result", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Here are several findings worthy of sharing during our experiments:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 In this experiment, we also used Back Translation and Tagged Back-Translation, but only saw undesired results. The performance of most language pairs became even worse and the BLEU scores of some languages even reduced more than 10 points. We think data domain may be responsible for the BLEU reduction, similar to the situation when we adding CVIT-PIB data for training but only gained worse results. Therefore we give up Back-Translation in our experiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 Both En\u2192Multi and Multi\u2192En models are better than the Multi\u2192Multi model, which is the same as the result of mainstream viewpoint. Although many believe that the reason is insufficient model capacity of a Multi\u2192Multi model, we think another possible reason is language confusion. This experiment contains 14 language pairs (two-way) while the data size is under one million. So the transformer capability is certainly enough. But since there are 14 languages in one model, there may be conflicts and confusion between language pairs in different directions, especially when they come from the same language family. Off-target (Zhang et al., 2020) could be a key issue, which we will further investigate in our future work.", |
| "cite_spans": [ |
| { |
| "start": 627, |
| "end": 647, |
| "text": "(Zhang et al., 2020)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 We find that data selection plays a more important role in our experiment when comparing with other training strategies. We observe that the domain of a small dataset is usually too narrow so the introduction of other data source will cause a great shift on domain, thereby affecting the performance of models on dev/test sets. For example, En\u2192Bn can reach a BLEU score of 15.61 with only PM India data, but only 7.27 after adding the CVIT-PIB data. So we refer to the idea of Static Data Selection in the curriculum learning and ensured little domain-shifting while training data size increases, thus the system performance enhances.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This paper presents the submissions by HW-TSC on the WAT 2020 Indic Multilingual Translation Task. We perform experiments with a series of pre-processing and training strategies. The effectiveness of each strategy is demonstrated. Our submission finally achieves competitive result in the evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Massively multilingual neural machine translation in the wild: Findings and challenges", |
| "authors": [ |
| { |
| "first": "Naveen", |
| "middle": [], |
| "last": "Arivazhagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Bapna", |
| "suffix": "" |
| }, |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| }, |
| { |
| "first": "Dmitry", |
| "middle": [], |
| "last": "Lepikhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Mia", |
| "middle": [ |
| "Xu" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Cherry", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.05019" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naveen Arivazhagan, Ankur Bapna, Orhan Firat, Dmitry Lepikhin, Melvin Johnson, Maxim Krikun, Mia Xu Chen, Yuan Cao, George Foster, Colin Cherry, et al. 2019. Massively multilingual neural machine translation in the wild: Findings and chal- lenges. arXiv preprint arXiv:1907.05019.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Simple, scalable adaptation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Bapna", |
| "suffix": "" |
| }, |
| { |
| "first": "Naveen", |
| "middle": [], |
| "last": "Arivazhagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Orhan", |
| "middle": [], |
| "last": "Firat", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.08478" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankur Bapna, Naveen Arivazhagan, and Orhan Firat. 2019. Simple, scalable adaptation for neural ma- chine translation. arXiv preprint arXiv:1909.08478.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Trans. Assoc. Comput. Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Trans. Assoc. Comput. Lin- guistics, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A simple, fast, and effective reparameterization of IBM model 2", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Chahuneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Human Language Technologies: Conference of the North American Chapter of the Association of Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "644--648", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Dyer, Victor Chahuneau, and Noah A. Smith. 2013. A simple, fast, and effective reparameteriza- tion of IBM model 2. In Human Language Technolo- gies: Conference of the North American Chapter of the Association of Computational Linguistics, Pro- ceedings, June 9-14, 2013, Westin Peachtree Plaza Hotel, Atlanta, Georgia, USA, pages 644-648.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Understanding back-translation at scale", |
| "authors": [ |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "489--500", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/d18-1045" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sergey Edunov, Myle Ott, Michael Auli, and David Grangier. 2018. Understanding back-translation at scale. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, October 31 -November 4, 2018, pages 489-500.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Google's multilingual neural machine translation system: Enabling zero-shot translation", |
| "authors": [ |
| { |
| "first": "Melvin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Thorat", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernanda", |
| "middle": [], |
| "last": "Vi\u00e9gas", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Wattenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Macduff", |
| "middle": [], |
| "last": "Hughes", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "339--351", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00065" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Melvin Johnson, Mike Schuster, Quoc V. Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Vi\u00e9gas, Martin Wattenberg, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: En- abling zero-shot translation. Transactions of the As- sociation for Computational Linguistics, 5:339-351.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Matthijs Douze, H\u00e9rve J\u00e9gou, and Tomas Mikolov. 2016a. Fasttext.zip: Compressing text classification models", |
| "authors": [ |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1612.03651" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, Matthijs Douze, H\u00e9rve J\u00e9gou, and Tomas Mikolov. 2016a. Fasttext.zip: Compressing text classification models. arXiv preprint arXiv:1612.03651.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Bag of tricks for efficient text classification", |
| "authors": [ |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1607.01759" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2016b. Bag of tricks for efficient text classification. arXiv preprint arXiv:1607.01759.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Subword regularization: Improving neural network translation models with multiple subword candidates", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, ACL 2018", |
| "volume": "1", |
| "issue": "", |
| "pages": "66--75", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1007" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taku Kudo. 2018. Subword regularization: Improving neural network translation models with multiple sub- word candidates. In Proceedings of the 56th Annual Meeting of the Association for Computational Lin- guistics, ACL 2018, Melbourne, Australia, July 15- 20, 2018, Volume 1: Long Papers, pages 66-75.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "2018", |
| "issue": "", |
| "pages": "66--71", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/d18-2012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taku Kudo and John Richardson. 2018. Sentencepiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, EMNLP 2018: System Demonstrations, Brussels, Belgium, October 31 -November 4, 2018, pages 66-71.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Overview of the 7th workshop on Asian translation", |
| "authors": [ |
| { |
| "first": "Toshiaki", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Nakayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenchen", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideya", |
| "middle": [], |
| "last": "Mino", |
| "suffix": "" |
| }, |
| { |
| "first": "Isao", |
| "middle": [], |
| "last": "Goto", |
| "suffix": "" |
| }, |
| { |
| "first": "Win", |
| "middle": [ |
| "Pa" |
| ], |
| "last": "Pa", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Kunchukuttan", |
| "suffix": "" |
| }, |
| { |
| "first": "Shantipriya", |
| "middle": [], |
| "last": "Parida", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 7th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Toshiaki Nakazawa, Hideki Nakayama, Chenchen Ding, Raj Dabre, Hideya Mino, Isao Goto, Win Pa Pa, Anoop Kunchukuttan, Shantipriya Parida, Ond\u0159ej Bojar, and Sadao Kurohashi. 2020. Overview of the 7th workshop on Asian transla- tion. In Proceedings of the 7th Workshop on Asian Translation, Suzhou, China. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A call for clarity in reporting BLEU scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "186--191", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6319" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186- 191, Belgium, Brussels. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Baidu neural machine translation systems for WMT19", |
| "authors": [ |
| { |
| "first": "Meng", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Bojian", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhongjun", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Conference on Machine Translation, WMT 2019", |
| "volume": "2", |
| "issue": "", |
| "pages": "374--381", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/w19-5341" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Meng Sun, Bojian Jiang, Hao Xiong, Zhongjun He, Hua Wu, and Haifeng Wang. 2019. Baidu neural ma- chine translation systems for WMT19. In Proceed- ings of the Fourth Conference on Machine Transla- tion, WMT 2019, Florence, Italy, August 1-2, 2019 -Volume 2: Shared Task Papers, Day 1, pages 374- 381.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Rethinking the inception architecture for computer vision", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Szegedy", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Vanhoucke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Ioffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Shlens", |
| "suffix": "" |
| }, |
| { |
| "first": "Zbigniew", |
| "middle": [], |
| "last": "Wojna", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "2818--2826", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. 2016. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vi- sion and pattern recognition, pages 2818-2826.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaiser", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \\Lukasz Kaiser, and Illia Polosukhin. 2017a. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017b. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Dynamically composing domain-data selection with clean-data selection by\" co-curricular learning\" for neural machine translation", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Isaac", |
| "middle": [], |
| "last": "Caswell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ciprian", |
| "middle": [], |
| "last": "Chelba", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.01130" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Wang, Isaac Caswell, and Ciprian Chelba. 2019. Dynamically composing domain-data selection with clean-data selection by\" co-curricular learning\" for neural machine translation. arXiv preprint arXiv:1906.01130.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Google's neural machine translation system", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Bridging the gap between human and machine translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.08144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google's neural machine translation system: Bridging the gap between hu- man and machine translation. arXiv preprint arXiv:1609.08144.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Improving massively multilingual neural machine translation and zero-shot translation", |
| "authors": [ |
| { |
| "first": "Biao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2004.11867" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Biao Zhang, Philip Williams, Ivan Titov, and Rico Sennrich. 2020. Improving massively multilingual neural machine translation and zero-shot translation. arXiv preprint arXiv:2004.11867.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "THUMT: an open source toolkit for neural machine translation", |
| "authors": [ |
| { |
| "first": "Jiacheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanzhuo", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiqi", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Huan-Bo", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiacheng Zhang, Yanzhuo Ding, Shiqi Shen, Yong Cheng, Maosong Sun, Huan-Bo Luan, and Yang Liu. 2017. THUMT: an open source toolkit for neural machine translation. CoRR, abs/1706.06415.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Joint training for neural machine translation models with monolingual data", |
| "authors": [ |
| { |
| "first": "Zhirui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shujie", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Enhong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.00353" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhirui Zhang, Shujie Liu, Mu Li, Ming Zhou, and En- hong Chen. 2018. Joint training for neural machine translation models with monolingual data. arXiv preprint arXiv:1803.00353.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "html": null, |
| "num": null, |
| "text": "Data source of Indic Multilingual Translation Task", |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "num": null, |
| "text": "BLEU score of Multilingual strategy for En\u2192Multi, Multi\u2192En and Multi\u2192Multi. for line En\u2192Multi / Multi\u2192En, En\u2192XX inferenced by En\u2192Multi model and XX\u2192En inferenced by Multi\u2192En model both methods by training three models with the PM India dataset. The results listed in", |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "num": null, |
| "text": "+3.44) 10.74 (+0.33) 16.63 (+0.65) 3.23 (+0.36) 9.39 (+0.44) 4.17 (-0.22) 3.50 (+1.45) +0.92 + Domain Transfer 18.30 (+7.59) 11.9 (+1.16) 22.12 (+5.49) 4.10 (+0.87) 10.54 (+1.15) 5.64 (+1.47) 4.22 (+0.72) +2.64 + Ensemble 18.47 (+0.17) 14.64 (+2.74) 23.13 (+1.01) 4.57 (+0.47) 11.32 (+0.78) 6.17 (+0.53) 4.64 (+0.42) +0.87", |
| "content": "<table><tr><td/><td>En\u2192Bn</td><td>En\u2192Gu</td><td>En\u2192Hi</td><td>En\u2192Ml</td><td>En\u2192Mr</td><td>En\u2192Ta</td><td>En\u2192Te</td><td>Avg</td></tr><tr><td>PM India Data</td><td>15.61</td><td>10.6</td><td>19.03</td><td>3.55</td><td>8.03</td><td>4.59</td><td>3.63</td></tr><tr><td>+ CVIT-PIB Data</td><td>7.27 (-8.34)</td><td colspan=\"4\">11.07 (+0.74) 15.98 (-3.05) 2.87 (-0.68) 8.95 (+0.92)</td><td>4.39 (-0.2)</td><td colspan=\"2\">2.05 (-1.58) -1.74</td></tr><tr><td colspan=\"2\">+ Fast-align 10.71 (+ Adapter Fine-tuning -</td><td>-</td><td>-</td><td>-</td><td>-</td><td colspan=\"3\">7.19 (+1.02) 6.49 (+1.85) +1.44</td></tr><tr><td>2020 Submission</td><td>19.64</td><td>14.66</td><td>24.48</td><td>4.60</td><td>11.52</td><td>7.21</td><td>6.93</td></tr><tr><td>Official Baseline</td><td>15.03</td><td>9.73</td><td>13.96</td><td>6.32</td><td>8.84</td><td>4.33</td><td>5.20</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "html": null, |
| "num": null, |
| "text": "The experimental result of En\u2192Multi", |
| "content": "<table><tr><td/><td>Bn\u2192En</td><td>Gu\u2192En</td><td>Hi\u2192En</td><td>Ml\u2192En</td><td>Mr\u2192En</td><td>Ta\u2192En</td><td>Te\u2192En</td><td>Avg</td></tr><tr><td>PM India Data</td><td>16.97</td><td>18.40</td><td>18.60</td><td>12.58</td><td>15.42</td><td>12.56</td><td>16.97</td></tr><tr><td>+ CVIT-PIB Data</td><td colspan=\"8\">14.98 (-1.99) 19.39 (+0.99) 18.97 (+0.37) 14.59 (+2.01) 17.13 (+1.71) 14.17 (+1.61) 11.74 (-5.23) -0.08</td></tr><tr><td>+ Fast-align</td><td colspan=\"8\">15.89 (+0.91) 21.26 (+1.87) 22.70 (+3.73) 14.26 (-0.33) 18.61 (+1.48) 14.58 (+0.41) 11.63 (-0.11) +1.14</td></tr><tr><td colspan=\"9\">+ Domain Transfer 21.52 (+5.63) 27.33 (+6.07) 26.96 (+4.26) 18.90 (+4.64) 22.88 (+4.27) 16.12 (+1.54) 17.32 (+5.69) +4.58</td></tr><tr><td>+ Ensemble</td><td colspan=\"3\">22.99 (+1.47) 29.91 (+2.58) 28.26 (+1.3)</td><td colspan=\"5\">20.63 (+1.73) 23.84 (+0.96) 19.98 (+3.84) 18.74 (+1.42) +1.90</td></tr><tr><td>2020 Submission</td><td>23.38</td><td>30.26</td><td>28.51</td><td>20.87</td><td>24.05</td><td>20.16</td><td>19.03</td></tr><tr><td>Official Baseline</td><td>21.80</td><td>24.48</td><td>25.68</td><td>15.46</td><td>21.15</td><td>18.37</td><td>15.44</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "html": null, |
| "num": null, |
| "text": "The experimental result of Multi\u2192En", |
| "content": "<table/>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |