| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T04:34:17.901932Z" |
| }, |
| "title": "Optimal Word Segmentation for Neural Machine Translation into Dravidian Languages", |
| "authors": [ |
| { |
| "first": "Prajit", |
| "middle": [], |
| "last": "Dhar", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Groningen", |
| "location": {} |
| }, |
| "email": "p.dhar@rug.nl" |
| }, |
| { |
| "first": "Arianna", |
| "middle": [], |
| "last": "Bisazza", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Groningen", |
| "location": {} |
| }, |
| "email": "a.bisazza@rug.nl" |
| }, |
| { |
| "first": "Gertjan", |
| "middle": [], |
| "last": "Van Noord", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Groningen", |
| "location": {} |
| }, |
| "email": "g.j.m.van.noord@rug.nl" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Dravidian languages, such as Kannada and Tamil, are notoriously difficult to translate by state-of-the-art neural models. This stems from the fact that these languages are morphologically very rich as well as being lowresourced. In this paper, we focus on subword segmentation and evaluate Linguistically Motivated Vocabulary Reduction (LMVR) against the more commonly used SentencePiece (SP) for the task of translating from English into four different Dravidian languages. Additionally we investigate the optimal subword vocabulary size for each language. We find that SP is the overall best choice for segmentation, and that larger subword vocabulary sizes lead to higher translation quality.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Dravidian languages, such as Kannada and Tamil, are notoriously difficult to translate by state-of-the-art neural models. This stems from the fact that these languages are morphologically very rich as well as being lowresourced. In this paper, we focus on subword segmentation and evaluate Linguistically Motivated Vocabulary Reduction (LMVR) against the more commonly used SentencePiece (SP) for the task of translating from English into four different Dravidian languages. Additionally we investigate the optimal subword vocabulary size for each language. We find that SP is the overall best choice for segmentation, and that larger subword vocabulary sizes lead to higher translation quality.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Dravidian languages are an important family of languages spoken by about 250 million of people primarily located in Southern India and Sri Lanka (Steever, 2019) . Kannada (KN), Malayalam (MA), Tamil (TA) and Telugu (TE) are the four most spoken Dravidian languages with approximately 47, 34, 71 and 79 million native speakers, respectively. Together, they account for 93% of all Dravidian language speakers. While Kannada, Malayalam and Tamil are classified as South Dravidian languages, Telugu is a part of South-Central Dravidian languages. All four languages are SOV (Subject-Object-Verb) languages with free word order. They are highly agglutinative and inflectionally rich languages. Additionally, each language has a different writing system. Table 1 presents an English sentence example and its Dravidianlanguage translations.", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 160, |
| "text": "(Steever, 2019)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 749, |
| "end": 756, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The highly complex morphology of the Dravidian languages under study is illustrated if we compare translated sentence pairs. The analysis of our parallel datasets (section 4.1, Table 3 ) shows for instance that an average English sentence contains almost ten times as many words as its Kannada equivalent. For the other three languages, the ratio is a bit smaller but the difference with English remains considerable. This indicates why it is important to consider word segmentation algorithms as part of the translation system.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 177, |
| "end": 184, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper we describe our work on Neural Machine Translation (NMT) from English into the Dravidian languages Kannada, Malayalam, Tamil and Telugu. We investigated the optimal translation settings for the pairs and in particular looked at the effect of word segmentation. The aim of the paper is to answer the following research questions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Does LMVR, a linguistically motivated word segmentation algorithm, outperform the purely data-driven SentencePiece?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 What is the optimal subword dictionary size for translating from English into these Dravidian languages?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In what follows, we review the relevant previous work (Sect. 2), introduce the two segmenters (Sect. 3), describe the experimental setup (Sect. 4), and present our answers to the above research questions (Sect. 5).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Statistical Machine Translation One of the earliest automatic translation systems for English into a Dravidian language was the English\u2192Tamil system by Germann (2001) . They trained a hybrid rule-based/statistical machine translation system that was trained on only 5k English-Tamil parallel sentences. Ramasamy et al. (2012) sentences (henceforth referred to as UFAL). They also reported that applying pre-processing steps involving morphological rules based on Tamil suffixes improved the BLEU score of the baseline model to a small extent (from 9.42 to 9.77). For the Indic languages multilingual tasks of WAT-2018, the Phrasal-based SMT system of Ojha et al. (2018) with a BLEU score of 30.53. Subsequent papers also focused on SMT systems for Malayalam and Telugu with some notable work including: (Anto and Nisha, 2016; Bhattacharyya, 2017, 2018) for Malayalam and (Lingam et al., 2014; Yadav and Lingam, 2017) for Telugu.", |
| "cite_spans": [ |
| { |
| "start": 152, |
| "end": 166, |
| "text": "Germann (2001)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 303, |
| "end": 325, |
| "text": "Ramasamy et al. (2012)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 803, |
| "end": 825, |
| "text": "(Anto and Nisha, 2016;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 826, |
| "end": 852, |
| "text": "Bhattacharyya, 2017, 2018)", |
| "ref_id": null |
| }, |
| { |
| "start": 857, |
| "end": 892, |
| "text": "Malayalam and (Lingam et al., 2014;", |
| "ref_id": null |
| }, |
| { |
| "start": 893, |
| "end": 916, |
| "text": "Yadav and Lingam, 2017)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Translation Systems", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "On the neural machine translation (NMT) side, there have been a handful of NMT systems trained on English\u2192Tamil. On the aforementioned Indic languages multilingual tasks of WAT-2018 , Sen et al. (2018 , Dabre et al. (2018) reported only 11.88 and 18.60 BLEU scores, respectively, for English\u2192Tamil. The poor performance of these systems compared to the 30.53 BLEU score of the SMT system (Ojha et al., 2018) showed that those NMT systems were not yet suitable for translating into the morphologically rich Tamil.", |
| "cite_spans": [ |
| { |
| "start": 173, |
| "end": 181, |
| "text": "WAT-2018", |
| "ref_id": null |
| }, |
| { |
| "start": 182, |
| "end": 200, |
| "text": ", Sen et al. (2018", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 203, |
| "end": 222, |
| "text": "Dabre et al. (2018)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 388, |
| "end": 407, |
| "text": "(Ojha et al., 2018)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation", |
| "sec_num": null |
| }, |
| { |
| "text": "However, the following year, Philip et al. (2019) outperformed Ramasamy et al. (2012) on the UFAL dataset with a BLEU score of 13.05 (the previous best score on this test set was 9.77). They report that techniques such as domain adaptation and back-translation can make training NMT systems on low-resource languages possible. Similar findings was also reported by Ramesh et al. (2020) for Tamil and Dandapat and Federmann (2018) for Telugu .", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 85, |
| "text": "Ramasamy et al. (2012)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 365, |
| "end": 385, |
| "text": "Ramesh et al. (2020)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 400, |
| "end": 429, |
| "text": "Dandapat and Federmann (2018)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation", |
| "sec_num": null |
| }, |
| { |
| "text": "To the best of our knowledge and as of 2021, there has not been any scientific publication involving translation to and from Kannada, except for Chakravarthi et al. (2019). One possible reason for this could be the fact that sizeable corpora involving Kannada (i.e. in the order of magnitude of at least thousand sentences) have been readily available only since 2019, with the release of the JW300 Corpus (Agi\u0107 and Vuli\u0107, 2019) .", |
| "cite_spans": [ |
| { |
| "start": 406, |
| "end": 428, |
| "text": "(Agi\u0107 and Vuli\u0107, 2019)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Machine Translation", |
| "sec_num": null |
| }, |
| { |
| "text": "Since 2018 several studies have presented multilingual NMT systems that can handle English \u2192 Malayalam, Tamil and Telugu translation (Dabre et al., 2018; Choudhary et al., 2020; Ojha et al., 2018; Sen et al., 2018; Yu et al., 2020; Dabre and Chakrabarty, 2020) . In particular, Sen et al. (2018) presented results where the BLEU score improved when comparing monolingual and multilingual models. Conversely, Yu et al. (2020) found that NMT systems that were multi-way (Indic \u2194 Indic) performed worse than English \u2194 Indic systems.", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 153, |
| "text": "(Dabre et al., 2018;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 154, |
| "end": 177, |
| "text": "Choudhary et al., 2020;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 178, |
| "end": 196, |
| "text": "Ojha et al., 2018;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 197, |
| "end": 214, |
| "text": "Sen et al., 2018;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 215, |
| "end": 231, |
| "text": "Yu et al., 2020;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 232, |
| "end": 260, |
| "text": "Dabre and Chakrabarty, 2020)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 278, |
| "end": 295, |
| "text": "Sen et al. (2018)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 408, |
| "end": 424, |
| "text": "Yu et al. (2020)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual NMT", |
| "sec_num": null |
| }, |
| { |
| "text": "To our knowledge, no work so far has explored the effect of the segmentation algorithm and dictionary size on the four languages: Kannada, Malayalam, Tamil and Telugu.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multilingual NMT", |
| "sec_num": null |
| }, |
| { |
| "text": "Prior to the emergence of subword segmenters, translation systems were plagued with the issue of out-of-vocabulary (OOV) tokens. This was particularly an issue for translations involving agglutinative languages such as Turkish (Ataman and Federico, 2018) or Malayalam (Manohar et al., 2020) . Various segmentation algorithms were brought forward to circumvent this issue and in turn, improve translation quality.", |
| "cite_spans": [ |
| { |
| "start": 227, |
| "end": 254, |
| "text": "(Ataman and Federico, 2018)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 258, |
| "end": 290, |
| "text": "Malayalam (Manohar et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subword Segmentation Techniques", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Perhaps the most widely used algorithm in NMT to date is the language-agnostic Byte Pair Encoding (BPE) by Sennrich et al. (2016) . Initially proposed by Gage (1994) , BPE was repurposed by Sennrich et al. (2016) for the task of subword segmentation, and is based on a simple principle whereby pairs of character sequences that are frequently observed in a corpus get merged iteratively until a predetermined dictionary size is attained. In this paper we use a popular implementation of BPE, called SentencePiece (SP) (Kudo and Richardson, 2018) . While purely statistical algorithms are able to segment any token into smaller segments, there is no guarantee that the generated tokens will be linguistically sensible. Unsupervised morphological induction is a rich area of research that also aims at learning a segmentation from data, but in a linguistically motivated way. The most well-known example is Morphessor with its different variants (Creutz and Lagus, 2002; Kohonen et al., 2010; Gr\u00f6nroos et al., 2014 ). An important obstacle to applying Morfessor to the task of NMT is the lack of a mechanism to determine the dictionary size.", |
| "cite_spans": [ |
| { |
| "start": 107, |
| "end": 129, |
| "text": "Sennrich et al. (2016)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 154, |
| "end": 165, |
| "text": "Gage (1994)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 190, |
| "end": 212, |
| "text": "Sennrich et al. (2016)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 518, |
| "end": 545, |
| "text": "(Kudo and Richardson, 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 944, |
| "end": 968, |
| "text": "(Creutz and Lagus, 2002;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 969, |
| "end": 990, |
| "text": "Kohonen et al., 2010;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 991, |
| "end": 1012, |
| "text": "Gr\u00f6nroos et al., 2014", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subword Segmentation Techniques", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To address this, Ataman et al. 2017proposed a modification of Morfessor FlatCat (Gr\u00f6nroos et al., 2014) , called Linguistically Motivated Vocabulary Reduction (LMVR). Specifically, LMVR imposes an extra condition on the cost function of Morfessor Flatcat so as to favour vocabularies of the desired size. In a comparison of LMVR to BPE, Ataman et al. 2017reported a +2.3 BLEU improvement on the English-Turkish translation task of WMT18.", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 103, |
| "text": "(Gr\u00f6nroos et al., 2014)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subword Segmentation Techniques", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Given the encouraging results reported on the agglutinative Turkish language, we hypothesise that translation into Dravidian languages may also benefit from a linguistically motivated segmenter, and evaluate LMVR against SP across varying vocabulary sizes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Subword Segmentation Techniques", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The parallel training data is mostly taken from the datasets available for the MultiIndicMT task from WAT 2021. If a certain dataset is not available from the MultiIndicMT training repository, we resorted to extract that dataset from OPUS (Tiedemann, 2012) or WMT20. Table 2 reports on the datasets that we used along with their domain and their source.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 267, |
| "end": 274, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training Corpora", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "After extracting and cleaning the data (see below), approximately 8 million English tokens and their corresponding target language tokens are selected as our training corpora. We fixed the number of source tokens across language pairs in or-Target Language Tokens(k) EN Tokens(k) Sentences(k) Source/ Target Token Ratio Kannada 817 7791 361 9.53 Malayalam 1153 7973 458 6.91 Tamil 1171 7854 345 6.71 Telugu 1027 7872 385 7.67 Table 3 : Approximate sizes (in thousands) of the parallel training corpora der to compare the efficacy of a segmentation technique across the languages without a size bias. Table 3 presents the statistics on the corpora for all language pairs. One takeaway from the table is that there is a very large difference in the token sizes between English and the Dravidian languages. On average, there are 6 to 9 times more tokens on the English side of a corpus than on its Dravidian language translation. This shows that all our Dravidian languages are morphologically very complex, but there are also important differences among them, with Kannada having the highest source/target ratio, considerably higher than the more widely studied Tamil language.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 301, |
| "end": 456, |
| "text": "Target Token Ratio Kannada 817 7791 361 9.53 Malayalam 1153 7973 458 6.91 Tamil 1171 7854 345 6.71 Telugu 1027 7872 385 7.67 Table 3", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 623, |
| "end": 630, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training Corpora", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Sentence pairs with identical source and target sides, or with more than 150 tokens are removed. The target language texts are then normalized using the Indic NLP Library 1 . Afterwards, either SP 2 or LMVR 3 is used to segment both source and target sentences. To further reduce noise in the datasets, we discard sentences pairs with either (i) a target to source length ratio above 0.7 or (ii) a language match threshold below 85% according to the lang-id tool (Lui and Baldwin, 2011) , and (iii) duplicate sentence pairs.", |
| "cite_spans": [ |
| { |
| "start": 463, |
| "end": 486, |
| "text": "(Lui and Baldwin, 2011)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pre-Processing", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We developed our NMT systems using Fairseq (Ott et al., 2019) . We adopt the Transformer-Base implementation (BASE) with a few modifications following the architecture setup of Philip et al. (2019) and Dhar et al. (2020) . These modifications include: setting both encoder and decoder layers to 6, embedding dimensions to size 1024 and number of attention heads to 8. Training is performed using batches of 4k tokens, using a label-smoothed cross entropy loss. The hidden layers are of 1024 dimensions and layer normalization is applied before each encoder and decoder layer. Dropout is set to 0.001 and weight decay to 0.2. Our loss function is cross-entropy with label smoothing of 0.3. The models are trained for a maximum of 100 epochs with early stopping criterion set to 5.", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 61, |
| "text": "(Ott et al., 2019)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 177, |
| "end": 197, |
| "text": "Philip et al. (2019)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 202, |
| "end": 220, |
| "text": "Dhar et al. (2020)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NMT Training", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The segmentation algorithms are trained on the training data described in Section 4.1. We experiment with the following subword dictionary sizes: 1k, 5k, 10k, 15k, 20k, 30k, 40k and 50k. In all experiments, we learn separate subword dictionaries for the source and target languages, for two reasons: (i) LMVR is a linguistically motivated morphology learning algorithm that models the composition of a word based on the transitions between different morphemes and their categories. Therefore, training jointly on two languages would not be a principled choice. (ii) Prior studies such as (Dhar et al., 2020) have reported better translation scores for English-Tamil using SP models that were separately trained on the source and target sides.", |
| "cite_spans": [ |
| { |
| "start": 588, |
| "end": 607, |
| "text": "(Dhar et al., 2020)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dictionary Size", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The NMT systems are evaluated and tested on the official development and test sets, respectively from WAT21. These evaluation sets are sourced from the PMIndia dataset (Haddow and Kirefu, 2020) . During validation, models are evaluated by BLEU on the segmented data, whereas final test scores are computed on the un-segmented and detokenized sentences (de-tokenization is performed with the Indic NLP library tool). In addition to BLEU (Papineni et al., 2002) , we also report on CHRF score (Popovi\u0107, 2015) , which is based on character n-grams and is therefore more suitable to assess translation quality in morphologically complex languages. 4 We report the macro-averaged ferences are +14.9 for BLEU and +5.9 for CHRF.", |
| "cite_spans": [ |
| { |
| "start": 168, |
| "end": 193, |
| "text": "(Haddow and Kirefu, 2020)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 436, |
| "end": 459, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 491, |
| "end": 506, |
| "text": "(Popovi\u0107, 2015)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 644, |
| "end": 645, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "However, we find no clear winner for the other two languages. When observing only BLEU scores, LMVR appears to have the upper hand, with an improvement of +2.8 BLEU and +4.5 BLEU for Malayalam and Telugu, respectively. However the results are flipped when we look at the CHRF scores. SP systems here report higher scores, with +3.5 improvement in Malayalam and +1.1 for Telugu. Given the morphological richness of our target languages, we take CHRF as the more reliable score, and conclude that the purely statistical segmenter SP is a better choice for translation into Dravidian languages in our setup.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mixed results for Telugu and Malayalam:", |
| "sec_num": null |
| }, |
| { |
| "text": "Larger dictionary sizes better: When observing the effect of the dictionary size, we find that the size 50k gives the highest BLEU scores for Malayalam, Tamil and Telugu. This is in contrast with studies such as (Philip et al., 2019; Sennrich and Zhang, 2019) who suggest to use a smaller dictionary size for low-resource settings. For these language pairs, we see a steady increase in BLEU and CHRF as we increase the dictionary size. For Kannada, the best results are obtained for much smaller dictionary sizes, but in contrast with the other three languages, the differences between the scores for other dictionary sizes is much smaller. For instance, looking at the CHRF scores of SP, the numbers decrease from 48.3 to 46.0, whereas for instance for Malayalam, these numbers range from 47.4 to 63.6.", |
| "cite_spans": [ |
| { |
| "start": 212, |
| "end": 233, |
| "text": "(Philip et al., 2019;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 234, |
| "end": 259, |
| "text": "Sennrich and Zhang, 2019)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mixed results for Telugu and Malayalam:", |
| "sec_num": null |
| }, |
| { |
| "text": "Kannada hardest to translate: When comparing more in general translation difficulty across target languages, Kannada appears to be the most challenging language by far. A possible explanation for this difference is the genre distribution of our datasets (cf. Table 2) : While the test sets are from PMIndia (a mixture of background information, news and speeches), the majority of our Kannada training data consists of religion related texts. Another possible confounding factor is that we based our NMT configuration on prior work that focused only on English-Tamil (Philip et al., 2019; Dhar et al., 2020) , and this may be sub-optimal for the other Dravidian languages despite the similar training data size.", |
| "cite_spans": [ |
| { |
| "start": 567, |
| "end": 588, |
| "text": "(Philip et al., 2019;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 589, |
| "end": 607, |
| "text": "Dhar et al., 2020)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 259, |
| "end": 267, |
| "text": "Table 2)", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Mixed results for Telugu and Malayalam:", |
| "sec_num": null |
| }, |
| { |
| "text": "6.1 Different Subtokens generated Table 4 presents the Jaccard similarity (JS) between the segmenter outputs between LMVR and SP. The outputs are either the types (dictionaries) or the tokens in the training sentences. A JS of 0 denotes that none of the subwords were the same in the sentences being compared, while a score of 100 denotes a complete match (i.e, they are identical). As visible from the scores, though there is some sharing of types between the segmenters (ranging from 9-17%), there is no such sharing of subwords in the training data, with a maximum JS score of only around 4% for the smallest dictionary sizes. In fact, these values reduce even further as the dictionary size are increased. For the largest dictionary size (50k), almost no subtoken sharing occurs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 34, |
| "end": 41, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We carried out an analysis on the effect of unknown subwords found in the development set after the application of a given segmentation algorithm.We present these statistics in Figure 1 . Few details stand out:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 177, |
| "end": 185, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Unknown Subwords", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "High percentage of unknown subwords in Kannada with LMVR While development sets encoded with SP reported the lowest percentage of unknowns, it is the complete opposite for the ones encoded with LMVR (0.2% vs 15% on average). This could have played a role in the lowest CHRF scores achieved by the LMVR systems on Kannada.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Unknown Subwords", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "LMVR sensitive to dictionary size This is observed in particular for Kannada and Malayalam, where the increase in dictionary size leads to higher numbers of unknown subwords. Conversely for SP, increasing the dictionary size causes no major change in the number of unknowns found for these two languages. On the other hand, SP is more susceptible to the dictionary size for Tamil while Telugu, in general, does not present any such trends. Overall we find no strong correlation between system performance and percentage of unknown subwords. By contrast, and quite surprisingly so, our best NMT systems for Malayalam, Tamil and Telugu are those with larger dictionary sizes and higher percentage of unknowns in the development set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Unknown Subwords", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We also looked at the effect of the segmenter on the subword length. Given a language and segmenter, we calculate the average length of a subword (in characters) for the training sets. In Figure 2 we plot the distribution of the average subword lengths for all our settings. Few observations are apparent,", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 188, |
| "end": 196, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of subword lengths", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "\u2022 For every language and dictionary size, LMVR results in shorter subwords. Taking dictionary size of 50k as an example, the dif- ference between LMVR and SP ranges from 1.2 for Malayalam to 1.7 for Tamil.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of subword lengths", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "\u2022 As the dictionary size increases, we see the distributions spreading out. As the dictionary size decreases, the distributions become more centered. This is particularly seen for LMVR. As the dictionary size increases, the distributions of the SP systems spread out more than their LMVR counterparts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of subword lengths", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "\u2022 While it makes sense that the average subword length increases as we increase the dictionary size (from 3 to 5), the apparent widening in the difference between SP and LMVR is not so easily explained.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of subword lengths", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "In the end however, we find no discernible connection between the subword length and the performance of a segmenter. Across all languages, we see similar trends of how the distrubtions change, but this does not seem to affect the translation quality, as seen in the difference in the CHRF scores.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of subword lengths", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "We presented our work on Neural Machine Translation from English into four Dravidian languages (Kannada, Malayalam, Tamil and Telugu). Several experiments were carried out to find out whether a linguistically motivated subword segmenter (LMVR) is more suitable than a purely statistical one (SentencePiece) for translating into the morphologically complex Dravidian languages, while using a Transformer architecture. While BLEU results were mixed on Malayalam and Telugu, CHRF scores clearly suggest that Sentence-Piece remains the best option for all of our tested language pairs. We also found interesting differences among the four target languages. Though they all belong to the same language family and share various linguistic phenomena, they are different with respect to source/target token ratio (Table 3) , and the rate of unknown subwords in the development set (Figure 1) . Whether this is due to linguistic characteristics or to genre differences in the training corpora remains hard to gauge.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 805, |
| "end": 814, |
| "text": "(Table 3)", |
| "ref_id": null |
| }, |
| { |
| "start": 873, |
| "end": 883, |
| "text": "(Figure 1)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Finally, we invite future researchers to carry out research on Dravidian languages, especially Kannada. Compared to the plethora of work found for other languages, the work on Dravidian languages is lagging behind. As our results show, there remains a large space for improvements, particularly Figure 2 : The Probability density function plot showing the distribution of the average subword length for a given segmenter and language on the training sets. The colored boxes denote the mean of the respective distributions. Also included are the differences in the CHRF scores between SP and LMVR.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 295, |
| "end": 303, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "when translating into these languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "http://anoopkunchukuttan.github.io/indic_ nlp_library/ 2 https://github.com/google/sentencepiece 3 https://github.com/d-ataman/lmvr", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We compute BLEU scores with SacreBLEU(Post, 2018), and CHRF scores with chrF++.py https://github.com/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "JW300: A widecoverage parallel corpus for low-resource languages", |
| "authors": [ |
| { |
| "first": "\u017deljko", |
| "middle": [], |
| "last": "Agi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3204--3210", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1310" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "\u017deljko Agi\u0107 and Ivan Vuli\u0107. 2019. JW300: A wide- coverage parallel corpus for low-resource languages. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3204-3210, Florence, Italy. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Text to speech synthesis system for english to malayalam translation", |
| "authors": [ |
| { |
| "first": "Ancy", |
| "middle": [], |
| "last": "Anto", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Nisha", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "International Conference on Emerging Technological Trends (ICETT)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ancy Anto and K. Nisha. 2016. Text to speech syn- thesis system for english to malayalam translation. 2016 International Conference on Emerging Techno- logical Trends (ICETT), pages 1-6.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Compositional representation of morphologically-rich input for neural machine translation", |
| "authors": [ |
| { |
| "first": "Duygu", |
| "middle": [], |
| "last": "Ataman", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "305--311", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-2049" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duygu Ataman and Marcello Federico. 2018. Compo- sitional representation of morphologically-rich input for neural machine translation. In Proceedings of the 56th Annual Meeting of the Association for Com- putational Linguistics (Volume 2: Short Papers), pages 305-311, Melbourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Linguistically motivated vocabulary reduction for neural machine translation from turkish to english", |
| "authors": [ |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Duygu Ataman", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Negri", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Turchi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "The Prague Bulletin of Mathematical Linguistics", |
| "volume": "108", |
| "issue": "1", |
| "pages": "331--342", |
| "other_ids": { |
| "DOI": [ |
| "10.1515/pralin-2017-0031" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Duygu Ataman, Matteo Negri, Marco Turchi, and Mar- cello Federico. 2017. Linguistically motivated vo- cabulary reduction for neural machine translation from turkish to english. The Prague Bulletin of Mathematical Linguistics, 108(1):331 -342.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Comparison of Different Orthographies for Machine Translation of Under-Resourced Dravidian Languages", |
| "authors": [ |
| { |
| "first": "Mihael", |
| "middle": [], |
| "last": "Bharathi Raja Chakravarthi", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "P" |
| ], |
| "last": "Arcan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccrae", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "2nd Conference on Language, Data and Knowledge (LDK 2019", |
| "volume": "70", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": { |
| "DOI": [ |
| "10.4230/OASIcs.LDK.2019.6" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bharathi Raja Chakravarthi, Mihael Arcan, and John P. McCrae. 2019. Comparison of Different Orthogra- phies for Machine Translation of Under-Resourced Dravidian Languages. In 2nd Conference on Lan- guage, Data and Knowledge (LDK 2019), volume 70 of OpenAccess Series in Informatics (OASIcs), pages 6:1-6:14, Dagstuhl, Germany. Schloss Dagstuhl- Leibniz-Zentrum fuer Informatik.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Neural machine translation for lowresourced Indian languages", |
| "authors": [ |
| { |
| "first": "Himanshu", |
| "middle": [], |
| "last": "Choudhary", |
| "suffix": "" |
| }, |
| { |
| "first": "Shivansh", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajesh", |
| "middle": [], |
| "last": "Rohilla", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "3610--3615", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Himanshu Choudhary, Shivansh Rao, and Rajesh Ro- hilla. 2020. Neural machine translation for low- resourced Indian languages. In Proceedings of the 12th Language Resources and Evaluation Confer- ence, pages 3610-3615, Marseille, France. Euro- pean Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Unsupervised discovery of morphemes", |
| "authors": [ |
| { |
| "first": "Mathias", |
| "middle": [], |
| "last": "Creutz", |
| "suffix": "" |
| }, |
| { |
| "first": "Krista", |
| "middle": [], |
| "last": "Lagus", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the ACL-02 Workshop on Morphological and Phonological Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "21--30", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1118647.1118650" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mathias Creutz and Krista Lagus. 2002. Unsupervised discovery of morphemes. In Proceedings of the ACL- 02 Workshop on Morphological and Phonological Learning, pages 21-30. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "NICT's submission to WAT 2020: How effective are simple many-to-many neural machine translation models?", |
| "authors": [ |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhisek", |
| "middle": [], |
| "last": "Chakrabarty", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 7th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "98--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raj Dabre and Abhisek Chakrabarty. 2020. NICT's submission to WAT 2020: How effective are sim- ple many-to-many neural machine translation mod- els? In Proceedings of the 7th Workshop on Asian Translation, pages 98-102, Suzhou, China. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "NICT's participation in WAT 2018: Approaches using multilingualism and recurrently stacked layers", |
| "authors": [ |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Kunchukuttan", |
| "suffix": "" |
| }, |
| { |
| "first": "Atsushi", |
| "middle": [], |
| "last": "Fujita", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 32nd Pacific Asia Conference on Language, Information and Computation: 5th Workshop on Asian Translation: 5th Workshop on Asian Translation, Hong Kong. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raj Dabre, Anoop Kunchukuttan, Atsushi Fujita, and Eiichiro Sumita. 2018. NICT's participation in WAT 2018: Approaches using multilingualism and recur- rently stacked layers. In Proceedings of the 32nd Pa- cific Asia Conference on Language, Information and Computation: 5th Workshop on Asian Translation: 5th Workshop on Asian Translation, Hong Kong. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Iterative data augmentation for neural machine translation: a low resource case study for english-telugu", |
| "authors": [ |
| { |
| "first": "Sandipan", |
| "middle": [], |
| "last": "Dandapat", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Federmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 21st Annual Conference of the European Association for Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "287--292", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sandipan Dandapat and Christian Federmann. 2018. It- erative data augmentation for neural machine trans- lation: a low resource case study for english-telugu. In Proceedings of the 21st Annual Conference of the European Association for Machine Translation, pages 287-292, Alacant, Spain.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Linguistically motivated subwords for English-Tamil translation: University of Groningen's submission to WMT-2020", |
| "authors": [ |
| { |
| "first": "Prajit", |
| "middle": [], |
| "last": "Dhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Arianna", |
| "middle": [], |
| "last": "Bisazza", |
| "suffix": "" |
| }, |
| { |
| "first": "Gertjan", |
| "middle": [], |
| "last": "Van Noord", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Fifth Conference on Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "126--133", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Prajit Dhar, Arianna Bisazza, and Gertjan van No- ord. 2020. Linguistically motivated subwords for English-Tamil translation: University of Gronin- gen's submission to WMT-2020. In Proceedings of the Fifth Conference on Machine Translation, pages 126-133, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A new algorithm for data compression", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Gage", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "C Users J", |
| "volume": "12", |
| "issue": "2", |
| "pages": "23--38", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Gage. 1994. A new algorithm for data compres- sion. C Users J., 12(2):23-38.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Building a statistical machine translation system from scratch: How much bang for the buck can we expect?", |
| "authors": [ |
| { |
| "first": "Ulrich", |
| "middle": [], |
| "last": "Germann", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the ACL 2001 Workshop on Data-Driven Methods in Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ulrich Germann. 2001. Building a statistical machine translation system from scratch: How much bang for the buck can we expect? In Proceedings of the ACL 2001 Workshop on Data-Driven Methods in Machine Translation.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Morfessor FlatCat: An HMMbased method for unsupervised and semi-supervised learning of morphology", |
| "authors": [ |
| { |
| "first": "Stig-Arne", |
| "middle": [], |
| "last": "Gr\u00f6nroos", |
| "suffix": "" |
| }, |
| { |
| "first": "Sami", |
| "middle": [], |
| "last": "Virpioja", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Smit", |
| "suffix": "" |
| }, |
| { |
| "first": "Mikko", |
| "middle": [], |
| "last": "Kurimo", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "1177--1185", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stig-Arne Gr\u00f6nroos, Sami Virpioja, Peter Smit, and Mikko Kurimo. 2014. Morfessor FlatCat: An HMM- based method for unsupervised and semi-supervised learning of morphology. In Proceedings of COLING 2014, the 25th International Conference on Compu- tational Linguistics: Technical Papers, pages 1177- 1185, Dublin, Ireland.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Pmindia -a collection of parallel corpora of languages of india", |
| "authors": [ |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Faheem", |
| "middle": [], |
| "last": "Kirefu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barry Haddow and Faheem Kirefu. 2020. Pmindia -a collection of parallel corpora of languages of india.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Semi-supervised learning of concatenative morphology", |
| "authors": [ |
| { |
| "first": "Oskar", |
| "middle": [], |
| "last": "Kohonen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sami", |
| "middle": [], |
| "last": "Virpioja", |
| "suffix": "" |
| }, |
| { |
| "first": "Krista", |
| "middle": [], |
| "last": "Lagus", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 11th Meeting of the ACL Special Interest Group on Computational Morphology and Phonology", |
| "volume": "", |
| "issue": "", |
| "pages": "78--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oskar Kohonen, Sami Virpioja, and Krista Lagus. 2010. Semi-supervised learning of concatenative morphol- ogy. In Proceedings of the 11th Meeting of the ACL Special Interest Group on Computational Morphol- ogy and Phonology, pages 78-86, Uppsala, Sweden. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "SentencePiece: A simple and language independent subword tokenizer and detokenizer for neural text processing", |
| "authors": [ |
| { |
| "first": "Taku", |
| "middle": [], |
| "last": "Kudo", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "66--71", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-2012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taku Kudo and John Richardson. 2018. SentencePiece: A simple and language independent subword tok- enizer and detokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 66-71, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "English to telugu rule based machine translation system: A hybrid approach", |
| "authors": [ |
| { |
| "first": ",", |
| "middle": [ |
| "E" |
| ], |
| "last": "Keerthi Lingam", |
| "suffix": "" |
| }, |
| { |
| "first": "Srujana", |
| "middle": [], |
| "last": "Ramalakshmi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Inturi", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "International Journal of Computer Applications", |
| "volume": "101", |
| "issue": "2", |
| "pages": "19--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Keerthi Lingam, E. Ramalakshmi, and Srujana Inturi. 2014. English to telugu rule based machine transla- tion system: A hybrid approach. International Jour- nal of Computer Applications, 101(2):19-24. Full text available.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Cross-domain feature selection for language identification", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Lui", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Baldwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of 5th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "553--561", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Lui and Timothy Baldwin. 2011. Cross-domain feature selection for language identification. In Pro- ceedings of 5th International Joint Conference on Natural Language Processing, pages 553-561, Chi- ang Mai, Thailand. Asian Federation of Natural Lan- guage Processing.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Quantitative analysis of the morphological complexity of malayalam language", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "R" |
| ], |
| "last": "Kavya Manohar", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajeev", |
| "middle": [], |
| "last": "Jayan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rajan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "71--78", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kavya Manohar, A. R. Jayan, and Rajeev Rajan. 2020. Quantitative analysis of the morphological complex- ity of malayalam language. In Text, Speech, and Di- alogue, pages 71-78, Cham. Springer International Publishing.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The RGNLP machine translation systems for", |
| "authors": [ |
| { |
| "first": "Atul", |
| "middle": [], |
| "last": "Kr", |
| "suffix": "" |
| }, |
| { |
| "first": "Dutta", |
| "middle": [], |
| "last": "Ojha", |
| "suffix": "" |
| }, |
| { |
| "first": "Chao-Hong", |
| "middle": [], |
| "last": "Chowdhury", |
| "suffix": "" |
| }, |
| { |
| "first": "Karan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Saxena", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 32nd Pacific Asia Conference on Language, Information and Computation: 5th Workshop on Asian Translation: 5th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Atul Kr. Ojha, Koel Dutta Chowdhury, Chao-Hong Liu, and Karan Saxena. 2018. The RGNLP ma- chine translation systems for WAT 2018. In Pro- ceedings of the 32nd Pacific Asia Conference on Lan- guage, Information and Computation: 5th Workshop on Asian Translation: 5th Workshop on Asian Trans- lation, Hong Kong. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "fairseq: A fast, extensible toolkit for sequence modeling", |
| "authors": [ |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [], |
| "last": "Baevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Angela", |
| "middle": [], |
| "last": "Fan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of NAACL-HLT 2019: Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of NAACL-HLT 2019: Demonstrations.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": ",", |
| "middle": [ |
| "S" |
| ], |
| "last": "Kishore Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, S. Roukos, T. Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In ACL.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Cvit's submissions to wat-2019", |
| "authors": [ |
| { |
| "first": "Jerin", |
| "middle": [], |
| "last": "Philip", |
| "suffix": "" |
| }, |
| { |
| "first": "Shashank", |
| "middle": [], |
| "last": "Siripragada", |
| "suffix": "" |
| }, |
| { |
| "first": "Upendra", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Vinay", |
| "middle": [], |
| "last": "Namboodiri", |
| "suffix": "" |
| }, |
| { |
| "first": "C V", |
| "middle": [], |
| "last": "Jawahar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 6th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "131--136", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-5215" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jerin Philip, Shashank Siripragada, Upendra Kumar, Vinay Namboodiri, and C V Jawahar. 2019. Cvit's submissions to wat-2019. In Proceedings of the 6th Workshop on Asian Translation, pages 131-136, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "chrF: character n-gram F-score for automatic MT evaluation", |
| "authors": [ |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Popovi\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Tenth Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "392--395", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W15-3049" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maja Popovi\u0107. 2015. chrF: character n-gram F-score for automatic MT evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395, Lisbon, Portugal. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A call for clarity in reporting BLEU scores", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Third Conference on Machine Translation: Research Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "186--191", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-6319" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186- 191, Brussels, Belgium. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Morphological processing for english-tamil statistical machine translation", |
| "authors": [ |
| { |
| "first": "Loganathan", |
| "middle": [], |
| "last": "Ramasamy", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "Zden\u011bk", |
| "middle": [], |
| "last": "\u017dabokrtsk\u00fd", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Workshop on Machine Translation and Parsing in Indian Languages (MTPIL-2012)", |
| "volume": "", |
| "issue": "", |
| "pages": "113--122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Loganathan Ramasamy, Ond\u0159ej Bojar, and Zden\u011bk \u017dabokrtsk\u00fd. 2012. Morphological processing for english-tamil statistical machine translation. In Pro- ceedings of the Workshop on Machine Translation and Parsing in Indian Languages (MTPIL-2012), pages 113-122.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "An error-based investigation of statistical and neural machine translation performance on Hindi-to-Tamil and Englishto-Tamil", |
| "authors": [ |
| { |
| "first": "Akshai", |
| "middle": [], |
| "last": "Ramesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Rejwanul", |
| "middle": [], |
| "last": "Venkatesh Balavadhani Parthasa", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Haque", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Way", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 7th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "178--188", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Akshai Ramesh, Venkatesh Balavadhani Parthasa, Re- jwanul Haque, and Andy Way. 2020. An error-based investigation of statistical and neural machine trans- lation performance on Hindi-to-Tamil and English- to-Tamil. In Proceedings of the 7th Workshop on Asian Translation, pages 178-188, Suzhou, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "IITP-MT at WAT2018: Transformer-based multilingual indic-English neural machine translation system", |
| "authors": [ |
| { |
| "first": "Sukanta", |
| "middle": [], |
| "last": "Sen", |
| "suffix": "" |
| }, |
| { |
| "first": "Asif", |
| "middle": [], |
| "last": "Kamal Kumar Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushpak", |
| "middle": [], |
| "last": "Ekbal", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 32nd Pacific Asia Conference on Language, Information and Computation: 5th Workshop on Asian Translation: 5th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sukanta Sen, Kamal Kumar Gupta, Asif Ekbal, and Pushpak Bhattacharyya. 2018. IITP-MT at WAT2018: Transformer-based multilingual indic- English neural machine translation system. In Pro- ceedings of the 32nd Pacific Asia Conference on Lan- guage, Information and Computation: 5th Workshop on Asian Translation: 5th Workshop on Asian Trans- lation, Hong Kong. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Revisiting lowresource neural machine translation: A case study", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Biao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "211--221", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1021" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich and Biao Zhang. 2019. Revisiting low- resource neural machine translation: A case study. In Proceedings of the 57th Annual Meeting of the As- sociation for Computational Linguistics, pages 211- 221, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "A case study on english-malayalam machine translation", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Sreelekha", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S Sreelekha and P Bhattacharyya. 2017. A case study on english-malayalam machine translation. ArXiv, abs/1702.08217.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Morphology injection for English-Malayalam statistical machine translation", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Sreelekha", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S Sreelekha and P Bhattacharyya. 2018. Morphology injection for English-Malayalam statistical machine translation. In Proceedings of the Eleventh Interna- tional Conference on Language Resources and Eval- uation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "The Dravidian Languages", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Sanford", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Steever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sanford B Steever. 2019. The Dravidian Languages. Routledge.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Parallel data, tools and interfaces in opus", |
| "authors": [ |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Tiedemann", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J\u00f6rg Tiedemann. 2012. Parallel data, tools and inter- faces in opus. In Proceedings of the Eight Interna- tional Conference on Language Resources and Eval- uation (LREC'12), Istanbul, Turkey. European Lan- guage Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Rule based machine translation of complex sentences from english to telugu", |
| "authors": [ |
| { |
| "first": "Yadav", |
| "middle": [], |
| "last": "K Deepthi", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Lingam", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Journal of Research", |
| "volume": "4", |
| "issue": "9", |
| "pages": "790--800", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K Deepthi Yadav and L Lingam. 2017. Rule based machine translation of complex sentences from en- glish to telugu. International Journal of Research, 4(9):790-800.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "HW-TSC's participation in the WAT 2020 indic languages multilingual task", |
| "authors": [ |
| { |
| "first": "Zhengzhe", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhanglin", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Daimeng", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Hengchao", |
| "middle": [], |
| "last": "Shang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiaxin", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Zongyao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Minghan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Liangyou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Lizhi", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ying", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 7th Workshop on Asian Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "92--97", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengzhe Yu, Zhanglin Wu, Xiaoyu Chen, Daimeng Wei, Hengchao Shang, Jiaxin Guo, Zongyao Li, Minghan Wang, Liangyou Li, Lizhi Lei, Hao Yang, and Ying Qin. 2020. HW-TSC's participation in the WAT 2020 indic languages multilingual task. In Pro- ceedings of the 7th Workshop on Asian Translation, pages 92-97, Suzhou, China. Association for Com- putational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "text": "Number of unknown tokens (in percentages) in the development set vs Dictionary size for each language and segmentation type. Also systems that reported the lowest and highest CHRF scores (on the development set) for each language and segmentation are marked.", |
| "uris": null, |
| "num": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "text": "avaru n\u0101gapat \u0323t \u0323an \u0323am jilleya tirukkuvalay gr\u0101madalli 1924ra j\u016bn 3randu janisiddaru.1924\u0d32 \u0d4d \u0d28\u0d3e\u0d17\u0d2a \u0d23\u0d02 \u0d1c\u0d3f \u0d2f\u0d3f\u0d46\u0d32 \u0d24\u0d3f\u0d30\u0d41 \u0d41\u0d35\u0d48\u0d33 \u0d17\u0d3e\u0d2e \u0d3f\u0d32\u0d3e\u0d23\u0d4d \u0d05\u0d47 \u0d39\u0d02 \u0d1c\u0d28\u0d3f \u0d24\u0d4d ML 1924l n\u0101gapat \u0323t \u0323an \u0323am jillayile tirukkuval \u0323ai gr\u0101mattil\u0101n \u0323 add\u0113ham janiccat.", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td colspan=\"4\">EN He was born in Thirukkuvalai village in Nagapattinam District on 3rd June, 1924.</td></tr><tr><td>KN</td><td>\u0c85\u0cb5\u0cb0\u0cc1 \u0c97\u0caa\u0c9f \u0ca3\u0c82</td><td>\u0caf \u0cb0\u0cc1\u0c95\u0cc1 \u0cb5\u0cb2</td><td>\u0cae\u0ca6 1924\u0cb0 \u0c9c\u0cc2 3\u0cb0\u0c82\u0ca6\u0cc1 \u0c9c \u0ca6 \u0cb0\u0cc1.</td></tr><tr><td colspan=\"4\">\u0ba8\u0bbe\u0b95 \u0baa \u0b9c\u0bc2 \u0bae\u0bbe\u0ba4 3-\u0b86 \u0bc7\u0ba4 \u0ba9 \u0bae\u0bbe\u0bb5 \u0b9f TA n\u0101gappat \u0323t \u0323inam m\u0101vat \u0323t \u0323am tirukkuval \u0323aik kir\u0101mattil avar 1924-\u0101m \u0101n \u0323t \u0323u j\u016bn m\u0101tam 3-\u0101m t\u0113ti \u0bb5\u0bc8\u0bb3 \u0bb0\u0bbe\u0bae \u0b85\u0bb5 1924-\u0b86 \u0b86 \u0bb1 \u0ba4\u0bbe .</td></tr><tr><td/><td>pirant\u0101r.</td><td/><td/></tr><tr><td colspan=\"4\">\u0c06\u0c2f\u0c28 \u0c17\u0c2a\u0c1f \u0c23\u0c02 TE \u0101yana n\u0101gapat \u0323t \u0323an \u0323am jill\u0101 tirukkuv\u0101lai gr\u0101manl\u014d 1924 j\u016bn 3na janminc\u0101ru. \u0c56 \u0c2e\u0c02 1924 3\u0c28 \u0c1c \u0c02 .</td></tr><tr><td/><td/><td/><td>cre-</td></tr><tr><td/><td/><td/><td>ated SMT systems (phrase-based and hierarchical)</td></tr><tr><td/><td/><td/><td>which were trained on a dataset of 190k parallel</td></tr></table>" |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "text": "Example sentence in English along with its translation and transliteration in the four Dravidian languages.", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "text": "", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "text": "BLEU and CHRF scores for English-to-X NMT, using different segmenters and varying subword vocabulary size. SP refers to the purely statistical SentencePiece segmenter, LMVR to Linguistically Motivated Vocabulary Reduction. Dictionary size refers to the size of both the source and target subword dictionaries. Rightmost columns show the Jaccard similarity (percentage) for the types and tokens from the segmenter outputs.", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>document level F3-score.Results are presented in</td></tr><tr><td>Table 4.</td></tr><tr><td>SP clear winner for Kannada and Tamil: SP</td></tr><tr><td>presented the highest BLEU and CHRF scores</td></tr><tr><td>for Kannada and Tamil. When we compare the</td></tr><tr><td>best systems for both SP and LMVR, large differ-</td></tr><tr><td>ences are observed. For Kannada differences of +6</td></tr><tr><td>BLEU and +7.4 are observed and for Tamil the dif-</td></tr></table>" |
| } |
| } |
| } |
| } |