| { |
| "paper_id": "L16-1002", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:05:33.779747Z" |
| }, |
| "title": "Using BabelNet to Improve OOV Coverage in SMT", |
| "authors": [ |
| { |
| "first": "Jinhua", |
| "middle": [], |
| "last": "Du", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Dublin City University", |
| "location": { |
| "settlement": "Dublin", |
| "country": "Ireland" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Way", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Dublin City University", |
| "location": { |
| "settlement": "Dublin", |
| "country": "Ireland" |
| } |
| }, |
| "email": "away@computing.dcu.ie" |
| }, |
| { |
| "first": "Andrzej", |
| "middle": [], |
| "last": "Zydron", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "XTM International Ltd", |
| "location": { |
| "settlement": "London", |
| "country": "UK" |
| } |
| }, |
| "email": "azydron@xtm-intl.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Out-of-vocabulary words (OOVs) are a ubiquitous and difficult problem in statistical machine translation (SMT). This paper studies different strategies of using BabelNet to alleviate the negative impact brought about by OOVs. BabelNet is a multilingual encyclopedic dictionary and a semantic network, which not only includes lexicographic and encyclopedic terms, but connects concepts and named entities in a very large network of semantic relations. By taking advantage of the knowledge in BabelNet, three different methodsusing direct training data, domain-adaptation techniques and the BabelNet API-are proposed in this paper to obtain translations for OOVs to improve system performance. Experimental results on English-Polish and English-Chinese language pairs show that domain adaptation can better utilize BabelNet knowledge and performs better than other methods. The results also demonstrate that BabelNet is a really useful tool for improving translation performance of SMT systems.", |
| "pdf_parse": { |
| "paper_id": "L16-1002", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Out-of-vocabulary words (OOVs) are a ubiquitous and difficult problem in statistical machine translation (SMT). This paper studies different strategies of using BabelNet to alleviate the negative impact brought about by OOVs. BabelNet is a multilingual encyclopedic dictionary and a semantic network, which not only includes lexicographic and encyclopedic terms, but connects concepts and named entities in a very large network of semantic relations. By taking advantage of the knowledge in BabelNet, three different methodsusing direct training data, domain-adaptation techniques and the BabelNet API-are proposed in this paper to obtain translations for OOVs to improve system performance. Experimental results on English-Polish and English-Chinese language pairs show that domain adaptation can better utilize BabelNet knowledge and performs better than other methods. The results also demonstrate that BabelNet is a really useful tool for improving translation performance of SMT systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "OOVs -source words that have no translation in the phrase table -are a ubiquitous and difficult problem for SMT. Due to the fact that they are trained on pre-defined static data sets, SMT systems necessarily encounter OOVs when translating new documents. In such circumstances, there are two main strategies deployed: (i) to output the source word 'as is' on the target side; or (ii) to omit it altogether. Of course, in both cases, erroneous and disfluent translations are produced. The problem is exacerbated when bilingual data are scarce, or if the text to be translated is not from the same domain as the training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "OOVs are often named entities, specialized terms and neologisms. For example, some person names or technical terms are phonetically transliterated from other languages. In the past, plenty of work has been done to alleviate the impact of OOVs, including orthographic (lexicon-inductionbased) and morphosyntactic preprocessing (Popovic and Ney, 2004; Sadat and Habash, 2006; Habash, 2008; Garera et al., 2009) , pivot languages (Callison-Burch et al., 2006) , grapheme-based model for phonetic transliterations (Lehal and Saini, 2012; Luo et al., 2013) , paraphrases (Habash, 2008; Marton et al., 2009; Du et al., 2010) and contextbased semantic models (Haghighi et al., 2008; Daume-III and Jagarlamudi, 2011; Zhang et al., 2012) .", |
| "cite_spans": [ |
| { |
| "start": 326, |
| "end": 349, |
| "text": "(Popovic and Ney, 2004;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 350, |
| "end": 373, |
| "text": "Sadat and Habash, 2006;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 374, |
| "end": 387, |
| "text": "Habash, 2008;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 388, |
| "end": 408, |
| "text": "Garera et al., 2009)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 427, |
| "end": 456, |
| "text": "(Callison-Burch et al., 2006)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 510, |
| "end": 533, |
| "text": "(Lehal and Saini, 2012;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 534, |
| "end": 551, |
| "text": "Luo et al., 2013)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 566, |
| "end": 580, |
| "text": "(Habash, 2008;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 581, |
| "end": 601, |
| "text": "Marton et al., 2009;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 602, |
| "end": 618, |
| "text": "Du et al., 2010)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 652, |
| "end": 675, |
| "text": "(Haghighi et al., 2008;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 676, |
| "end": 708, |
| "text": "Daume-III and Jagarlamudi, 2011;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 709, |
| "end": 728, |
| "text": "Zhang et al., 2012)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "In this paper, we propose to use BabelNet to handle OOVs. BabelNet is both a multilingual encyclopedic dictionary, with lexicographic and encyclopedic coverage of terms, as well as a semantic network comprising 14 million entries which connects concepts and named entities in a very large network of semantic relations. These are called Babel synsets, each of which represents a given meaning and contains all the synonyms which express that meaning in a range of different languages (Navigli and Ponzetto, 2010; Roberto Navigli, 2012; Navigli, 2012) . The most recent version of BabelNet is 3.5 which integrates knowledge from WordNet, Wikipedia, Microsoft Terminology, Ima-geNet etc. 1 BabelNet has been applied in many natural language processing tasks, such as multilingual lexicon extraction, crosslingual word-sense disambiguation, annotation, and information extraction, all with good performance (Elbedweihy et al., 2013; Jadidinejad, 2013; Navigli et al., 2013; Ehrmann et al., 2014; Moro et al., 2014) . However, to the best of our knowledge, to date there is no comprehensive work applying BabelNet knowledge to SMT tasks. In this paper, we present three different strategies to utilize Babel-Net resources in SMT systems, namely using direct training data, domain adaptation and OOV post-processing approaches. Specifically, the first strategy very straightforwardly appends the bilingual dictionary extracted from Ba-belNet to the initial training data, and then verifies the impact on translation performance; the second uses domainadaptation methods to select in-domain entries from the extracted bilingual dictionary, which are then added to the initial training data in a similar manner; finally we directly call the BabelNet API to post-process OOVs contained in the translation of the source sentence. Experiments conducted on different language pairs show that the second and third strategies are more robust and effective than the first one in augmenting SMT systems. 
The remainder of the paper is as follows. Section 2 reviews related work. In Section 3, we present three different strategies to use BabelNet to handle OOVs. Section 4 describes our experiments and analysis. In Section 5, we conclude and provide some avenues for future research.", |
| "cite_spans": [ |
| { |
| "start": 484, |
| "end": 512, |
| "text": "(Navigli and Ponzetto, 2010;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 513, |
| "end": 535, |
| "text": "Roberto Navigli, 2012;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 536, |
| "end": 550, |
| "text": "Navigli, 2012)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 904, |
| "end": 929, |
| "text": "(Elbedweihy et al., 2013;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 930, |
| "end": 948, |
| "text": "Jadidinejad, 2013;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 949, |
| "end": 970, |
| "text": "Navigli et al., 2013;", |
| "ref_id": null |
| }, |
| { |
| "start": 971, |
| "end": 992, |
| "text": "Ehrmann et al., 2014;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 993, |
| "end": 1011, |
| "text": "Moro et al., 2014)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1." |
| }, |
| { |
| "text": "There has been a long line of research on handling OOVs in SMT. In this section, we briefly introduce some representative work in terms of the methods of processing OOVs. Lexicon-induction-based and morpho-syntactic methods are commonly used for handling unknown words by creating a bilingual lexicon for OOVs. By extending this work, Habash (2008) presents techniques for online treatment of OOVs for Arabic-to-English such as spelling expansion and morphological expansion. Huang et al. (2011) propose to combine sublexical/constituent translations of an OOV word or phrase to generate its translations. Pivot language techniques take advantage of available parallel data between the source language and a third language to handle the problem of OOVs. Using a pivot language, OOVs are translated into a third language and back into the source language and thereby paraphrases for those OOVs are extracted (Callison-Burch et al., 2006) . Semantic methods are based on the distributional hypothesis that words appearing in the same contexts tend to have similar meanings. Paraphrases can express similar meaning of different words or phrases that are useful to alleviate the OOV problem. Marton et al. (2009) use a monolingual text on the source side to find paraphrases for OOVs for which translations are available. The translations of these paraphrases are then used as the translations of the OOV word. Du et al. (2010) constructed a source-side paraphrase lattice to handle OOVs and allow the decoder to decide which paraphrase candidate is the best option for the translation. Instead of replacing OOVs, Zhang et al. (2012) propose a different way of using semantics for handling OOVs. They focus on keeping the untranslated words in the correct position in the translation, i.e. 
employing the distributional semantic model and the bidirectional language model to determine the semantic function which the unknown words serve in the test sentence, and keeping the semantic function unchanged in the translation process. In this way, unknown words will help the phrase reordering and lexical selection of their surrounding words even though they themselves still remain untranslated. For OOVs that are transliterations, a grapheme-based model maps directly from source graphemes to target graphemes. In this model, phonetic information or pronunciation is used, and thus an additional processing step of converting source grapheme to source phoneme is required. For example, Lehal and Saini (2012) propose a hybrid transliteration approach using both the graphemebased transliteration model and the phoneme-based model. Different from the methods above, we utilize an extra semantic resource to handle the problem of OOVs. Specifically, we use BabelNet (i) as direct parallel data, or (ii) to retrieve the translations of OOVs via an API call. Experimental results on different language pairs show that Babel-Net is helpful in improving translation performance of an SMT system.", |
| "cite_spans": [ |
| { |
| "start": 335, |
| "end": 348, |
| "text": "Habash (2008)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 476, |
| "end": 495, |
| "text": "Huang et al. (2011)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 907, |
| "end": 936, |
| "text": "(Callison-Burch et al., 2006)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1188, |
| "end": 1208, |
| "text": "Marton et al. (2009)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1407, |
| "end": 1423, |
| "text": "Du et al. (2010)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1610, |
| "end": 1629, |
| "text": "Zhang et al. (2012)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 2480, |
| "end": 2502, |
| "text": "Lehal and Saini (2012)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2." |
| }, |
| { |
| "text": "In this section, we introduce three different strategies to utilize BabelNet resources in SMT.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Strategies for Using BabelNet in SMT", |
| "sec_num": "3." |
| }, |
| { |
| "text": "This strategy is straightforward. We first retrieve the whole source and target terms in BabelNet and then perform set of word alignment to obtain a bilingual dictionary. Then clean up the dictionary and add it to the initial training data. The entries in the dictionary are not only single words, but also multi-word expressions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Direct Training Data", |
| "sec_num": "3.1." |
| }, |
| { |
| "text": "As mentioned, BabelNet integrates many knowledge resources, so its entries come from many different domains. However, for practical use, we often use domain-specific data sets to build SMT systems, e.g. Newswire, Europarl (Koehn, 2005) , DGT etc. In order to make good use of the knowledge in the BabelNet dictionary rather than simply adding it to the training data, we propose to use different domain adaptation methods, namely the entry-match strategy and Cross Entropy Difference (CED) (Moore and Lewis, 2010; Axelrod et al., 2011; Haque et al., 2014) , to select in-domain entries. The entry match strategy uses a straightforward way to select in-domain entries, i.e. if either the source side or the target side of one entry-pair occurs in the initial training data, then this entry-pair will be selected. The advantage of this method is that it can increase the probability estimation of existing words. However, the disadvantage is that it cannot handle OOVs. We also use a domain-adaptation method which has a good performance record -CED -to facilitate entry selection from the bilingual dictionary. In this method, given an indomain (ID) corpus I and a general corpus O, language models are built from both, and each sentence s in O is scored according to the entropy difference, as in 1:", |
| "cite_spans": [ |
| { |
| "start": 222, |
| "end": 235, |
| "text": "(Koehn, 2005)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 490, |
| "end": 513, |
| "text": "(Moore and Lewis, 2010;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 514, |
| "end": 535, |
| "text": "Axelrod et al., 2011;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 536, |
| "end": 555, |
| "text": "Haque et al., 2014)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Domain Adaptation", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "score(s) = H I (s) \u2212 H O (s)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Domain Adaptation", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "where H I (s) is the entropy of s in the ID corpus I, and H O (s) is the entropy of s in the out-of-domain (OOD) corpus O. The advantage of this method is that it can handle OOVs to some extent. However, the potential problem is that the selection accuracy might be affected by short entries, i.e. there are many entries that only contain either a single word or a pair of words, which may cause some context information to be lost for domain adaptation. Therefore, in order to minimize this influence, we use bigrams to build the language models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Domain Adaptation", |
| "sec_num": "3.2." |
| }, |
| { |
| "text": "There is a lot of noise in the extracted dictionary. For example, on the Chinese side of the English-Chinese dictionary, an entry might occur in Simplified or Traditional Chinese, or an entry might be segmented into words or characters. More importantly, there are many possible target terms for a given source term which come from different domains.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "OOV Post-processing: BabelNet API", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "In our experiments, using BabelNet bilingual entries as the training data does not perform well, so we propose to directly call the BabelNet API (Navigli and Ponzetto, 2012) to post-process OOVs in the translation of a source sentence. We use the BabelNet API to call the precompiled index bundle (version: 2.5.1) 2 to retrieve the translation for an OOV. Specifically, an OOV in the source sentence is automatically marked in the output of the SMT decoder, and then we recognize this OOV and retrieve its 1-best candidate by calling the BabelNet API. Finally, we replace the OOV in the target side by the candidate translation.", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 173, |
| "text": "(Navigli and Ponzetto, 2012)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "OOV Post-processing: BabelNet API", |
| "sec_num": "3.3." |
| }, |
| { |
| "text": "Polish -Training Data #sen #word #entry #sen #word #entry 518,155 11,270,214 52,247 518,155 9,743,192 144,146 ", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 109, |
| "text": "518,155 11,270,214 52,247 518,155 9,743,192 144,146", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "English -Training Data", |
| "sec_num": null |
| }, |
| { |
| "text": "We use Moses (Koehn et al., 2007) as the SMT system and configure the argument '-mark-unknown' to mark up the OOVs in the translation. Experiments are conducted on English-Polish (EN-PL), English-Chinese (EN-ZH) and Chinese-English (ZH-EN) translation tasks.", |
| "cite_spans": [ |
| { |
| "start": 13, |
| "end": 33, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Settings", |
| "sec_num": "4.1." |
| }, |
| { |
| "text": "In this section, we verify the impact of Strategy 1 on system performance, i.e. directly adding the bilingual dictionary to the training data to build the SMT system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments on Strategy 1: Direct Training Data (DTD)", |
| "sec_num": "4.2." |
| }, |
| { |
| "text": "Regarding the EN-PL task, the initial training data comes from Europarl which contains 518,155 sentence pairs, and the devset and test set each contain 2,000 sentence pairs which are randomly extracted from the Europarl data set. The statistics of the data are shown in Table 1 and Table 2 . Regarding the ZH-EN and EN-ZH tasks, the training data comes from NIST FBIS that contains 270,794 sentence pairs, the devset is the NIST 2006 current set that includes 1,664 sentences with 4 references for each, and the test set is the NIST 2005 current set that contains 1,082 sentences with 4 references for each. The statistics of the data are shown in Table 3 and Table 4 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 270, |
| "end": 289, |
| "text": "Table 1 and Table 2", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 648, |
| "end": 667, |
| "text": "Table 3 and Table 4", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Statistics", |
| "sec_num": "4.2.1." |
| }, |
| { |
| "text": "The EN-PL BabelNet dictionary contains 6,199,888 bilingual entries. However, the raw data contains a lot of noise, so we performed some pre-processing of the dictionary, including:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "\u2022 if East Asian characters are included in either the English or Polish side, we remove this pair;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "\u2022 if the English side contains symbols which are neither letters nor digits, then we remove this pair;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "\u2022 if either side contains punctuation, then we remove this pair;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "\u2022 if the English side is the same as the Polish side, then we remove this pair;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "\u2022 if the ratio of the word-level entry lengths between the English side and the Polish side is less than 0.5 or greater than 2, then we remove it. This rule is based on the fact that 99% of pairs of EN-PL sentences in Europarl training data fall within this range.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "After this clean-up, we are left with 2, 215, 248 pairs. Note that almost 4 million sentence-pairs -or 64.3% of the original date -are filtered out, indicating that the EN-PL data is overall not of high quality. Nonetheless, 2.2 million sentence-pairs is still more than a reasonable amount of additional data for MT engine training. The ZH-EN BabelNet dictionary contains 5,975,619 bilingual entries. As we did for the EN-PL dictionary, we also filtered the ZH-EN dictionary as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "\u2022 many Chinese characters are encoded as UTF-8 Traditional format (BIG5), so we convert them to UTF-8 Simplified format (GBK);", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "\u2022 if the English side contains symbols which are neither letters nor digits, then we remove this pair; \u2022 if the Chinese side does not contain Chinese characters, then we remove this pair;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "\u2022 if the English side is the same as the Chinese side, then we remove this pair;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "\u2022 we first remove the spaces between any characters on the Chinese side, and then re-segment into words to remain consistent with the training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "Finally, we obtain 5, 493, 323 Chinese-English pairs from the original 5, 975, 619 pairs. That is, far less data is filtered out than for EN-PL; for ZH-EN, just 8% of the original data is discarded, indicating that on the whole, the ZH-EN data is of superior quality than the EN-PL entries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BabelNet Bilingual Dictionary and Denoising", |
| "sec_num": "4.2.2." |
| }, |
| { |
| "text": "The results for EN-PL and ZH-EN language pairs on different data sets are shown in Table 5 , where 'Baseline' indicates the system does not contain any BabelNet entries; 'DTD-CLEAN-1' indicates that the cleaned BabelNet entries are only repeated once in the training data; and 'DTD-CLEAN-10' indicates that the cleaned BabelNet entries are repeated 10 times in the training data. It can be seen that (i) using the BabelNet dictionary as the training data on the EN-PL task does not result in better performance; (ii) however, it achieves better performance on ZH-EN and EN-ZH tasks compared to the baselines; (iii) repeating the occurrences of the BabelNet entries does not improve the quality, but actually results in worse performance than when using DTD-CLEAN-1; (iv) for different languages, BabelNet has an unstable impact on system performance.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 83, |
| "end": 90, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.2.3." |
| }, |
| { |
| "text": "As far as the above results are concerned, we infer that (i) the domain of the BabelNet dictionary might have a sig-nificant difference compared to that of the initial training data; and (ii) the entries need to be further cleaned up to obtain more domain-related data. Therefore, we propose to use domain-adaptation strategies to verify the contribution of BabelNet as described in the next section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.2.3." |
| }, |
| { |
| "text": "In this section, we use two different domain-adaptation methods to select in-domain entries from the cleaned Ba-belNet dictionary as described in Section 3.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments on Strategy 2: Domain Adaptation", |
| "sec_num": "4.3." |
| }, |
| { |
| "text": "We utilize two entry-match methods:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Entry Match", |
| "sec_num": "4.3.1." |
| }, |
| { |
| "text": "\u2022 Co-occurrence Entry Match (CEM): if both the source side and the target side of an entry occur in the initial training data, then we select it;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Entry Match", |
| "sec_num": "4.3.1." |
| }, |
| { |
| "text": "\u2022 Unilateral Entry Match (UEM): if either the source side or the target side of an entry occurs in the initial training data, we select it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Entry Match", |
| "sec_num": "4.3.1." |
| }, |
| { |
| "text": "The statistics regarding the selected entries are shown in Table 6 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 59, |
| "end": 66, |
| "text": "Table 6", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Entry Match", |
| "sec_num": "4.3.1." |
| }, |
| { |
| "text": "The numbers of selected entries using the CED method are shown in Table 6 . Compared to the other data selection methods, quite similar amounts of data are selected for both EN-PL and EN-ZH using this approach, with around 870K BabelNet entries.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 66, |
| "end": 73, |
| "text": "Table 6", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "CED", |
| "sec_num": "4.3.2." |
| }, |
| { |
| "text": "The results for EN-PL and ZH-EN language pairs on these different domain-adaptation methods are shown in Table 7 . In terms of BLEU (Papineni et al., 2002) and TER (Snover et al., 2006) , we can make the following observations: (i) all three domain adaptation methods (CEM, UEM and CED) outperform the baselines on both EN-ZH and ZH-EN tasks; (ii) the UEM method performs best on EN-ZH and ZH-EN tasks, while CED performs best on EN-PL; (iii) for the EN-PL task, all domain-adaptation methods perform better than DTD-CLEAN-1 which shows that the data-selection methods are better at removing out-ofdomain and noisy data. Based on these results, we can say that the UEM and CED methods are effective and feasible in selecting useful or indomain data from the noisy, out-of-domain dictionary.", |
| "cite_spans": [ |
| { |
| "start": 132, |
| "end": 155, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 164, |
| "end": 185, |
| "text": "(Snover et al., 2006)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 105, |
| "end": 112, |
| "text": "Table 7", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.3.3." |
| }, |
| { |
| "text": "The statistics of OOVs occurring in the test sets in terms of EN-PL, EN-ZH and ZH-EN tasks are shown in Table 8 . Here '#Translated' indicates the number of translations of OOVs that can be retrieved in BabelNet; the last column indicates the ratio of how many OOVs are translated. We can see that only a small proportion of OOVs can be translated by BabelNet. The results of calling the BabelNet API to process OOVs are shown in Table 9 . 'Best-DoAdpt' gives the best result of all domain-adaption methods. 'API' refers to the offline BabelNet API-call method. We can see that the Ba-belNet API method did not beat the best domain-adaptation method on all tasks in terms of BLEU and TER. However, it does improve system performance compared to the baselines, which shows that using BabelNet can alleviate the issue of unknown words to some extent. However, the improvements are not significant (Koehn, 2004) . The possible reasons for this include:", |
| "cite_spans": [ |
| { |
| "start": 895, |
| "end": 908, |
| "text": "(Koehn, 2004)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 111, |
| "text": "Table 8", |
| "ref_id": null |
| }, |
| { |
| "start": 430, |
| "end": 437, |
| "text": "Table 9", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments on Strategy 3: BabelNet API", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "\u2022 Most OOVs cannot be retrieved by BabelNet;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments on Strategy 3: BabelNet API", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "\u2022 Due to lack of context, the retrieved translation for an OOV might not be correct;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments on Strategy 3: BabelNet API", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "\u2022 Some retrieved translations need to be processed further, e.g. using BIG5 and simplified encoding for Chinese, tokenization or segmentation etc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments on Strategy 3: BabelNet API", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "From these results, we can see that the domain-adaptation methods are more effective in utilizing BabelNet resources.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments on Strategy 3: BabelNet API", |
| "sec_num": "4.4." |
| }, |
| { |
| "text": "In this paper, we proposed three strategies for using BabelNet to augment SMT, namely using direct training data, domain adaptation and BabelNet API methods. Experiments conducted on EN-PL, EN-ZH and ZH-EN tasks show that the domain-adaptation strategy is the most effective out of the three strategies in using BabelNet resources to improve system performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "5." |
| }, |
| { |
| "text": "In future work, we intend to carry out further studies on the use of BabelNet for SMT regarding (i) using the latest version of BabelNet and online Web service-based API to process OOVs; (ii) examining BabelNet on more language pairs; (iii) studying different components of using BabelNet resources to augment SMT, such as supervising word alignment, phrase extraction and decoding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "5." |
| }, |
| { |
| "text": "http://babelnet.org/about", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://babelnet.org/download", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This research is supported by Science Foundation Ireland through the ADAPT Centre (Grant 13/RC/2106) (www.adaptcentre.ie) at Dublin City University and Trinity College Dublin, and by Grant 610879 for the Falcon project funded by the European Commission.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Improved statistical machine translation using paraphrases", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Axelrod", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Osborne", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 25th Pacific Asia Conference on Language, Information and Computation", |
| "volume": "", |
| "issue": "", |
| "pages": "17--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Axelrod, A., He, X., and Gao, J. (2011). Domain Adapta- tion via Pseudo InDomain Data Selection. In Proceed- ings of the 25th Pacific Asia Conference on Language, Information and Computation, pages 20-30, Singapore. Callison-Burch, C., Koehn, P., and Osborne, M. (2006). Improved statistical machine translation using para- phrases. In Proceedings of the NAACL, pages 17-24, New York City, USA.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Domain adaptation for machine translation by mining unseen words", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Daume-Iii", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Jagarlamudi", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "ACL-HLT 2011: Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "407--412", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daume-III, H. and Jagarlamudi, J. (2011). Domain adap- tation for machine translation by mining unseen word- s. In ACL-HLT 2011: Proceedings of the 49th Annual Meeting of the Association for Computational Linguis- tics: Human Language Technologies, pages 407-412, Portland, Oregon, USA.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Facilitating translation using source language paraphrase lattices", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Way", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "420--429", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Du, J., Jiang, J., and Way, A. (2010). Facilitating transla- tion using source language paraphrase lattices. In Pro- ceedings of the 2010 Conference on Empirical Methods in Natural Language Processing, pages 420-429, Cam- bridge, MA.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Representing multilingual data as linked data: the case of babelnet 2.0", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Ehrmann", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Cecconi", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Vannella", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "P" |
| ], |
| "last": "Mccrae", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Cimiano", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation)", |
| "volume": "", |
| "issue": "", |
| "pages": "401--408", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ehrmann, M., Cecconi, F., Vannella, D., Mccrae, J. P., Cimiano, P., and Navigli, R. (2014). Representing mul- tilingual data as linked data: the case of babelnet 2.0. In Proceedings of the Ninth International Conference on Language Resources and Evaluation), pages 401-408, Reykjavik, Iceland, May.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Using babelnet in bridging the gap between natural language queries and linked data concepts", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Elbedweihy", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Wrigley", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Ciravegna", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 12th International Semantic Web Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elbedweihy, K., Wrigley, S., and Ciravegna, F. (2013). Using babelnet in bridging the gap between natural lan- guage queries and linked data concepts. In Proceedings of the 12th International Semantic Web Conference.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Improving translation lexicon induction from monolingual corpora via dependency contexts and part-of-speech equivalences", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Garera", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Yarowsky", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 13th Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "129--137", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Garera, N., Callison-Burch, C., and Yarowsky, D. (2009). Improving translation lexicon induction from monolin- gual corpora via dependency contexts and part-of-speech equivalences. In Proceedings of the 13th Conference on Computational Natural Language Learning, pages 129- 137, Boulder, Colorado, June.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Four techniques for online handling of out-of-vocabularywords in arabic-english statistical machine translation", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08", |
| "volume": "", |
| "issue": "", |
| "pages": "57--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Habash, N. (2008). Four techniques for online handling of out-of-vocabularywords in arabic-english statistical ma- chine translation. In Proceedings of ACL-08, pages 57- 60, Columbus, Ohio, June. Association for Computation- al Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Learning bilingual lexicons from monolingual corpora", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Haghighi", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Berg-Kirkpatrick", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08: HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "771--779", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haghighi, A., Liang, P., Berg-Kirkpatrick, T., and Klein, D. (2008). Learning bilingual lexicons from monolingual corpora. In Proceedings of ACL-08: HLT, pages 771- 779, Columbus, Ohio, June.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Bilingual Termbank Creation via Log-Likelihood Comparison and Phrase-Based Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Haque", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Penkale", |
| "suffix": "" |
| }, |
| { |
| "first": "Way", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 4th International Workshop on Computational Terminology (COLING2014)", |
| "volume": "", |
| "issue": "", |
| "pages": "42--51", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haque, R., Penkale, S., and Way, A. (2014). Bilingual Termbank Creation via Log-Likelihood Comparison and Phrase-Based Statistical Machine Translation. In Pro- ceedings of the 4th International Workshop on Com- putational Terminology (COLING2014), pages 42-51, Dublin, Ireland, August.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Using sublexical translations to handle the oov problem in machine translation", |
| "authors": [ |
| { |
| "first": "C.-C", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "H.-C", |
| "middle": [], |
| "last": "Yen", |
| "suffix": "" |
| }, |
| { |
| "first": "P.-C", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "S.-T", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chang", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "S" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "ACM Transactions on Asian Language Information Processing", |
| "volume": "10", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huang, C.-C., Yen, H.-C., Yang, P.-C., Huang, S.-T., , and Chang, J. S. (2011). Using sublexical translations to handle the oov problem in machine translation. ACM Transactions on Asian Language Information Process- ing, 10(3):16.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Unsupervised information extraction using babelnet and dbpedia", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "H" |
| ], |
| "last": "Jadidinejad", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Making Sense of Microposts (MSM2013) Concept Extraction Challenge", |
| "volume": "", |
| "issue": "", |
| "pages": "54--56", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jadidinejad, A. H. (2013). Unsupervised information ex- traction using babelnet and dbpedia. In Proceedings of the Making Sense of Microposts (MSM2013) Concep- t Extraction Challenge, pages 54-56, Rio de Janeiro, Brazil, May.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Moses: open source toolkit for statistical machine translation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Bertoldi", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Cowan", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Moran", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Zens", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Constantin", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Herbst", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the ACL on Interactive Poster and Demonstration Sessions", |
| "volume": "", |
| "issue": "", |
| "pages": "177--180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koehn, P., Hoang, H., Birch, A., Callison-Burch, C., Fed- erico, M., Bertoldi, N., Cowan, B., Shen, W., Moran, C., Zens, R., Dyer, C., Bojar, O., Constantin, A., and Herb- st, E. (2007). Moses: open source toolkit for statistical machine translation. In Proceedings of the 45th Annual Meeting of the ACL on Interactive Poster and Demon- stration Sessions, ACL 2007, pages 177-180, Prague, Czech Republic, June.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Statistical significance tests for machine translation evaluation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of EMNLP 2004", |
| "volume": "", |
| "issue": "", |
| "pages": "388--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koehn, P. (2004). Statistical significance tests for machine translation evaluation. In Proceedings of EMNLP 2004, pages 388-395, Barcelona, Spain, July.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Europarl: a parallel corpus for statistical machine translation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "MT Summit X: Conference Proceedings: the tenth Machine Translation Summit", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koehn, P. (2005). Europarl: a parallel corpus for statistical machine translation. In MT Summit X: Conference Pro- ceedings: the tenth Machine Translation Summit, pages 79-86, Phuket, Thailand.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Conversion between scripts of punjabi: Beyond simple transliteration", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [ |
| "S" |
| ], |
| "last": "Lehal", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "S" |
| ], |
| "last": "Saini", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 24th International Conference on Computational Linguistics)", |
| "volume": "", |
| "issue": "", |
| "pages": "633--642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lehal, G. S. and Saini, T. S. (2012). Conversion between scripts of punjabi: Beyond simple transliteration. In Pro- ceedings of the 24th International Conference on Com- putational Linguistics), pages 633-642, Mumbai, India, December.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Exploiting parallel corpus for handling out-of-vocabularywords", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tinsley", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Lepage", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 27th PACLIC", |
| "volume": "", |
| "issue": "", |
| "pages": "399--408", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luo, J., Tinsley, J., and Lepage, Y. (2013). Exploiting parallel corpus for handling out-of-vocabularywords. In Proceedings of the 27th PACLIC, pages 399-408, Taipei, Taiwan, November.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Improved statistical machine translation using monolingually-derived paraphrases", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Marton", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "381--390", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marton, Y., Callison-Burch, C., and Resnik, P. (2009). Improved statistical machine translation using monolingually-derived paraphrases. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, pages 381-390, Singapore, Singapore, August.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Intelligent Selection of Language Model Training Data", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [ |
| "C" |
| ], |
| "last": "Moore", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Association for Computational Linguistics (ACL) 2010", |
| "volume": "", |
| "issue": "", |
| "pages": "220--224", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moore, R. C. and Lewis, W. (2010). Intelligent Selection of Language Model Training Data. In Proceedings of the Association for Computational Linguistics (ACL) 2010, pages 220-224, Uppsala, Sweden, July.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Annotating the masc corpus with babelnet", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Moro", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [ |
| "M" |
| ], |
| "last": "Tucci", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "4214--4219", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moro, A., Navigli, R., Tucci, F. M., and Passonneau, R. J. (2014). Annotating the masc corpus with babelnet. In Proceedings of the Ninth International Conference on Language Resources and Evaluation, pages 4214-4219, Reykjavik, Iceland, May.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "BabelNet: Building a Very Large Multilingual Semantic Network", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "P" |
| ], |
| "last": "Ponzetto", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "216--225", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Navigli, R. and Ponzetto, S. P. (2010). BabelNet: Build- ing a Very Large Multilingual Semantic Network. In Proceedings of the 48th Annual Meeting of the Associ- ation for Computational Linguistics (ACL), pages 216- 225, Uppsala, Sweden, July.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Multilingual WSD with Just a Few Lines of Code: the BabelNet API", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "P" |
| ], |
| "last": "Ponzetto", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "67--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Navigli, R. and Ponzetto, S. P. (2012). Multilingual WSD with Just a Few Lines of Code: the BabelNet API. In Proceedings of the 50th Annual Meeting of the Associa- tion for Computational Linguistics (ACL), pages 67-72, Jeju, Republic of Korea, July.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Multilingual word sense disambiguation", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of the Seventh International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "222--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Semeval-2013 task 12: Multilingual word sense disam- biguation. In Proceedings of the Seventh International Workshop on Semantic Evaluation, pages 222-231, At- lanta, Georgia, June.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Babelnet goes to the (multilingual) semantic web", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the ISWC 2012 Workshop on Multilingual Semantic Web", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Navigli, R. (2012). Babelnet goes to the (multilingual) se- mantic web. In Proceedings of the ISWC 2012 Workshop on Multilingual Semantic Web, Boston, USA, November.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "BLEU: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "W.-J", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th annual meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Papineni, K., Roukos, S., Ward, T., and Zhu, W.-J. (2002). BLEU: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, USA, July.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Towards the use of word stems and suffixes for statistical machine translation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Popovic", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "1585--1588", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Popovic, M. and Ney, H. (2004). Towards the use of word stems and suffixes for statistical machine transla- tion. In Proceedings of Language Resources and Evalu- ation, pages 1585-1588, Lisbon, Portugal, May.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "BabelNet: The automatic construction, evaluation and application of a widecoverage multilingual semantic network", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "P P" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Artificial Intelligence", |
| "volume": "193", |
| "issue": "", |
| "pages": "217--250", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli, S. P. P. (2012). BabelNet: The automat- ic construction, evaluation and application of a wide- coverage multilingual semantic network. Artificial Intel- ligence, 193:217-250.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Combination of arabic preprocessing schemes for statistical machine translation", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Sadat", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Habash", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st COLING and 44th ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sadat, F. and Habash, N. (2006). Combination of arabic preprocessing schemes for statistical machine transla- tion. In Proceedings of the 21st COLING and 44th ACL, pages 1-8, Sydney, Australia, July.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A study of translation edit rate with targeted human annotation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Snover", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Dorr", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Micciulla", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Makhoul", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Association for Machine Translation in the Americas", |
| "volume": "", |
| "issue": "", |
| "pages": "223--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Snover, M., Dorr, B., Schwartz, R., Micciulla, L., and Makhoul, J. (2006). A study of translation edit rate with targeted human annotation. In Proceedings of the Asso- ciation for Machine Translation in the Americas, pages 223-231, Cambridge, USA, August.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Handling unknown words in statistical machine translation from a new perspective", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Zong", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2nd Natural Language Processing and Chinese Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "176--187", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, J., Zhai, F., and Zong, C. (2012). Handling un- known words in statistical machine translation from a new perspective. In Proceedings of the 2nd Natural Lan- guage Processing and Chinese Computing, pages 176- 187, Beijing, China, October.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "type_str": "table", |
| "text": "Statistics of Europarl EN-PL data for the model training", |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">English -Test Set</td><td colspan=\"2\">Polish -Test Set</td></tr><tr><td>#sen</td><td colspan=\"2\">#word #entry #sen</td><td>#word #entry</td></tr><tr><td colspan=\"4\">2,000 47,194 4,063 2,000 39,956 7,451</td></tr></table>", |
| "html": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "text": "Statistics of Europarl EN-PL data for the test set", |
| "num": null, |
| "content": "<table><tr><td colspan=\"3\">Chinese -Training Data</td><td colspan=\"3\">English -Training Data</td></tr><tr><td>#sen</td><td>#word</td><td>#entry</td><td>#sen</td><td>#word</td><td>#entry</td></tr><tr><td colspan=\"6\">270,794 9,582,189 102,035 270,794 10,319,019 81,036</td></tr></table>", |
| "html": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "text": "Statistics of FBIS ZH-EN data for model training", |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">Chinese -Test Set</td><td colspan=\"3\">English -Test Set (4 Refs)</td></tr><tr><td>#sen</td><td colspan=\"2\">#word #entry #sen</td><td>#word</td><td>#entry</td></tr><tr><td colspan=\"5\">1,082 30,489 5,684 1,082 142,794 7,552</td></tr></table>", |
| "html": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "text": "Statistics of FBIS ZH-EN data for the test set", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "text": "Statistics of the entry match methods on different language pairs", |
| "num": null, |
| "content": "<table><tr><td>system</td><td colspan=\"6\">EN-PL BLEU4(%) TER(%) BLEU4(%) TER(%) BLEU4(%) TER(%) EN-ZH ZH-EN</td></tr><tr><td>Baseline</td><td>24.57</td><td>58.47</td><td>11.62</td><td>72.03</td><td>27.08</td><td>67.90</td></tr><tr><td>DTD-CLEAN-1</td><td>24.15</td><td>58.91</td><td>12.53</td><td>71.25</td><td>27.53</td><td>66.20</td></tr><tr><td>CEM</td><td>24.71</td><td>58.29</td><td>12.23</td><td>71.50</td><td>27.91</td><td>66.05</td></tr><tr><td>UEM</td><td>24.59</td><td>58.31</td><td>12.76</td><td>70.81</td><td>28.47</td><td>65.78</td></tr><tr><td>CED</td><td>24.72</td><td>58.29</td><td>12.73</td><td>71.24</td><td>27.23</td><td>66.97</td></tr></table>", |
| "html": null |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "text": "Results of domain adaptation methods on different settings", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF8": { |
| "type_str": "table", |
| "text": "Comparison between BabelNet API method and others", |
| "num": null, |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |