| { |
| "paper_id": "Y15-1033", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:41:54.701199Z" |
| }, |
| "title": "Large-scale Dictionary Construction via Pivot-based Statistical Machine Translation with Significance Pruning and Neural Network Features", |
| "authors": [ |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Kyoto University", |
| "location": {} |
| }, |
| "email": "prajdabre@gmail.com" |
| }, |
| { |
| "first": "Chenhui", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Fabien", |
| "middle": [], |
| "last": "Cromieres", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Toshiaki", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "nakazawa@pa.jst.jp" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Kyoto University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present our ongoing work on large-scale Japanese-Chinese bilingual dictionary construction via pivot-based statistical machine translation. We utilize statistical significance pruning to control noisy translation pairs that are induced by pivoting. We construct a large dictionary which we manually verify to be of a high quality. We then use this dictionary and a parallel corpus to learn bilingual neural network language models to obtain features for reranking the n-best list, which leads to an absolute improvement of 5% in accuracy when compared to a setting that does not use significance pruning and reranking. 2 The highest accuracy evaluated based on the 1 best translation is 21.7% in (Tsunakawa et al., 2009).", |
| "pdf_parse": { |
| "paper_id": "Y15-1033", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present our ongoing work on large-scale Japanese-Chinese bilingual dictionary construction via pivot-based statistical machine translation. We utilize statistical significance pruning to control noisy translation pairs that are induced by pivoting. We construct a large dictionary which we manually verify to be of a high quality. We then use this dictionary and a parallel corpus to learn bilingual neural network language models to obtain features for reranking the n-best list, which leads to an absolute improvement of 5% in accuracy when compared to a setting that does not use significance pruning and reranking. 2 The highest accuracy evaluated based on the 1 best translation is 21.7% in (Tsunakawa et al., 2009).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Pivot-based statistical machine translation (SMT) (Wu and Wang, 2007) has been shown to be a possible way of constructing a dictionary for the language pairs that have scarce parallel data (Tsunakawa et al., 2009; Chu et al., 2015) . The assumption of this method is that there is a pair of large-scale parallel data: one between the source language and an intermediate resource rich language (henceforth called pivot), and one between that pivot and the target language. We can use the source-pivot and pivot-target parallel data to develop a source-target term 1 translation model for dictionary construction.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 69, |
| "text": "(Wu and Wang, 2007)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 189, |
| "end": 213, |
| "text": "(Tsunakawa et al., 2009;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 214, |
| "end": 231, |
| "text": "Chu et al., 2015)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Pivot-based SMT uses the log linear model as conventional phrase-based SMT (Koehn et al., 2007) does. This method can address the data sparseness problem of directly merging the source-pivot and pivot-target terms, because it can use the portion of terms to generate new terms. Small-scale experiments in (Tsunakawa et al., 2009) showed very low accuracy of pivot-based SMT for dictionary construction. 2 This paper presents our study to construct a largescale Japanese-Chinese (Ja-Zh) scientific dictionary, using large-scale Japanese-English (Ja-En) (49.1M sentences and 1.4M terms) and English-Chinese (En-Zh) (8.7M sentences and 4.5M terms) parallel data via pivot-based SMT. We generate a large pivot translation model using the Ja-En and En-Zh parallel data. Moreover, a small direct Ja-Zh translation model is generated using small-scale Ja-Zh parallel data. (680k sentences and 561k terms). Both the direct and pivot translation models are used to translate the Ja terms in the Ja-En dictionaries to Zh and the Zh terms in the Zh-En dictionaries to Ja to construct a large-scale Ja-Zh dictionary (about 3.6M terms).", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 95, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 305, |
| "end": 329, |
| "text": "(Tsunakawa et al., 2009)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 403, |
| "end": 404, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We address the noisy nature of pivoting large phrase tables by statistical significance pruning (Johnson et al., 2007) . In addition, we exploit linguistic knowledge of common Chinese characters (Chu et al., 2013) shared in Ja-Zh to further improve the translation model. Large-scale experiments on scientific domain data indicate that our proposed method achieves dictionaries which we manually verify to be of high quality.", |
| "cite_spans": [ |
| { |
| "start": 96, |
| "end": 118, |
| "text": "(Johnson et al., 2007)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 195, |
| "end": 213, |
| "text": "(Chu et al., 2013)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Reranking the n-best list produced by the SMT decoder is known to help improve the translation quality given that good quality features are used (Och et al., 2004) . In this paper, we use bilingual neural network language model features for reranking the n-best list produced by the pivot-based system which uses significance pruning, and achieve a 2.5% (absolute) accuracy improvement. Compared to a setting which uses neither significance pruning nor n-best list reranking the improvement in accu-racy is about 5% (absolute). We also use character based neural MT to eliminate the out-of-vocabulary (OOV) terms, which further improves the quality.", |
| "cite_spans": [ |
| { |
| "start": 145, |
| "end": 163, |
| "text": "(Och et al., 2004)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of this paper is structured as follows: Section 2 reviews related work. Section 3 presents our dictionary construction using pivot-based SMT with significance pruning. Section 4 describes the bilingual neural language model features using a parallel corpus and the constructed dictionary for reranking the n-best list. Experiments and results are described in Section 5, and we conclude this paper in Section 6.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Many studies have been conducted for pivot-based SMT. Utiyama and Isahara (2007) developed a method (sentence translation strategy) for cascading a source-pivot and a pivot-target system to translate from source to target using a pivot language. Since this results in multiplicative error propagation, Wu and Wang (2009) developed a method (triangulation) in which they combined the source-pivot and pivot-target phrase tables to obtain a source-target phrase table. They then combine the pivoted and direct tables (using source-target parallel corpora) by linear interpolation whose weights were manually specified. There is a method to automatically learn the interpolation weights (Sennrich, 2012) but it requires reference phrase pairs which are not easily available. Work on translation from Indonesian to English using Malay and Spanish to English using Portuguese (Nakov and Ng, 2009) as pivot languages worked well since the pivots had substantial similarity to the source languages. They used the multiple decoding paths (MDP) feature of the phrase-based SMT toolkit Moses (Koehn et al., 2007) to combine multiple tables which avoids interpolation. The issue of noise introduced by pivoting has not been seriously addressed and although statistical significance pruning (Johnson et al., 2007) has shown to be quite effective in a bilingual scenario, it has never been considered in a pivot language scenario. (Tsunakawa et al., 2009) was the first work that constructs a dictionary for language pairs that are resource poor using pivot-based SMT, however the experiments were performed on small-scale data. Chu et al. (2015) conducted large-scale experiments and exploited the linguistic knowledge of common Chinese characters shared in Japanese-Chinese (Chu et al., 2013) to improve the translation model. N-best list reranking (Och et al., 2004; Sutskever et al., 2014) is known to improve the translation quality if good quality features are used. Recently, and (Bahdanau et al., 2014) have shown that recurrent neural networks can be used for phrase-based SMT whose quality rivals the state of the art. Since the neural translation models can also be viewed as bilingual language models, we use them to obtain features for reranking the n-best lists produced by the pivot-based system.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 80, |
| "text": "Utiyama and Isahara (2007)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 302, |
| "end": 320, |
| "text": "Wu and Wang (2009)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 684, |
| "end": 700, |
| "text": "(Sennrich, 2012)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 871, |
| "end": 891, |
| "text": "(Nakov and Ng, 2009)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1082, |
| "end": 1102, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1279, |
| "end": 1301, |
| "text": "(Johnson et al., 2007)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1418, |
| "end": 1442, |
| "text": "(Tsunakawa et al., 2009)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1616, |
| "end": 1633, |
| "text": "Chu et al. (2015)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1763, |
| "end": 1781, |
| "text": "(Chu et al., 2013)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1838, |
| "end": 1856, |
| "text": "(Och et al., 2004;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1857, |
| "end": 1880, |
| "text": "Sutskever et al., 2014)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1974, |
| "end": 1997, |
| "text": "(Bahdanau et al., 2014)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "3 Dictionary Construction via Pivot-based SMT Figure 1 gives an overview of our construction method. Phrase-based SMT (Koehn et al., 2007) is the basis of our method. We first generate Ja-Zh (source-target), Ja-En (source-pivot) and En-Zh (pivot-target) phrase tables from parallel data respectively. The generated Ja-Zh phrase table is used as the direct table. Using the Ja-En and En-Zh phrase tables, we construct a Ja-Zh pivot phrase table via En. The direct and pivot tables are then combined and used for phrase-based SMT to the Ja terms in the Ja-En dictionaries to Zh and the Zh terms in the Zh-En dictionaries to Ja to construct a large-scale Ja-Zh dictionary. In addition, we use common Chinese characters to generate Chinese character features for the phrase tables to improve the SMT performance.", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 138, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 46, |
| "end": 54, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We follow the phrase table triangulation method (Wu and Wang, 2007) to generate the pivot phrase table. This method generates a source-target phrase table via all their shared pivot phrases in the sourcepivot and pivot-target tables. The formulae for generating the inverse phrase translation probabilities and direct lexical weightings, \u03c6(f |e) and lex(f |e) are given below. Inverting the positions of e and f give the formulae for the direct probabilities and weightings, \u03c6(e|f ) and lex(e|f ).", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 67, |
| "text": "(Wu and Wang, 2007)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pivot Phrase Table Generation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u03c6(f |e) = p i \u03c6(f |p i ) * \u03c6(p i |e) (1) !\"#$%& '()& & !\"#$%*+,&-& 111&!\"&'& !./01'(&111&-& ()*#$% )))& 34#$%&5*,'6-&789:;<=& ,.\"5>*4111&#$&111&2&& \"?\"'+.,&111&!\"&111&2&& )))&", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pivot Phrase Table Generation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "34#$%&'%,\"-.&+\"/0.& !\"#34&5*,'6-&7@A:@<=& !\"#$%&5*,'6-&78:;<=& !\"#$%&?(5>*4\",B&& 79:C<D&\"556,\"5BE&A:F=&", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pivot Phrase Table Generation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "*+&,&111&&'%111&2&& !./0&1&111&()*%111&2&& )))& !\"#$%&?(,.5+&'%,\"-.&+\"/0.& !\"#$%&111&\"?\"'+.,&111&2&& '(&111&,.\"5>*4&111&2&& )))& !\"#34&'%,\"-.&+\"/0.& *+,-./0& & !\"#$%*+,&-& 111&\"?\"'+.,&',*+.(4% )))& !GH&!\"#34&& ?(5>*4\",B&7@@AI=& % ()*#$&111&-& \"5.+B0\">*4&,.\"5>*4% )))& JGHJK&$%#34&& ?(5>*4\",B&79:L<=& K*MM*4& K%(4.-.& 5%\",\"5+.,-2 $%2+ , - !\"2 ! \" # Figure 1: Overview of our dictionary construction method. lex(f |e, a) = p i lex(f |p i , a 1 ) * lex(p i |e, a 2 ) (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pivot Phrase Table Generation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where a 1 is the alignment between phrases f (source) and p i (pivot), a 2 is the alignment between p i and e (target) and a is the alignment between e and f . Note that the lexical weightings are calculated in the same way as the phrase probabilities. Our results might be further improved if we used more sophisticated approaches like the cross-language similarity method or the method which uses pivot induced alignments (Wu and Wang, 2007) .", |
| "cite_spans": [ |
| { |
| "start": 424, |
| "end": 443, |
| "text": "(Wu and Wang, 2007)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pivot Phrase Table Generation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "As pivoting induces a very large number of phrase pairs, we prune all pairs with inverse phrase translation probability less than 0.001. This manually specified threshold is simple, and works in practice but is not statistically motivated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pivot Phrase Table Generation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To combine the direct and pivot phrase tables, we make use of the MDP method of the phrase-based SMT toolkit Moses (Koehn et al., 2007) , which has been shown to be an effective method (Nakov and Ng, 2009) . MDP, which uses all the tables simultaneously while decoding, ensures that each pivot table is kept separate and translation options are collected from all the tables.", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 135, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 185, |
| "end": 205, |
| "text": "(Nakov and Ng, 2009)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combination of the Direct and Pivot Phrase Tables", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Consider a source-pivot phrase pair (X,Y) and a pivot-target phrase pair (Y,Z). If Y is a bad translation of X and Z is a bad translation of Y, then the induced pair (X,Z) will also be a bad pair. The phrase pair extraction processes in phrase-based SMT often result in noisy phrase tables, which when pivoted give even noisier tables. Statistical significance pruning (Johnson et al., 2007) is known to eliminate a large amount of noise and thus we used it to prune our tables before pivoting. We used the \u03b1 + threshold which is based on the parallel corpus size and shown to be optimal. Although the optimal thresholds for a pivot based MT setting might be different, currently we consider only the \u03b1 + threshold which is determined to be the best by (Johnson et al., 2007) . Exhaustive testing using various thresholds will be performed and reported in the future. The negative log probability of the p-value (also called significance value) of the phrase pair is computed and the pair is retained if this exceeds the threshold. It is possible that all phrase pairs for a source phrase might be pruned leading to an out-of-vocabulary (OOV) problem. To remedy this we retain the top 5 phrase pairs (according to inverse translation probability) for such a phrase. We tried 3 different settings: Prune source-pivot table only (labeled \"Pr:S-P\"), Prune pivottarget table only (labeled \"Pr:P-T\") and Prune both tables (labeled \"Pr:Both\"). We discuss the effects of each setting in Section 5.2.4.", |
| "cite_spans": [ |
| { |
| "start": 369, |
| "end": 391, |
| "text": "(Johnson et al., 2007)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 753, |
| "end": 775, |
| "text": "(Johnson et al., 2007)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploiting Statistical Significance Pruning for Pivoting", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Ja-Zh shares Chinese characters. Because many common Chinese characters exist in Ja-Zh, they have been shown to be very effective in many Ja-Zh natural language processing (NLP) tasks (Chu et al., 2013) . In this paper, we compute Chinese character features for the phrase pairs in the translation models, and integrate these features in the log-linear model for decoding. In detail, we compute following two features for each phrase pair:", |
| "cite_spans": [ |
| { |
| "start": 184, |
| "end": 202, |
| "text": "(Chu et al., 2013)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Character Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "CC ratio = Ja CC num + Zh CC num Ja char num + Zh char num", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Character Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "CCC ratio = Ja CCC num + Zh CCC num Ja CC num + Zh CC num (4) where char num, CC num and CCC num denote the number of characters, Chinese characters and common Chinese characters in a phrase respectively. The common Chinese character ratio is calculated based on the Chinese character mapping table in (Chu et al., 2013) . We simply add these two scores as features to the phrase tables and use these tables for tuning and testing.", |
| "cite_spans": [ |
| { |
| "start": 302, |
| "end": 320, |
| "text": "(Chu et al., 2013)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Character Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "A combination of pivoting, statistical significance pruning and Chinese character features is used to construct the high quality large scale dictionary. One can use this dictionary as an additional component in an MT system. In our case we use it to generate features for N-best list reranking (next section).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Character Features", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "The motivation behind n-best list reranking is simple: It is quite common for a good translation candidate to be ranked lower than a bad translation candidate. However, it might be possible to use additional features to rerank the list of candidates in order to push the good translation to the top of the list. Figure 2 gives a simple description of the n-best list reranking procedure using neural features. Using the Ja-Zh dictionary constructed using the methods specified in Section 3 and the Ja-Zh ASPEC corpus we train !\"#$%&\"#'()*+' %\",#$-'.\"$*,#\"/' ! !\"#$%&'(!)! \"\"\"!!\"#$! !*+,-./!\"\"\"!)! %&'()* 000! #$%&'!()*+,!-./012!345678! #$%&'!9:-;.<$/=!! 3>?4@A!$--1/$-=B!6?C8! +,-./1\"\"\"!%D?44>DE!! +0-/1\"\"\"!%F?6CGGD! 000! +,-&/!\"\"\"!%F?E4HEH! +0-/1\"\"\"!%5F?HGFDH! +,-./!\"\"\"!%D6C?4DCE5! 000! +,-&/!\"\"\"!%DD5?F4E6H! Figure 2 : Using neural features for reranking. 4 neural translation models. For each translation direction we train a character based model using the dictionary and corpus separately (2 directions and 2 corpora lead to 4 models). It is important to note that although the dictionary is automatically created and is noisy, neural networks are quite robust and can regulate the noise quite effectively. This claim will be validated by our results (see Section 5.2.4). We use the freely available toolkit for neural MT, GroundHog 3 , which contains an implementation of the work by (Bahdanau et al., 2014) . After training a neural translation model it can be used either to translate an input sentence or it can be used to produce a score given an input sentence and a candidate translation. In the latter case, the neural translation model can be viewed as a bilingual language model.", |
| "cite_spans": [ |
| { |
| "start": 1392, |
| "end": 1415, |
| "text": "(Bahdanau et al., 2014)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 312, |
| "end": 321, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 812, |
| "end": 820, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "N-best List Reranking using Neural Features", |
| "sec_num": "4" |
| }, |
| { |
| "text": "One major limitation of neural network based models is that they are very slow to train in case of large vocabularies. It is possible to learn character based models but such models are not suited for extremely long sequences. In the case of Japanese and Chinese, however, since both languages use Chinese characters the character sequences are not too long and thus it makes sense to use character based MT here. Since the number of characters is quite smaller compared to the number of words, the training is quite fast. Ultimately, character based MT is always worse than word based MT and so, in this work we only use the character based neural MT models to obtain features for n-best list reranking. We also use these models to perform character based translation of untranslated words and avoid OOVs. The procedure we followed to perform reranking is given below. A decoder always gives n-best lists when performing tuning and testing. To learn reranking weights, we use the n-best list, for the tuning/development set, corresponding to the run with the highest evaluation metric score (BLEU in our case).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "N-best List Reranking using Neural Features", |
| "sec_num": "4" |
| }, |
| { |
| "text": "(a) Obtain 4 neural translation scores for each translation candidate. (b) Append the 4 scores to the list of features for the candidate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the tuning set:", |
| "sec_num": "1." |
| }, |
| { |
| "text": "2. Use kbmira 4 to learn feature weights using the modified n-best list and the references for the tuning set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the tuning set:", |
| "sec_num": "1." |
| }, |
| { |
| "text": "3. Character level BLEU as well as word level BLEU are used as reranking metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the tuning set:", |
| "sec_num": "1." |
| }, |
| { |
| "text": "(a) Obtain 4 neural translation scores for each translation candidate and append them to the list of features for that candidate. (b) Perform the linear combination of the learned weights and the features to get a model score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the test set:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "5. Sort the n-best list for the test set using the calculated model scores (highest score is the best translation) to obtain the reranked list.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the test set:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "We also try another reranking method by treating it as a classification task using the support vector machine (SVM) toolkit. 5 When evaluating dictionaries, the translation is either correct or incorrect which is unlike sentence translation evaluation. We thus learn a SVM using the development set n-best list and the references to learn a classifier which is able to differentiate between a correct and an incorrect translation. The method we used for reranking is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the test set:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "1. For each input term in the tuning set: (a) Obtain 4 neural translation scores for each translation candidate. (b) Append the 4 scores to the list of features for the candidate. (c) Generate classification label for candidate by comparing it with the reference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the test set:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "2. Learn SVM classifier using the constructed training set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the test set:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "3. For each input term in the test set:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the test set:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "(a) Obtain 4 neural translation scores for each translation candidate and append them to the list of features for that candidate. (b) Use the SVM model to perform classification but give the probability scores instead of labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the test set:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "4. Sort the n-best list for the test set using the calculated probability scores (highest score is the best translation) to obtain the reranked list.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the test set:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "If there are any OOVs in the reranked n-best list then we replace them with the translation obtained using the above mentioned character based neural models (in the Ja-Zh direction).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "For each input term in the test set:", |
| "sec_num": "4." |
| }, |
| { |
| "text": "We describe the data sets, experimental settings and evaluations of the results below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We used following two types of training data:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training data", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 Bilingual dictionaries: we used general domain Ja-En, En-Zh and Ja-Zh dictionaries (i.e. Wikipedia title pairs and EDR 6 ), and the scientific dictionaries provided by the Japan Science and Technology Agency (JST) 7 and the Institute of Science and Technology information of China (ISTIC) 8 (called the JST dictionary and ISTIC dictionary hereafter), containing 1.4M , 4.5M and 561k term pairs respectively. shows the statistics of the bilingual dictionaries used for training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training data", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 Parallel corpora: the scientific Ja-En, En-Zh and Ja-Zh corpora we used were also provided by JST and ISTIC, containing 49.1M , 8.7M and 680k sentence pairs respectively. Table 2 shows the statistics of parallel corpora used for training. Among which ISTIC pc was provided by ISTIC, and the others were provided by JST.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 173, |
| "end": 180, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training data", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We used the terms with two reference translations 9 in the Ja-Zh Iwanami biology dictionary (5,890 pairs) and the Ja-Zh life science dictionary (4,075 pairs) provided by JST. Half of the data in each dictionary was used for tuning (4,983 pairs), and the other half for testing (4,982 pairs). The evaluation scores on the test set give an idea of the quality of the constructed dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tuning and Testing data", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "In our experiments, we segmented the Chinese and Japanese data using a tool proposed by Shen et al. (2014) and JUMAN (Kurohashi et al., 1994) respectively. For decoding, we used Moses (Koehn et al., 2007) with the default options. We trained a word 5-gram language model on the Zh side of all the En-Zh and Ja-Zh training data (14.4M sentences) using the SRILM toolkit 10 with interpolated Keneser-Ney discounting. Tuning was performed by minimum error rate training which also provides us with the n-best lists used to learn reranking weights.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 106, |
| "text": "Shen et al. (2014)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 117, |
| "end": 141, |
| "text": "(Kurohashi et al., 1994)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 184, |
| "end": 204, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "As a baseline, we compared following three methods for training the translation model:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "\u2022 Direct: Only use the Ja-Zh data to train a direct Ja-Zh model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "\u2022 Pivot: Use the Ja-En and En-Zh data for training Ja-En and En-Zh models, and construct a pivot Ja-Zh model using the phrase table triangulation method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "\u2022 Direct+Pivot: Combine the direct and pivot Ja-Zh models using MDP.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "We further conducted experiments using different significance pruning methods described in Section 3.3 and compared the following:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "\u2022 Direct+Pivot (Pr:S-P): Pivoting after pruning the source-pivot table.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "\u2022 Direct+Pivot (Pr:P-T): Pivoting after pruning the pivot-target table.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "\u2022 Direct+Pivot (Pr:Both): Pivoting after pruning both the source-pivot and pivot-target tables.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
"text": "We also conducted additional experiments using the Chinese character features (labeled +CC) (described in 3.4), but we only report the scores on Direct+Pivot (Pr:P-T), which is the best setting (thus labeled BS) for constructing the dictionary. Finally, using the BS, we translated the Ja terms in the JST (550k) dictionary to Zh and the Zh terms in the ISTIC (3.4M) dictionary to Ja, and constructed the Ja-Zh dictionary. The size of the constructed dictionary is 3.6M after discarding the overlapped term pairs in the two translated dictionaries. We then used this dictionary along with the Ja-Zh ASPEC parallel corpus to rerank the n-best list of the BS using the methods mentioned in Section 4. The following scores are reported:",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "\u2022 BS+RRCBLEU: Using character BLEU to rerank the n-best list.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "\u2022 BS+RRWBLEU: Using word BLEU to rerank the n-best list.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "\u2022 BS+RRSVM: Using SVM to rerank the n-best list.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "This is followed by substituting the OOVs with the character level translations using the learned neural translation models (which we label as +OOVsub).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "Following (Tsunakawa et al., 2009) , we evaluated the accuracy on the test set using three metrics: 1 best, 20 best and Mean Reciprocal Rank (MRR) (Voorhees, 1999) . In addition, we report the BLEU-4 (Papineni et al., 2002) scores that were computed on the word level. Table 3 shows the evaluation results. We also show the percentage of OOV terms, 11 and the accuracy with and without OOV terms respectively. In general, we can see that Pivot performs better than Direct, because the data of Ja-En and En-Zh is larger than that of Ja-Zh. Direct+Pivot shows better performance than either method.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 34, |
| "text": "(Tsunakawa et al., 2009)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 147, |
| "end": 163, |
| "text": "(Voorhees, 1999)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 200, |
| "end": 223, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 269, |
| "end": 276, |
| "text": "Table 3", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation Criteria", |
| "sec_num": "5.2.3" |
| }, |
| { |
"text": "Different pruning methods show different performances, where Pr:P-T improves the accuracy, while the other two do not. To understand the reason for this, we also investigated the statistics of the pivot tables produced by different methods. Table 4 shows the statistics. We can see that compared to the other two pruning methods, Pr:P-T keeps the number of source phrases, which leads to a lower OOV rate. It 11 An OOV term contains at least one OOV word.",
"cite_spans": [
{
"start": 409,
"end": 411,
"text": "11",
"ref_id": null
}
],
"ref_spans": [
{
"start": 241,
"end": 248,
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results of Automatic Evaluation", |
| "sec_num": "5.2.4" |
| }, |
| { |
| "text": "Size also prunes the number of average translations for each source phrase to a more reasonable number, which allows the decoder to make better decisions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
"text": "Although the average number of translations for the Pr:Both setting is the smallest, it shows worse performance compared to Pr:P-T method. We suspect the reason for this is that many pivot phrases are pruned by Pr:Both, leading to fewer phrase pairs induced by pivoting. Augmenting with +CC leads to further improvements, and substituting the OOVs using their character level translation gives slightly better performance. The most noteworthy results are obtained when reranking is performed using the bilingual neural language model features. BS+RRCBLEU, which uses character BLEU as a metric, performs almost as well as BS+RRWBLEU which uses word BLEU. There might be a difference in the BLEU scores of these 2 settings but the crucial aspect of dictionary evaluation is the accuracy regarding which there is no notable difference between them. We expected that reranking using SVM, which focuses on accuracy and not BLEU, would yield better results but it might be the case that the training data obtained from the n-best lists is not very reliable. Finally, substituting the OOVs from the reranked lists further boosts the accuracies and although the increment is slight the OOV rate goes down to 0%. It is important to understand that the 20 best accuracy is 73% in the best case which means that if reranking is proper then it is possible to boost the accuracies by approximately 15%.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": null |
| }, |
| { |
| "text": "We manually investigated the terms, whose top 1 translation was evaluated as incorrect according to our automatic evaluation method. Based on our investigation, nearly 75% of them were actually correct translations. They were undervalued because they were not covered by the reference translations in our test set. Taking this observation into consideration, the actual 1 best accuracy is about 90%. Automatic evaluation tends to greatly underestimate the results because of the incompleteness of the test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results of Manual Evaluation", |
| "sec_num": "5.2.5" |
| }, |
| { |
"text": "As mentioned before the setting Direct+Pivot (Pr:P-T)+CC was used to translate the Ja terms in the JST (550k) dictionary to Zh and the Zh terms in the ISTIC (3.4M) dictionary to Ja so as to construct the Ja-Zh dictionary. The size of the constructed dictionary is 3.6M after discarding the overlapped term pairs in the two translated dictionaries. Since we had no references to automatically evaluate this massive dictionary, we evaluated its accuracy by humans. We asked 4 Ja-Zh bilingual speakers to evaluate 100 term pairs, which were randomly selected from the constructed dictionary. Figure 3 shows the web interface used for human evaluation. It allows the evaluators to correct errors as well as leave subjective comments, which can be used to refine our methods. The evaluation results indicate that the 1 best accuracy is about 90%, which is consistent with the manual evaluation results on the test set.",
"cite_spans": [],
"ref_spans": [
{
"start": 589,
"end": 597,
| "text": "Figure 3", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluating the Large Scale Dictionary", |
| "sec_num": "5.3" |
| }, |
| { |
"text": "In this paper, we presented a dictionary construction method via pivot-based SMT with significance pruning, Chinese character knowledge and bilingual neural network language model based feature reranking. Large-scale Ja-Zh experiments show that our method is quite effective. Manual evaluations showed that 90% of the terms are correctly translated, which indicates a high practical utility value of the dictionary. We plan to make the constructed dictionary available to the public in the near future, and hope that crowdsourcing could be further used to improve it.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
"text": "We observed the weights learned for the neural features and found that the highest weight was assigned to the feature obtained using the model learned using this dictionary. And since reranking did improve the accuracies on the test set, it is quite evident that this dictionary is of a fairly high quality. In the future we plan to try an iterative process, where we rerank the n-best list of this massive dictionary to get an improved dictionary on which we learn a better neural bilingual language model for reranking.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this paper, we call the entries in the dictionary terms. A term consists of one or multiple tokens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/lisa-groundhog/GroundHog PACLIC 29", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
"text": "We used the K-best batch MIRA in the Moses decoder to learn feature weights.5 https://www.csie.ntu.edu.tw/~cjlin/libsvm/",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www2.nict.go.jp/outpromotion/techtransfer/EDR/J index.html 7 http://www.jst.go.jp 8 http://www.istic.ac.cn", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Different terms are annotated with different number of reference translations in these two dictionaries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.speech.sri.com/projects/srilm PACLIC 29", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2014. Neural machine translation by jointly learning to align and translate. CoRR, abs/1409.0473.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merrienboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Fethi", |
| "middle": [], |
| "last": "Aglar G\u00fcl\u00e7ehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Bougares", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart van Merrienboer, \u00c7 aglar G\u00fcl\u00e7ehre, Fethi Bougares, Holger Schwenk, and Yoshua Ben- gio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine transla- tion. CoRR, abs/1406.1078.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Chinese-japanese machine translation exploiting chinese characters", |
| "authors": [ |
| { |
| "first": "Chenhui", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Toshiaki", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Daisuke", |
| "middle": [], |
| "last": "Kawahara", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "ACM Transactions on Asian Language Information Processing", |
| "volume": "12", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chenhui Chu, Toshiaki Nakazawa, Daisuke Kawahara, and Sadao Kurohashi. 2013. Chinese-japanese ma- chine translation exploiting chinese characters. ACM Transactions on Asian Language Information Process- ing (TALIP), 12(4):16:1-16:25.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Large-scale japanese-chinese scientific dictionary construction via pivot-based statistical machine translation", |
| "authors": [ |
| { |
| "first": "Chenhui", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Raj", |
| "middle": [], |
| "last": "Dabre", |
| "suffix": "" |
| }, |
| { |
| "first": "Toshiaki", |
| "middle": [], |
| "last": "Nakazawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 21st Annual Meeting of the Association for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "99--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Chenhui Chu, Raj Dabre, Toshiaki Nakazawa, and Sadao Kurohashi. 2015. Large-scale japanese-chinese scien- tific dictionary construction via pivot-based statistical machine translation. In Proceedings of the 21st An- nual Meeting of the Association for Natural Language Processing (NLP 2015), pages 99-102, Kyoto, Japan, March.",
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Improving translation quality by discarding most of the phrasetable", |
| "authors": [ |
| { |
| "first": "Howard", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "967--975", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Howard Johnson, Joel Martin, George Foster, and Roland Kuhn. 2007. Improving translation quality by dis- carding most of the phrasetable. In Proceedings of the 2007 Joint Conference on Empirical Methods in Nat- ural Language Processing and Computational Natu- ral Language Learning (EMNLP-CoNLL), pages 967- 975, Prague, Czech Republic, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Moses: Open source toolkit for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcello", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Bertoldi", |
| "suffix": "" |
| }, |
| { |
| "first": "Brooke", |
| "middle": [], |
| "last": "Cowan", |
| "suffix": "" |
| }, |
| { |
| "first": "Wade", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christine", |
| "middle": [], |
| "last": "Moran", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Zens", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "177--180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ondrej Bojar, Alexandra Con- stantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceed- ings of ACL, pages 177-180.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Improvements of Japanese morphological analyzer JUMAN", |
| "authors": [ |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Toshihisa", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuji", |
| "middle": [], |
| "last": "Matsumoto", |
| "suffix": "" |
| }, |
| { |
| "first": "Makoto", |
| "middle": [], |
| "last": "Nagao", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proceedings of the International Workshop on Sharable Natural Language", |
| "volume": "", |
| "issue": "", |
| "pages": "22--28", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sadao Kurohashi, Toshihisa Nakamura, Yuji Matsumoto, and Makoto Nagao. 1994. Improvements of Japanese morphological analyzer JUMAN. In Proceedings of the International Workshop on Sharable Natural Lan- guage, pages 22-28.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Improved statistical machine translation for resource-poor languages using related resource-rich languages", |
| "authors": [ |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "3", |
| "issue": "", |
| "pages": "1358--1367", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Preslav Nakov and Hwee Tou Ng. 2009. Improved statis- tical machine translation for resource-poor languages using related resource-rich languages. In Proceed- ings of the 2009 Conference on Empirical Methods in Natural Language Processing: Volume 3 -Volume 3, EMNLP '09, pages 1358-1367, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A smorgasbord of features for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Franz Josef", |
| "middle": [], |
| "last": "Och", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Sarkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Yamada", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Fraser", |
| "suffix": "" |
| }, |
| { |
| "first": "Shankar", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Libin", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Eng", |
| "suffix": "" |
| }, |
| { |
| "first": "Viren", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Radev", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "HLT-NAACL 2004: Main Proceedings", |
| "volume": "", |
| "issue": "", |
| "pages": "161--168", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Franz Josef Och, Daniel Gildea, Sanjeev Khudanpur, Anoop Sarkar, Kenji Yamada, Alex Fraser, Shankar Kumar, Libin Shen, David Smith, Katherine Eng, Viren Jain, Zhen Jin, and Dragomir Radev. 2004. A smorgasbord of features for statistical machine trans- lation. In Daniel Marcu Susan Dumais and Salim Roukos, editors, HLT-NAACL 2004: Main Proceed- ings, pages 161-168, Boston, Massachusetts, USA, May 2 -May 7. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic evalu- ation of machine translation. In Proceedings of ACL, pages 311-318.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Perplexity minimization for translation model domain adaptation in statistical machine translation", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 13th Conference of the European Chapter of the Association for Computational Linguistics, EACL '12", |
| "volume": "", |
| "issue": "", |
| "pages": "539--549", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich. 2012. Perplexity minimization for trans- lation model domain adaptation in statistical machine translation. In Proceedings of the 13th Conference of the European Chapter of the Association for Com- putational Linguistics, EACL '12, pages 539-549, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Chinese morphological analysis with character-level pos tagging", |
| "authors": [ |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongxiao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Daisuke", |
| "middle": [], |
| "last": "Kawahara", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadao", |
| "middle": [], |
| "last": "Kurohashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "253--258", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mo Shen, Hongxiao Liu, Daisuke Kawahara, and Sadao Kurohashi. 2014. Chinese morphological analysis with character-level pos tagging. In Proceedings of ACL, pages 253-258.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural networks. CoRR, abs/1409.3215.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A chinese-japanese lexical machine translation through a pivot language", |
| "authors": [ |
| { |
| "first": "Takashi", |
| "middle": [], |
| "last": "Tsunakawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Naoaki", |
| "middle": [], |
| "last": "Okazaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ACM Transactions on Asian Language Information Processing (TALIP)", |
| "volume": "8", |
| "issue": "2", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Takashi Tsunakawa, Naoaki Okazaki, Xiao Liu, and Jun'ichi Tsujii. 2009. A chinese-japanese lexical machine translation through a pivot language. ACM Transactions on Asian Language Information Process- ing (TALIP), 8(2):9:1-9:21, May.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A comparison of pivot methods for phrase-based statistical machine translation", |
| "authors": [ |
| { |
| "first": "Masao", |
| "middle": [], |
| "last": "Utiyama", |
| "suffix": "" |
| }, |
| { |
| "first": "Hitoshi", |
| "middle": [], |
| "last": "Isahara", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the conference on Human Language Technology Conference of the North American Chapter of the Association of Computational Linguistics (NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "484--491", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Masao Utiyama and Hitoshi Isahara. 2007. A compar- ison of pivot methods for phrase-based statistical ma- chine translation. In in Proceedings of the conference on Human Language Technology Conference of the North American Chapter of the Association of Com- putational Linguistics (NAACL-HLT, pages 484-491.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The TREC-8 question answering track report", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Voorhees", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of the Eighth TExt Retrieval Conference (TREC-8)", |
| "volume": "", |
| "issue": "", |
| "pages": "77--82", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen M. Voorhees. 1999. The TREC-8 question answer- ing track report. In Proceedings of the Eighth TExt Retrieval Conference (TREC-8), pages 77-82.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Pivot language approach for phrase-based statistical machine translation", |
| "authors": [ |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Machine Translation", |
| "volume": "21", |
| "issue": "3", |
| "pages": "165--181", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hua Wu and Haifeng Wang. 2007. Pivot language approach for phrase-based statistical machine transla- tion. Machine Translation, 21(3):165-181, Septem- ber.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Revisiting pivot language approach for machine translation", |
| "authors": [ |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "1", |
| "issue": "", |
| "pages": "154--162", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hua Wu and Haifeng Wang. 2009. Revisiting pivot language approach for machine translation. In Pro- ceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP: Volume 1 -Volume 1, ACL '09, pages 154- 162, Stroudsburg, PA, USA. Association for Compu- tational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "text": "Human evaluation web interface.", |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "html": null, |
| "content": "<table><tr><td colspan=\"2\">Language Name</td><td>Domain</td><td>Size</td></tr><tr><td/><td>wiki title</td><td>general</td><td>361,016</td></tr><tr><td>Ja-En</td><td>med dic EDR</td><td>medicine general</td><td>54,740 491,008</td></tr><tr><td/><td>JST dic</td><td>science</td><td>550,769</td></tr><tr><td/><td>wiki title</td><td>general</td><td>151,338</td></tr><tr><td/><td>med dic</td><td>medicine</td><td>48,250</td></tr><tr><td>En-Zh</td><td>EDR</td><td>general</td><td>909,197</td></tr><tr><td/><td colspan=\"2\">ISTIC dic science</td><td>3,390,792</td></tr><tr><td/><td>wiki title</td><td>general</td><td>175,785</td></tr><tr><td>Ja-Zh</td><td>med dic</td><td>medicine</td><td>54,740</td></tr><tr><td/><td>EDR</td><td>general</td><td>330,796</td></tr></table>", |
| "text": "", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "html": null, |
| "content": "<table><tr><td colspan=\"2\">Language Name</td><td>Size</td></tr><tr><td/><td>LCAS</td><td>3,588,800</td></tr><tr><td>Ja-En</td><td colspan=\"2\">abst title abst JICST 19,905,978 22,610,643</td></tr><tr><td/><td>ASPEC</td><td>3,013,886</td></tr><tr><td/><td>LCAS</td><td>6,090,535</td></tr><tr><td>En-Zh</td><td>LCAS title</td><td>1,070,719</td></tr><tr><td/><td>ISTIC pc</td><td>1,562,119</td></tr><tr><td>Ja-Zh</td><td>ASPEC</td><td>680,193</td></tr></table>", |
| "text": "Statistics of the bilingual dictionaries used for training.", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "content": "<table/>", |
| "text": "Statistics of the parallel corpora used for training (All the corpora belong to the general scientific domain, except for ISTIC pc that is a computer domain corpus).", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "html": null, |
| "content": "<table/>", |
| "text": "Statistics of the pivot phrase tables (for tuning and test sets combined).", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "html": null, |
| "content": "<table/>", |
| "text": "Evaluation results.", |
| "num": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |