| { |
| "paper_id": "D09-1038", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:39:41.971198Z" |
| }, |
| "title": "Better Synchronous Binarization for Machine Translation", |
| "authors": [ |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Lab Northeastern University Shenyang", |
| "location": { |
| "postCode": "110004", |
| "country": "China" |
| } |
| }, |
| "email": "xiaotong@mail.neu.edu.cn" |
| }, |
| { |
| "first": "Mu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "muli@microsoft.com" |
| }, |
| { |
| "first": "Dongdong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "dozhang@microsoft.com" |
| }, |
| { |
| "first": "Jingbo", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Lab Northeastern University Shenyang", |
| "location": { |
| "postCode": "110004", |
| "country": "China" |
| } |
| }, |
| "email": "zhujingbo@mail.neu.edu.cn" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "mingzhou@microsoft.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Binarization of Synchronous Context Free Grammars (SCFG) is essential for achieving polynomial time complexity of decoding for SCFG parsing based machine translation systems. In this paper, we first investigate the excess edge competition issue caused by a left-heavy binary SCFG derived with the method of Zhang et al. (2006). Then we propose a new binarization method to mitigate the problem by exploring other alternative equivalent binary SCFGs. We present an algorithm that iteratively improves the resulting binary SCFG, and empirically show that our method can improve a string-to-tree statistical machine translation system based on the synchronous binarization method in Zhang et al. (2006) on the NIST machine translation evaluation tasks.", |
| "pdf_parse": { |
| "paper_id": "D09-1038", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Binarization of Synchronous Context Free Grammars (SCFG) is essential for achieving polynomial time complexity of decoding for SCFG parsing based machine translation systems. In this paper, we first investigate the excess edge competition issue caused by a left-heavy binary SCFG derived with the method of Zhang et al. (2006). Then we propose a new binarization method to mitigate the problem by exploring other alternative equivalent binary SCFGs. We present an algorithm that iteratively improves the resulting binary SCFG, and empirically show that our method can improve a string-to-tree statistical machine translation system based on the synchronous binarization method in Zhang et al. (2006) on the NIST machine translation evaluation tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Recently Statistical Machine Translation (SMT) systems based on Synchronous Context Free Grammar (SCFG) have been extensively investigated (Chiang, 2005; Galley et al., 2004; Galley et al., 2006) and have achieved state-of-the-art performance. In these systems, machine translation decoding is cast as a synchronous parsing task. Because general SCFG parsing is an NPhard problem (Satta and Peserico, 2005) , practical SMT decoders based on SCFG parsing requires an equivalent binary SCFG that is directly learned from training data to achieve polynomial time complexity using the CKY algorithm (Kasami, 1965; Younger, 1967) borrowed from CFG parsing techniques. Zhang et al. (2006) proposed synchronous binarization, a principled method to binarize an SCFG in such a way that both the source-side and target-side virtual non-terminals have contiguous spans. This property of synchronous binarization guarantees the polynomial time complexity of SCFG parsers even when an n-gram language model is integrated, which has been proved to be one of the keys to the success of a string-to-tree syntax-based SMT system. However, as shown by Chiang (2007) , SCFGbased decoding with an integrated n-gram language model still has a time complexity of ( 3 4( \u22121) ), where m is the source sentence length, and is the vocabulary size of the language model. Although it is not exponential in theory, the actual complexity can still be very high in practice. Here is an example extracted from real data. Given the following SCFG rule: VP \u2192 VB NP \u4f1a JJR , VB NP will be JJR we can obtain a set of equivalent binary rules using the synchronous binarization method (Zhang et al., 2006) as follows:", |
| "cite_spans": [ |
| { |
| "start": 139, |
| "end": 153, |
| "text": "(Chiang, 2005;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 154, |
| "end": 174, |
| "text": "Galley et al., 2004;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 175, |
| "end": 195, |
| "text": "Galley et al., 2006)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 380, |
| "end": 406, |
| "text": "(Satta and Peserico, 2005)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 595, |
| "end": 609, |
| "text": "(Kasami, 1965;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 610, |
| "end": 624, |
| "text": "Younger, 1967)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 663, |
| "end": 682, |
| "text": "Zhang et al. (2006)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1134, |
| "end": 1147, |
| "text": "Chiang (2007)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1646, |
| "end": 1666, |
| "text": "(Zhang et al., 2006)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "VP \u2192 V 1 JJR , V 1 JJR V 1 \u2192 VB V 2 , VB V 2 V 2 \u2192 NP \u4f1a , NP will be", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This binarization is shown with the solid lines as binarization (a) in Figure 1 . We can see that binarization (a) requires that \"NP \u4f1a\" should be reduced at first. Data analysis shows that \"NP \u4f1a\" is a frequent pattern in the training corpus, and there are 874 binary rules of which the source language sides are \"NP \u4f1a\". Consequently these binary rules generate a large number of competing edges in the chart when \"NP \u4f1a\" is matched in decoding. To reduce the number of edges pro-posed in decoding, hypothesis re-combination is used to combine the equivalent edges in terms of dynamic programming. Generally, two edges can be re-combined if they satisfy the following two constraints: 1) the LHS (left-hand side) nonterminals are identical and the sub-alignments are the same (Zhang et al., 2006) ; and 2) the boundary words 1 on both sides of the partial translations are equal between the two edges (Chiang, 2007) . However, as shown in Figure 2 , the decoder still generates 801 edges after the hypothesis re-combination. As a result, aggressive pruning with beam search has to be employed to reduce the search space to make the decoding practical. Usually in beam search only a very small number of edges are kept in the beam of each chart cell (e.g. less than 100). These edges have to compete with each other to survive from the pruning. Obviously, more competing edges proposed during decoding can lead to a higher risk of making search errors. The edge competition problem for SMT decoding is not addressed in previous work (Zhang et al., 2006; Huang, 2007) in which each SCFG rule is binarized in a fixed way. Actually the results of synchronous binarization may not be the only solution. As illustrated in Figure 1 , the rule 1 For the case of n-gram language model integration, 2 \u00d7 ( \u2212 1) boundary words needs to be examined. can also be binarized as binarization (b) which is shown with the dashed lines.", |
| "cite_spans": [ |
| { |
| "start": 774, |
| "end": 794, |
| "text": "(Zhang et al., 2006)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 899, |
| "end": 913, |
| "text": "(Chiang, 2007)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1530, |
| "end": 1550, |
| "text": "(Zhang et al., 2006;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1551, |
| "end": 1563, |
| "text": "Huang, 2007)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 71, |
| "end": 79, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 937, |
| "end": 945, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1714, |
| "end": 1722, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "VB NP \u4f1a JJR (a) (b) V 2 V 1 V 2 ' V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We think that this problem can be alleviated by choosing better binarizations for SMT decoders, since there is generally more than one binarization for a SCFG rule. In our investigation, about 96% rules that need to be binarized have more than one binarization under the contiguous constraint. As shown in binarization (b) ( Figure 1 ), \"\u4f1a JJR\" is reduced first. In the decoder, the number of binary rules with the source-side \"\u4f1a JJR\" is 62, and the corresponding number of edges is 57 ( Figure 2 ). The two numbers are both much smaller than those of \"NP \u4f1a\" in (a). This is an informative clue that the binarization (b) could be better than the binarization (a) based on the following: the probability of pruning the rule in (a) is higher than that in (b) as the rule in (b) has fewer competitors and has more chances to survive during pruning.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 325, |
| "end": 334, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 489, |
| "end": 497, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper we propose a novel binarization method, aiming to find better binarizations to improve an SCFG-based machine translation system. We formulate the binarization optimization as a cost reduction process, where the cost is defined as the number of rules sharing a common source-side derivation in an SCFG. We present an algorithm, iterative cost reduction algorithm, to obtain better binarization for the SCFG learnt automatically from the training corpus. It can work with an efficient CKY-style binarizer to search for the lowest-cost binarization. We apply our method into a state-of-the-art string-to-tree SMT system. The experimental results show that our method outperforms the synchronous binarization method (Zhang et al., 2006) with over 0.8 BLEU scores on both NIST 2005 and NIST 2008 Chinese-to-English evaluation data sets.", |
| "cite_spans": [ |
| { |
| "start": 726, |
| "end": 746, |
| "text": "(Zhang et al., 2006)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The problem of binarization originates from the parsing problem in which several binarization methods are studied such as left/right binarization (Charniak et al., 1998; Tsuruoka and Tsujii, 2004) and head binarization (Charniak et al., 2006) . Generally, the pruning issue in SMT decoding is unnecessary for the parsing problem, and the accuracy of parsing does not rely on the binarization method heavily. Thus, many efforts on the binarization in parsing are made for the efficiency improvement instead of the accuracy improvement (Song et al., 2008) .", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 169, |
| "text": "(Charniak et al., 1998;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 170, |
| "end": 196, |
| "text": "Tsuruoka and Tsujii, 2004)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 219, |
| "end": 242, |
| "text": "(Charniak et al., 2006)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 534, |
| "end": 553, |
| "text": "(Song et al., 2008)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Binarization is also an important topic in the research of syntax-based SMT. A synchronous binarization method is proposed in (Zhang et al., 2006) whose basic idea is to build a left-heavy binary synchronous tree (Shapiro and Stephens, 1991) with a left-to-right shift-reduce algorithm. Target-side binarization is another binarization method which is proposed by Huang (2007) . It works in a left-to-right way on the target language side. Although this method is comparatively easy to be implemented, it just achieves the same performance as the synchronous binarization method (Zhang et al., 2006) for syntaxbased SMT systems. In addition, it cannot be easily integrated into the decoding of some syntax-based models (Galley et al., 2004; , because it does not guarantee contiguous spans on the source language side.", |
| "cite_spans": [ |
| { |
| "start": 126, |
| "end": 146, |
| "text": "(Zhang et al., 2006)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 213, |
| "end": 241, |
| "text": "(Shapiro and Stephens, 1991)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 364, |
| "end": 376, |
| "text": "Huang (2007)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 579, |
| "end": 599, |
| "text": "(Zhang et al., 2006)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 719, |
| "end": 740, |
| "text": "(Galley et al., 2004;", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As discussed in Section 1, binarizing an SCFG in a fixed (left-heavy) way (Zhang et al., 2006 ) may lead to a large number of competing edges and consequently high risk of making search errors. Fortunately, in most cases a binarizable SCFG can be binarized in different ways, which provides us with an opportunity to find a better solution than the default left-heavy binarization. An ideal solution to this problem could be that we define an exact edge competition estimation function and choose the best binary SCFG based on it. However, even for the rules with a common source-side, generally it is difficult to estimate the exact number of competing edges in the dynamic SCFG parsing process for machine translation, because in order to integrate an ngram language model, the actual number of edges not only depends on SCFG rules, but also depends on language model states which are specific to input sentences. Instead, we have to employ certain kinds of approximation of it. First we will introduce some notations frequently used in later discussions.", |
| "cite_spans": [ |
| { |
| "start": 74, |
| "end": 93, |
| "text": "(Zhang et al., 2006", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Synchronous Binarization Optimization by Cost Reduction", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We use = { \u2236 \u2192 , } to denote an SCFG, where is the \u210e rule in ; is the LHS (left hand side) non-terminal of ; and are the source-side and target-side RHS (right hand side) derivations of respectively. We use \u212c to denote the set of equivalent binary SCFG of . The goal of SCFG binarization is to find an appropriate binary SCFG \u2032 \u2208 \u212c . For , \u212c = { } \u2286 \u2032 \u2208 \u212c is the set of equivalent binary rules based on , where is the \u210e binary rule in \u212c . Figure 3 illustrates the meanings of these notations with a sample grammar.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 439, |
| "end": 447, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Notations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "VP \u2192 VB NP \u4f1a JJR , VB NP will be JJR S \u2192 NP \u4f1a VP , NP will VP", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Notations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "R 1 : R 2 : G VP \u2192 V 12 JJR , V 12 JJR (R 1 ) G' V 12 \u2192 VB V 13 , VB V 13 V 13 \u2192 NP \u4f1a , NP will be v 11 : v 12 : v 13 : S \u2192 V 22 VP , V 22 VP V 22 \u2192 NP \u4f1a , NP will v 21 : v 22 : (R 2 ) binarization ... v 11 v 12 v 22 S(\"VB NP \u4f1a JJR \", G') S(\"VB NP \u4f1a\", G') S(\"NP \u4f1a\", G') L(v 12 )=\"VB NP \u4f1a\" v 13", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Notations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "rule bucket The function (\u2022) is defined to map a resulting binary rule \u2032 to the sub-sequence in derived from . For example, as shown in Figure 3, the binary rule 13 covers the source subsequence \"NP \u4f1a\" in 1 , so 13 = \"NP \u4f1a\". Similarly, 12 = \"VB NP \u4f1a\". The function (\u2022) is used to group the rules in \u2032 with a common right-hand side derivation for source language. Given a binary rule \u2208 \u2032, we can put it into a bucket in which all the binary rules have the same source sub-sequence ( ). For example (Figure 3 ), as 12 = \"VB NP \u4f1a\", 12 is put into the bucket indexed by \"VB NP \u4f1a\". And 13 and 22 are put into the same bucket, since they have the same source sub-sequence \"NP \u4f1a\". Obviously, \u2032 can be divided into a set of mutual exclusive rule buckets by (\u2022).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 136, |
| "end": 142, |
| "text": "Figure", |
| "ref_id": null |
| }, |
| { |
| "start": 497, |
| "end": 506, |
| "text": "(Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Notations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In this paper, we use ( ( ), \u2032) to denote the bucket for the binary rules having the source subsequence ( ). For example, (\" \u4f1a\", \u2032) denotes the bucket for the binary rules having the source-side \"NP \u4f1a\". For simplicity, we also use ( , \u2032) to denote , \u2032 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Notations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Given a binary SCFG \u2032, it can be easily noticed that if a rule in the bucket ( , \u2032) can be applied to generate one or more new edges in SCFG parsing, any other rules in this bucket can also be applied because all of them can be reduced from the same underlying derivation ( ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Each application of other rules in the bucket ( , \u2032) can generate competing edges with the one based on . Intuitively, the size of bucket can be used to approximately indicate the actual number of competing edges on average, and reducing the size of bucket could help reduce the edges generated in a parsing chart by applying the rules in the bucket. Therefore, if we can find a method to greedily reduce the size of each bucket ( , \u2032), we can reduce the overall expected edge competitions when parsing with \u2032.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "However, it can be easily proved that the numbers of binary rules in any \u2032 \u2208 \u212c are the same, which implies that we cannot reduce the sizes of all buckets at the same time \u2014 removing a rule from one bucket means adding it to another. Allowing for this fact, the excess edge competition example shown in Section 1 is essentially caused by the uneven distribution of rules among different buckets \u2022 . Accordingly, our optimization objective should be a more even distribution of rules among buckets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the following, we formally define a metric to model the evenness of rule distribution over buckets. Given a binary SCFG \u2032 and a binary SCFG rule \u2208 \u2032, ( ) is defined as the cost function that maps to the size of the bucket , \u2032 :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "= , \u2032", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Obviously, all the binary rules in , \u2032 share a common cost value , \u2032 . For example (Figure 3) , both 13 and 22 are put into the same bucket \" \u4f1a\", \u2032 , so 13 = 22 = 2. The cost of the SCFG \u2032 is computed by summing up all the costs of SCFG rules in it:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 83, |
| "end": 93, |
| "text": "(Figure 3)", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2032 = ( ) \u2208 \u2032", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Back to our task, we are to find an equivalent binary SCFG \u2032 of with the lowest cost in terms of the cost function (. ) given in Equation 2:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "* = argmin \u2032 \u2208\u212c ( \u2032)", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Next we will show how * is related to the evenness of rule distribution among different buckets. Let \u2032 = { 1 , \u2026 , } be the set of rule buckets containing rules in \u2032, then the value of ( \u2032) can also be written as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2032 = 2 1\u2264 \u2264 (4) Assume =", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "is an empirical distribution of a discrete random variable , then the square deviation of the empirical distribution is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "2 = 1 ( \u2212 ) 2", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Noticing that \u03a3 = \u2032 and = \u2032 / , Equation (5) can be written as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "2 = 1 \u2032 \u2212 \u2032 2", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Since both and | \u2032| are constants, minimizing the cost function ( \u2032) is equivalent to minimizing the square deviation of the distribution of rules among different buckets. A binary SCFG with the lower cost indicates the rules are more evenly distributed in terms of derivation patterns on the source language side.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cost Reduction for SCFG Binarization", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Before moving on to discussing the algorithm which can optimize Equation (3) based on rule costs specified in Equation 1, we first present an algorithm to find the optimal solution to Equation (3) if we have known the cost setting of * and can use the costs as static values during binarization. Using this simplification, the problem of finding the binary SCFG * with minimal costs can be reduced to finding the optimal binarization \u212c * ( ) for each rule in .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Static Cost Reduction", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "To obtain \u212c * ( ) , we can employ a CKYstyle binarization algorithm which builds a compact binarization forest for the rule in bottomup direction. The algorithm combines two adjacent spans of each time, in which two spans can be combined if and only if they observe the BTG constraints \u2212 their translations are either sequentially or reversely adjacent in , the target-side derivation of . The key idea of this algorithm is that we only use the binarization tree with the lowest cost of each span for later combination, which can avoid enumerating all the possible binarization trees of using dynamic programming.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Static Cost Reduction", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Let be the sub-sequence spanning from p to q on the source-side, [ , ] be optimal binarization tree spanning , [ , ] be the cost of [ , ] , and [ , ] be the cost of any binary rules whose source-side is , then the cost of optimal binarization tree spanning can be computed as:", |
| "cite_spans": [ |
| { |
| "start": 132, |
| "end": 137, |
| "text": "[ , ]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Static Cost Reduction", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "[ , ] = min \u2264 \u2264 \u22121 ( [ , ] + [ , ] + [ + 1, ])", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Static Cost Reduction", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The algorithm is shown as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Static Cost Reduction", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Input: a SCFG rule and the cost function (. ). Output: the lowest cost binarization on 1: Function CKYBINARIZATION( , ) 2:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "for l = 2 to n do \u22b3 Length of span 3: for p = 1 to nl + 1 do \u22b3 Start of span 4:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "q = p + l \u22b3 End of span 5: for k = p to q -1 do \u22b3 Partition of span 6: if not CONSECUTIVE( , , + 1, ) then next loop 7: [ , ] \u2190 ( ) 8: curCost \u2190 , + , + [ + 1, ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "9: if curCost < minCost then 10:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "minCost \u2190 curCost 11:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "[ , ] \u2190 COMBINE( [ , ], [ + 1, ])", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "12:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": ", \u2190 minCost 13:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "return [1, ] 14: Function CONSECUTIVE(( a, b), (c, d)) 15: return (b = c -1) or (d = a -1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "where n is the number of tokens (consecutive terminals are viewed as a single token) on the source-side of . COMBINE( [ , ], [ + 1, ]) combines the two binary sub-trees into a larger sub-tree over . , = ( , ) means that the non-terminals covering have the consecutive indices ranging from a to b on the target-side. If the target non-terminal indices are not consecutive, we set , = (\u22121, \u22121). = ( \u2032) where \u2032 is any rule in the bucket , \u2032 . In the algorithm, lines 9-11 implement dynamic programming, and the function CONSECUTIVE checks whether the two spans can be combined. Figure 4 shows an example of the compact forest the algorithm builds, where the solid lines indicate the optimal binarization of the rule, while other alternatives pruned by dynamic programming are shown in dashed lines. The costs for binarization trees are computed based on the cost table given in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 575, |
| "end": 583, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 875, |
| "end": 882, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "VB NP \u4f1a V[1,2] V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "The time complexity of the CKY-based binarization algorithm is \u0398(n 3 ), which is higher than that of the linear binarization such as the synchronous binarization (Zhang et al., 2006) . But it is still efficient enough in practice, as there are generally only a few tokens (n < 5) on the source-sides of SCFG rules. In our experiments, the linear binarization method is just 2 times faster than the CKY-based binarization.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 182, |
| "text": "(Zhang et al., 2006)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "CYK-based binarization algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "However, (\u2022) cannot be easily predetermined in a static way as is assumed in Section 3.3 because it depends on \u2032 and should be updated whenever a rule in is binarized differently. In our work this problem is solved using the iterative cost reduction algorithm, in which the update of \u2032 and the cost function (\u2022) are coupled together.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative Cost Reduction", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Input: An SCFG Output: An equivalent binary SCFG \u2032 of 1: Function ITERATIVECOSTREDUCTION( ) 2: \u2032 \u2190 0 3: for each \u2208 0 do 4:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "( ) = , 0 5: while ( \u2032) does not converge do 6:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "for each \u2208 do 7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "[\u2212 ] \u2190 \u2032 \u2212 \u212c( ) 8:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "for each \u2208 \u212c( ) do 9:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "for each \u2032 \u2208 , \u2032 do 10:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2032 \u2190 \u2032 \u2212 1 11: \u212c( ) \u2190 CKYBINARIZATION( , ) 12: \u2032 \u2190 [\u2212 ] \u222a \u212c( ) 13:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "for each \u2208 \u212c( ) do 14:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "for each \u2032 \u2208 , \u2032 do 15:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2032 \u2190 \u2032 + 1 16: return \u2032 In the iterative cost reduction algorithm, we first obtain an initial binary SCFG 0 using the synchronous binarization method proposed in (Zhang et al., 2006) . Then 0 is assigned to an iterative variable \u2032. The cost of each binary rule in 0 is computed based on 0 according to Equation (1) (lines 3-4 in the algorithm).", |
| "cite_spans": [ |
| { |
| "start": 161, |
| "end": 181, |
| "text": "(Zhang et al., 2006)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "After initialization, \u2032 is updated by iteratively finding better binarization for each rule in . The basic idea is: for each in , we remove the current binarization result for from \u2032 (line 7), while the cost function (\u2022) is updated accordingly since the removal of binary rule \u2208 \u212c( ) results in the reduction of the size of the corresponding bucket , \u2032 . Lines 8-10 im-plement the cost reduction of each binary rule in the bucket , \u2032 . Next, we find the lowest cost binarization for based on the updated cost function (\u2022) with the CKY-based binarization algorithm presented in Section 3.3 (line 11).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "At last, the new binarization for is added back to \u2032 and (\u2022) is re-updated to synchronize with this change (lines 12-15). Figure 5 illustrates the differences between the static cost reduction and the iterative cost reduction. The algorithm stops when ( \u2032) does not decrease any more. Next we will show that ( \u2032) is guaranteed not to increase in the iterative process.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 122, |
| "end": 130, |
| "text": "Figure 5", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "For any \u212c( ) on , we have does not increase in the processing on each (lines 7-15), and ( \u2032) will finally converge to a local minimum when the algorithm stops.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Iterative cost reduction algorithm", |
| "sec_num": null |
| }, |
| { |
| "text": "The experiments are conducted on Chinese-to-English translation in a state-of-the-art string-to-tree SMT system. All the results are reported in terms of case-insensitive BLEU4(%).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our bilingual training corpus consists of about 350K bilingual sentences (9M Chinese words + 10M English words) 2 . Giza++ is employed to perform word alignment on the bilingual sentences. The parse trees on the English side are generated using the Berkeley Parser 3 . A 5-gram language model is trained on the English part of LDC bilingual training data and the Xinhua part of Gigaword corpus. Our development data set comes from NIST2003 evaluation data in which the sentences of more than 20 words are excluded to speed up the Minimum Error Rate Training (MERT). The test data sets are the NIST evaluation sets of 2005 and 2008.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Our string-to-tree SMT system is built based on the work of (Galley et al., 2006; , where both the minimal GHKM and SPMT rules are extracted from the training corpus, and the composed rules are generated by combining two or three minimal GHKM and SPMT rules. Before the rule extraction, we also binarize the parse trees on the English side using Wang et al. (2007) \"s method to increase the coverage of GHKM and SPMT rules. There are totally 4.26M rules after the low frequency rules are filtered out. The pruning strategy is similar to the cube pruning described in (Chiang, 2007) . To achieve acceptable translation speed, the beam size is set to 50 by default. The baseline system is based on the synchronous binarization (Zhang et al., 2006) .", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 81, |
| "text": "(Galley et al., 2006;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 346, |
| "end": 364, |
| "text": "Wang et al. (2007)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 567, |
| "end": 581, |
| "text": "(Chiang, 2007)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 725, |
| "end": 745, |
| "text": "(Zhang et al., 2006)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Besides the baseline (Zhang et al., 2006) and iterative cost reduction binarization methods, we also perform right-heavy and random synchronous binarizations for comparison. In this paper, the random synchronous binarization is obtained by: 1) performing the CKY binarization to build the binarization forest for an SCFG rule; then 2) performing a top-down traversal of the forest. In the traversal, we randomly pick a feasible binarization for each span, and then go on the traversal in the two branches of the picked binarization. Table 2 shows the costs of resulting binary SCFGs generated using different binarization methods. The costs of the baseline (left-heavy) and right-heavy binarization are similar, while the cost of the random synchronous binarization is lower than that of the baseline method 4 . As expected, the iterative cost reduction method obtains the lowest cost, which is much lower than that of the other three methods. Table 2 : Costs of the binary SCFGs generated using different binarization methods. Table 3 shows the performance of SMT systems based on different binarization methods. The iterative cost reduction binarization method achieves the best performance on the test sets as well as the development set. Compared with the baseline method, it obtains gains of 0.82 and 0.84 BLEU scores on NIST05 and NIST08 test sets respectively. Using the statistical significance test described by Koehn (2004) , the improvements are significant (p < 0.05). The baseline method and the right-heavy binarization method achieve similar performance, while the random synchronous binarization method performs slightly better than the baseline method, which agrees with the fact of the cost reduction shown in Table 2 . 
A possible reason that the random synchronous binarization method can outperform the baseline method lies in that compared with binarizing SCFG in a fixed way, the random synchronous binarization tends to give a more even distribution of rules among buckets, which alleviates the problem of edge competition. However, since the high-frequency source sub-sequences still have high probabilities to be generated in the binarization and lead to the excess competing edges, it just achieves a very small improvement.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 41, |
| "text": "(Zhang et al., 2006)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1421, |
| "end": 1433, |
| "text": "Koehn (2004)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 533, |
| "end": 540, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 944, |
| "end": 951, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1028, |
| "end": 1035, |
| "text": "Table 3", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1728, |
| "end": 1735, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Binarization Schemes", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We also study the impacts of cost reduction on translation accuracy over iterations in iterative cost reduction. Figure 6 and Figure 7 show the results on NIST05 and NIST08 test sets. We can see that the cost of the resulting binary SCFG drops greatly as the iteration count increases, especially in the first iteration, and the BLEU scores increase as the cost decreases. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 113, |
| "end": 121, |
| "text": "Figure 6", |
| "ref_id": null |
| }, |
| { |
| "start": 126, |
| "end": 134, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Translation Accuracy vs. Cost of Binary SCFG", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "In this section, we study the impacts of beam sizes on translation accuracy as well as competing edges. To explicitly investigate the issue under large beam sizes, we use a subset of NIST05 and NIST08 test sets for test, which has 50 Chinese sentences of no longer than 10 words. Figure 8 shows that the iterative cost reduction method is consistently better than the baseline method under various beam settings. Besides the experiment on the test set of short sentences, we also conduct the experiment on NIST05 test set. To achieve acceptable decoding speed, we range the beam size from 10 to 70. As shown in Figure 9 , the iterative cost reduction method also outperforms the baseline method under various beam settings on the large test set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 280, |
| "end": 288, |
| "text": "Figure 8", |
| "ref_id": null |
| }, |
| { |
| "start": 611, |
| "end": 620, |
| "text": "Figure 9", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of Beam Size", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Though enlarging beam size can reduce the search errors and improve the system performance, the decoding speed of string-to-tree SMT drops dramatically when we enlarge the beam size. The problem is more serious when long Figure 10 compares the baseline method and the iterative cost reduction method in terms of translation accuracy against the number of edges proposed during decoding. Actually, the number of edges proposed during decoding can be regarded as a measure of the size of search space. We can see that the iterative cost reduction method outperforms the baseline method under various search effort. The experimental results of this section show that compared with the baseline method, the iterative cost reduction method can lead to much fewer edges (about 25% reduction) as well as the higher BLEU scores under various beam settings.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 221, |
| "end": 230, |
| "text": "Figure 10", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Impact of Beam Size", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "In this section, we study the impacts of cost reduction on the edge competition in the chart cells of our CKY-based decoder. Two metrics are used to evaluate the degree of edge competition. They are the variance and the mean of the number of competing edges in the chart cells, where high variance means that in some chart cells the rules have high risk to be pruned due to the large number of competing edges. The same situation holds for the mean as well. Both of the two metrics are calculated on NIST05 test set, varying with the span length of chart cell. Figure 11 shows the cost of resulting binary SCFG and the variance of competing edges against iteration count in iterative cost reduction. We can see that both the cost and the variance reduce greatly as the iteration count increases. Figure 12 shows the case for mean, where the reduction of cost also leads to the reduction of the mean value. The results shown in Figure 11 and Figure 12 indicate that the cost reduction is helpful to reduce edge competition in the chart cells. We also perform decoding without pruning (i.e. beam size = \u221e) on a very small set which has 20 sentences of no longer than 7 words. In this experiment, the baseline system and our iterative cost reduction based system propose 14,454M and 10,846M competing edges respectively. These numbers can be seen as the real numbers of the edges proposed during decoding instead of an approximate number observed in the pruned search space. It suggests that our method can reduce the number of the edges in real search space effectively. A possible reason to 1.0E+6", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 561, |
| "end": 570, |
| "text": "Figure 11", |
| "ref_id": null |
| }, |
| { |
| "start": 796, |
| "end": 805, |
| "text": "Figure 12", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 927, |
| "end": 936, |
| "text": "Figure 11", |
| "ref_id": null |
| }, |
| { |
| "start": 941, |
| "end": 950, |
| "text": "Figure 12", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Edge Competition vs. Cost of Binary SCFG", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "1.0E+7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Edge Competition vs. Cost of Binary SCFG", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "1.0E+8", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Edge Competition vs. Cost of Binary SCFG", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "1.0E+9", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Edge Competition vs. Cost of Binary SCFG", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "1.0E+10", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Edge Competition vs. Cost of Binary SCFG", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "1.0E+7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Edge Competition vs. Cost of Binary SCFG", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "1.0E+8", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Edge Competition vs. Cost of Binary SCFG", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "1.0E+9 this result is that the cost reduction based binarization could reduce the probability of rule mismatching caused by binarization, which results in the reduction of the number of edges proposed during decoding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Edge Competition vs. Cost of Binary SCFG", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "This paper introduces a new binarization method, aiming at choosing better binarization for SCFGbased SMT systems. We demonstrate the effectiveness of our method on a state-of-the-art string-to-tree SMT system. Experimental results show that our method can significantly outperform the conventional synchronous binarization method, which indicates that better binarization selection is very beneficial to SCFG-based SMT systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this paper the cost of a binary rule is defined based on the competition among the binary rules that have the same source-sides. However, some binary rules with different source-sides may also have competitions in a chart cell. We think that the cost of a binary rule can be better estimated by taking the rules with different source-sides into account. We intend to study this issue in our future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "LDC2003E14, LDC2003E07, LDC2005T06 and LDC2005T103 http://code.google.com/p/berkeleyparser/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We perform random synchronous binarization for 5 times and report the average cost.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors would like to thank the anonymous reviewers for their pertinent comments, and Xinying Song, Nan Duan and Shasha Li for their valuable suggestions for improving this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Multilevel Coarse-to-Fine PCFG Parsing", |
| "authors": [ |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Micha", |
| "middle": [], |
| "last": "Elsner", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Austerweil", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Ellis", |
| "suffix": "" |
| }, |
| { |
| "first": "Isaac", |
| "middle": [], |
| "last": "Haxton", |
| "suffix": "" |
| }, |
| { |
| "first": "Catherine", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Shrivaths", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Moore", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Pozar", |
| "suffix": "" |
| }, |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Vu", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of HLT-NAACL 2006", |
| "volume": "", |
| "issue": "", |
| "pages": "168--175", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eugene Charniak, Mark Johnson, Micha Elsner, Jo- seph Austerweil, David Ellis, Isaac Haxton, Cathe- rine Hill, R. Shrivaths, Jeremy Moore, Michael Po- zar, and Theresa Vu. 2006. Multilevel Coarse-to- Fine PCFG Parsing. In Proc. of HLT-NAACL 2006, New York, USA, 168-175.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Edge-Based Best-First Chart Parsing", |
| "authors": [ |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proc. of the Six Workshop on Very Large Corpora", |
| "volume": "", |
| "issue": "", |
| "pages": "127--133", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eugene Charniak, Sharon Goldwater, and Mark John- son. 1998. Edge-Based Best-First Chart Parsing. In Proc. of the Six Workshop on Very Large Corpora, pages: 127-133.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A Hierarchical Phrase-Based Model for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proc. of ACL 2005", |
| "volume": "", |
| "issue": "", |
| "pages": "263--270", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Chiang. 2005. A Hierarchical Phrase-Based Model for Statistical Machine Translation. In Proc. of ACL 2005, Ann Arbor, Michigan, pages: 263- 270.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Hierarchical Phrase-based Translation", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computational Linguistics", |
| "volume": "33", |
| "issue": "2", |
| "pages": "202--208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Chiang. 2007. Hierarchical Phrase-based Translation. Computational Linguistics. 33(2): 202-208.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Scalable Inference and Training of Context-Rich Syntactic Translation Models", |
| "authors": [ |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Graehl", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Deneefe", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ignacio", |
| "middle": [], |
| "last": "Thayer", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "961--968", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michel Galley, Jonathan Graehl, Kevin Knight, Da- niel Marcu, Steve DeNeefe, Wei Wang, and Igna- cio Thayer. 2006. Scalable Inference and Training of Context-Rich Syntactic Translation Models. In Proc. of ACL 2006, Sydney, Australia, pages: 961- 968.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "What\"s in a translation rule", |
| "authors": [ |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Hopkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of HLT-NAACL 2004", |
| "volume": "", |
| "issue": "", |
| "pages": "273--280", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michel Galley, Mark Hopkins, Kevin Knight, and Daniel Marcu. 2004. What\"s in a translation rule? In Proc. of HLT-NAACL 2004, Boston, USA, pag- es: 273-280.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Binarization, Synchronous Binarization, and Target-side binarization", |
| "authors": [ |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of HLT-NAACL 2007 / AMTA workshop on Syntax and Structure in Statistical Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "33--40", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang Huang. 2007. Binarization, Synchronous Bina- rization, and Target-side binarization. In Proc. of HLT-NAACL 2007 / AMTA workshop on Syntax and Structure in Statistical Translation, New York, USA, pages: 33-40.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "An Efficient Recognition and Syntax Analysis Algorithm for Context-Free Languages", |
| "authors": [ |
| { |
| "first": "Tadao", |
| "middle": [], |
| "last": "Kasami", |
| "suffix": "" |
| } |
| ], |
| "year": 1965, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tadao Kasami. 1965. An Efficient Recognition and Syntax Analysis Algorithm for Context-Free Lan- guages. Technical Report AFCRL-65-758, Air Force Cambridge Research Laboratory, Bedford, Massachusetts.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Statistical Significance Tests for Machine Translation Evaluation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "388--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2004. Statistical Significance Tests for Machine Translation Evaluation. In Proc. of EMNLP 2004, Barcelona, Spain , pages: 388-395.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "SPMT: Statistical machine translation with syntactified target language phrases", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdessamad", |
| "middle": [], |
| "last": "Echihabi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "44--52", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Marcu, Wei Wang, Abdessamad Echihabi, and Kevin Knight. 2006. SPMT: Statistical machine translation with syntactified target language phras- es. In Proc. of EMNLP 2006, Sydney, Australia, pages: 44-52.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Some Computational Complexity Results for Synchronous Context-Free Grammars", |
| "authors": [ |
| { |
| "first": "Giorgio", |
| "middle": [], |
| "last": "Satta", |
| "suffix": "" |
| }, |
| { |
| "first": "Enoch", |
| "middle": [], |
| "last": "Peserico", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proc. of HLT-EMNLP 2005", |
| "volume": "", |
| "issue": "", |
| "pages": "803--810", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giorgio Satta and Enoch Peserico. 2005. Some Com- putational Complexity Results for Synchronous Context-Free Grammars. In Proc. of HLT-EMNLP 2005, Vancouver, pages: 803-810.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Bootstrap percolation, the Sch oder numbers, and the n-kings problem", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Shapiro", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "B" |
| ], |
| "last": "Stephens", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "SIAM Journal on Discrete Mathematics", |
| "volume": "4", |
| "issue": "2", |
| "pages": "275--280", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "L. Shapiro and A. B. Stephens. 1991. Bootstrap per- colation, the Sch oder numbers, and the n-kings problem. SIAM Journal on Discrete Mathematics, 4(2):275-280.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Better Binarization for the CKY Parsing", |
| "authors": [ |
| { |
| "first": "Xinying", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Shilin", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "167--176", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xinying Song, Shilin Ding and Chin-Yew Lin. 2008. Better Binarization for the CKY Parsing. In Proc. of EMNLP 2008, Hawaii, pages: 167-176.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Iterative CKY Parsing for Probabilistic Context-Free Grammars", |
| "authors": [ |
| { |
| "first": "Yoshimasa", |
| "middle": [], |
| "last": "Tsuruoka", |
| "suffix": "" |
| }, |
| { |
| "first": "Junichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "52--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshimasa Tsuruoka and Junichi Tsujii. 2004. Itera- tive CKY Parsing for Probabilistic Context-Free Grammars. In Proc. of IJCNLP 2004, pages: 52- 60.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Binarizing Syntax Trees to Improve Syntax-Based Machine Translation Accuracy", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of EMNLP-CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "746--754", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Wang and Kevin Knight and Daniel Marcu. 2007. Binarizing Syntax Trees to Improve Syntax- Based Machine Translation Accuracy. In Proc. of EMNLP-CoNLL 2007, Prague, Czech Republic, pages: 746-754.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Recognition and Parsing of Context-Free Languages in Time n 3 . Information and Control", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "H" |
| ], |
| "last": "Younger", |
| "suffix": "" |
| } |
| ], |
| "year": 1967, |
| "venue": "", |
| "volume": "10", |
| "issue": "", |
| "pages": "189--208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. H. Younger. 1967. Recognition and Parsing of Context-Free Languages in Time n 3 . Information and Control, 10(2):189-208.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Synchronous Binarization for Machine Translation", |
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of HLT-NAACL 2006", |
| "volume": "", |
| "issue": "", |
| "pages": "256--263", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Zhang, Liang Huang, Daniel Gildea, and Kevin Knight. 2006. Synchronous Binarization for Ma- chine Translation. In Proc. of HLT-NAACL 2006, New York, USA, pages: 256-263.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Edge competitions caused by different binarizations" |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Binarization on a sample grammar" |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Comparison between the static cost reduction and the iterative cost reduction" |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Cost of binary SCFG vs. BLEU4 (NIST05) Cost of binary SCFG vs. BLEU4 (NIST08)" |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "BLEU4 against beam size (small test set) BLEU4 against beam size (NIST05)" |
| }, |
| "FIGREF5": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "BLEU4 against competing edges" |
| }, |
| "FIGREF6": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Cost of binary SCFG vs. variance of competing edge number (NIST05) Cost of binary SCFG vs. mean of competing edge number (NIST05)" |
| }, |
| "TABREF2": { |
| "content": "<table/>", |
| "text": "Sub-sequences and corresponding costs", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF6": { |
| "content": "<table><tr><td>: Performance (BLEU4(%)) of different</td></tr><tr><td>binarization methods. * = significantly better than</td></tr><tr><td>baseline (p < 0.05).</td></tr></table>", |
| "text": "", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| }, |
| "TABREF7": { |
| "content": "<table><tr><td>BLEU4(%)</td><td/><td/><td/><td/><td/><td>cost of G'</td></tr><tr><td>38.8</td><td/><td/><td/><td/><td/><td>1.0E+10</td></tr><tr><td>38.6</td><td/><td/><td/><td/><td/><td/></tr><tr><td>38.2 38.4</td><td/><td/><td/><td/><td/><td>1.0E+09</td></tr><tr><td>38</td><td/><td/><td/><td/><td/><td/></tr><tr><td>37.8</td><td/><td/><td/><td/><td/><td>1.0E+08</td></tr><tr><td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>iteration</td></tr><tr><td colspan=\"4\">performance(BLEU4)</td><td/><td colspan=\"2\">cost</td></tr><tr><td>BLEU4(%)</td><td/><td/><td/><td/><td/><td>cost of G'</td></tr><tr><td>28.4</td><td/><td/><td/><td/><td/><td>1.0E+10</td></tr><tr><td>28.2</td><td/><td/><td/><td/><td/><td/></tr><tr><td>27.8 28</td><td/><td/><td/><td/><td/><td>1.0E+09</td></tr><tr><td>27.6</td><td/><td/><td/><td/><td/><td/></tr><tr><td>27.4</td><td/><td/><td/><td/><td/><td>1.0E+08</td></tr><tr><td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td/></tr><tr><td colspan=\"4\">performance(BLEU4)</td><td/><td/><td>cost</td></tr></table>", |
| "text": "sentences are translated. For example, when the beam size is set to a larger number (e.g. 200), our decoder takes nearly one hour to translate a sentence whose length is about 20 on a 3GHz CPU. Decoding on the entire NIST05 and NIST08 test sets with large beam sizes is impractical.", |
| "num": null, |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |