| { |
| "paper_id": "P14-1011", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:04:46.168139Z" |
| }, |
| "title": "Bilingually-constrained Phrase Embeddings for Machine Translation", |
| "authors": [ |
| { |
| "first": "Jiajun", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition", |
| "institution": "CASIA", |
| "location": { |
| "settlement": "Beijing", |
| "country": "P.R. China" |
| } |
| }, |
| "email": "jjzhang@nlpr.ia.ac.cn" |
| }, |
| { |
| "first": "Shujie", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Research Asia", |
| "location": { |
| "settlement": "Beijing", |
| "country": "P.R. China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Mu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Research Asia", |
| "location": { |
| "settlement": "Beijing", |
| "country": "P.R. China" |
| } |
| }, |
| "email": "muli@microsoft.com" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Research Asia", |
| "location": { |
| "settlement": "Beijing", |
| "country": "P.R. China" |
| } |
| }, |
| "email": "mingzhou@microsoft.com" |
| }, |
| { |
| "first": "Chengqing", |
| "middle": [], |
| "last": "Zong", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition", |
| "institution": "CASIA", |
| "location": { |
| "settlement": "Beijing", |
| "country": "P.R. China" |
| } |
| }, |
| "email": "cqzong@nlpr.ia.ac.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We propose Bilingually-constrained Recursive Auto-encoders (BRAE) to learn semantic phrase embeddings (compact vector representations for phrases), which can distinguish the phrases with different semantic meanings. The BRAE is trained in a way that minimizes the semantic distance of translation equivalents and maximizes the semantic distance of non-translation pairs simultaneously. After training, the model learns how to embed each phrase semantically in two languages and also learns how to transform semantic embedding space in one language to the other. We evaluate our proposed method on two end-to-end SMT tasks (phrase table pruning and decoding with phrasal semantic similarities) which need to measure semantic similarity between a source phrase and its translation candidates. Extensive experiments show that the BRAE is remarkably effective in these two tasks.", |
| "pdf_parse": { |
| "paper_id": "P14-1011", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We propose Bilingually-constrained Recursive Auto-encoders (BRAE) to learn semantic phrase embeddings (compact vector representations for phrases), which can distinguish the phrases with different semantic meanings. The BRAE is trained in a way that minimizes the semantic distance of translation equivalents and maximizes the semantic distance of non-translation pairs simultaneously. After training, the model learns how to embed each phrase semantically in two languages and also learns how to transform semantic embedding space in one language to the other. We evaluate our proposed method on two end-to-end SMT tasks (phrase table pruning and decoding with phrasal semantic similarities) which need to measure semantic similarity between a source phrase and its translation candidates. Extensive experiments show that the BRAE is remarkably effective in these two tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Due to the powerful capacity of feature learning and representation, Deep (multi-layer) Neural Networks (DNN) have achieved a great success in speech and image processing (Kavukcuoglu et al., 2010; Krizhevsky et al., 2012; Dahl et al., 2012) .", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 197, |
| "text": "(Kavukcuoglu et al., 2010;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 198, |
| "end": 222, |
| "text": "Krizhevsky et al., 2012;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 223, |
| "end": 241, |
| "text": "Dahl et al., 2012)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recently, statistical machine translation (SMT) community has seen a strong interest in adapting and applying DNN to many tasks, such as word alignment , translation confidence estimation (Mikolov et al., 2010; Zou et al., 2013) , phrase reordering prediction , translation modelling (Auli et al., 2013; Kalchbrenner and Blunsom, 2013) and language modelling (Duh et al., 2013; Vaswani et al., 2013) . Most of these works attempt to improve some components in SMT based on word embedding, which converts a word into a dense, low dimensional, real-valued vector representation (Bengio et al., 2003; Bengio et al., 2006; Collobert and Weston, 2008; Mikolov et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 210, |
| "text": "(Mikolov et al., 2010;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 211, |
| "end": 228, |
| "text": "Zou et al., 2013)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 284, |
| "end": 303, |
| "text": "(Auli et al., 2013;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 304, |
| "end": 335, |
| "text": "Kalchbrenner and Blunsom, 2013)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 359, |
| "end": 377, |
| "text": "(Duh et al., 2013;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 378, |
| "end": 399, |
| "text": "Vaswani et al., 2013)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 576, |
| "end": 597, |
| "text": "(Bengio et al., 2003;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 598, |
| "end": 618, |
| "text": "Bengio et al., 2006;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 619, |
| "end": 646, |
| "text": "Collobert and Weston, 2008;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 647, |
| "end": 668, |
| "text": "Mikolov et al., 2013)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "However, in the conventional (phrase-based) SMT, phrases are the basic translation units. The models using word embeddings as the direct inputs to DNN cannot make full use of the whole syntactic and semantic information of the phrasal translation rules. Therefore, in order to successfully apply DNN to model the whole translation process, such as modelling the decoding process, learning compact vector representations for the basic phrasal translation units is the essential and fundamental work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we explore the phrase embedding, which represents a phrase (sequence of words) with a real-valued vector. In some previous works, phrase embedding has been discussed from different views. Socher et al. (2011) make the phrase embeddings capture the sentiment information. Socher et al. (2013a) enable the phrase embeddings to mainly capture the syntactic knowledge. attempt to encode the reordering pattern in the phrase embeddings. Kalchbrenner and Blunsom (2013) utilize a simple convolution model to generate phrase embeddings from word embeddings. Mikolov et al. (2013) consider a phrase as an indivisible n-gram. Obviously, these methods of learning phrase embeddings either focus on some aspects of the phrase (e.g. reordering pattern), or impose strong assumptions (e.g. bagof-words or indivisible n-gram). Therefore, these phrase embeddings are not suitable to fully represent the phrasal translation units in SMT due to the lack of semantic meanings of the phrase.", |
| "cite_spans": [ |
| { |
| "start": 203, |
| "end": 223, |
| "text": "Socher et al. (2011)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 286, |
| "end": 307, |
| "text": "Socher et al. (2013a)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 447, |
| "end": 478, |
| "text": "Kalchbrenner and Blunsom (2013)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 566, |
| "end": 587, |
| "text": "Mikolov et al. (2013)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Instead, we focus on learning phrase embeddings from the view of semantic meaning, so that our phrase embedding can fully represent the phrase and best fit the phrase-based SMT. Assuming the phrase is a meaningful composition of its internal words, we propose Bilingually-constrained Recursive Auto-encoders (BRAE) to learn semantic phrase embeddings. The core idea behind is that a phrase and its correct translation should share the same semantic meaning. Thus, they can supervise each other to learn their semantic phrase embeddings. Similarly, non-translation pairs should have different semantic meanings, and this information can also be used to guide learning semantic phrase embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In our method, the standard recursive autoencoder (RAE) pre-trains the phrase embedding with an unsupervised algorithm by minimizing the reconstruction error (Socher et al., 2010) , while the bilingually-constrained model learns to fine-tune the phrase embedding by minimizing the semantic distance between translation equivalents and maximizing the semantic distance between non-translation pairs.", |
| "cite_spans": [ |
| { |
| "start": 158, |
| "end": 179, |
| "text": "(Socher et al., 2010)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We use an example to explain our model. As illustrated in Fig. 1 , the Chinese phrase on the left and the English phrase on the right are translations with each other. If we learn the embedding of the Chinese phrase correctly, we can regard it as the gold representation for the English phrase and use it to guide the process of learning English phrase embedding. In the other direction, the Chinese phrase embedding can be learned in the same way. This procedure can be performed with a co-training style algorithm so as to minimize the semantic distance between the translation equivalents 1 . In this way, the resulting Chinese and English phrase embeddings will capture the semantics as much as possible. Furthermore, a transformation function between the Chinese and English semantic spaces can be learned as well.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 58, |
| "end": 64, |
| "text": "Fig. 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "With the learned model, we can accurately measure the semantic similarity between a source phrase and a translation candidate. Accordingly, we evaluate the BRAE model on two end-toend SMT tasks (phrase table pruning and decoding with phrasal semantic similarities) which need to check whether a translation candidate and the source phrase are in the same meaning. In phrase table pruning, we discard the phrasal translation rules with low semantic similarity. In decoding with phrasal semantic similarities, we apply the semantic similarities of the phrase pairs as new features during decoding to guide translation can- Figure 1 : A motivation example for the BRAE model. didate selection. The experiments show that up to 72% of the phrase table can be discarded without significant decrease on the translation quality, and in decoding with phrasal semantic similarities up to 1.7 BLEU score improvement over the state-ofthe-art baseline can be achieved. In addition, our semantic phrase embeddings have many other potential applications. For instance, the semantic phrase embeddings can be directly fed to DNN to model the decoding process. Besides SMT, the semantic phrase embeddings can be used in other cross-lingual tasks (e.g. cross-lingual question answering) and monolingual applications such as textual entailment, question answering and paraphrase detection.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 621, |
| "end": 629, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recently, phrase embedding has drawn more and more attention. There are three main perspectives handling this task in monolingual languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "One method considers the phrases as bag-ofwords and employs a convolution model to transform the word embeddings to phrase embeddings (Collobert et al., 2011; Kalchbrenner and Blunsom, 2013) . Gao et al. (2013) also use bag-ofwords but learn BLEU sensitive phrase embeddings. This kind of approaches does not take the word order into account and loses much information. Instead, our bilingually-constrained recursive auto-encoders not only learn the composition mechanism of generating phrases from words, but also fine tune the word embeddings during the model training stage, so that we can induce the full information of the phrases and internal words.", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 158, |
| "text": "(Collobert et al., 2011;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 159, |
| "end": 190, |
| "text": "Kalchbrenner and Blunsom, 2013)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 193, |
| "end": 210, |
| "text": "Gao et al. (2013)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Another method (Mikolov et al., 2013 ) deals with the phrases having a meaning that is not a simple composition of the meanings of its individual words, such as New York Times. They first find the phrases of this kind. Then, they regard these phrases as indivisible units, and learn their embeddings with the context information. However, this kind of phrase embedding is hard to capture full semantics since the context of a phrase is limited. Furthermore, this method can only account for a very small part of phrases, since most of the phrases are compositional. In contrast, our method attempts to learn the semantic vector representation for any phrase.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 36, |
| "text": "(Mikolov et al., 2013", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The third method views any phrase as the meaningful composition of its internal words. The recursive auto-encoder is typically adopted to learn the way of composition (Socher et al., 2010; Socher et al., 2011; Socher et al., 2013a; Socher et al., 2013b; . They pre-train the RAE with an unsupervised algorithm. And then, they fine-tune the RAE according to the label of the phrase, such as the syntactic category in parsing (Socher et al., 2013a) , the polarity in sentiment analysis (Socher et al., 2011; Socher et al., 2013b) , and the reordering pattern in SMT . This kind of semi-supervised phrase embedding is in fact performing phrase clustering with respect to the phrase label. For example, in the RAEbased phrase reordering model for SMT , the phrases with the similar reordering tendency (e.g. monotone or swap) are close to each other in the embedding space, such as the prepositional phrases. Obviously, this kind methods of semi-supervised phrase embedding do not fully address the semantic meaning of the phrases. Although we also follow the composition-based phrase embedding, we are the first to focus on the semantic meanings of the phrases and propose a bilingually-constrained model to induce the semantic information and learn transformation of the semantic space in one language to the other.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 188, |
| "text": "(Socher et al., 2010;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 189, |
| "end": 209, |
| "text": "Socher et al., 2011;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 210, |
| "end": 231, |
| "text": "Socher et al., 2013a;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 232, |
| "end": 253, |
| "text": "Socher et al., 2013b;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 424, |
| "end": 446, |
| "text": "(Socher et al., 2013a)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 484, |
| "end": 505, |
| "text": "(Socher et al., 2011;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 506, |
| "end": 527, |
| "text": "Socher et al., 2013b)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Auto-encoders", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingually-constrained Recursive", |
| "sec_num": "3" |
| }, |
| { |
| "text": "This section introduces the Bilinguallyconstrained Recursive Auto-encoders (BRAE), that is inspired by two observations. First, the recursive auto-encoder provides a reasonable composition mechanism to embed each phrase. And the semi-supervised phrase embedding (Socher et al., 2011; Socher et al., 2013a; further indicates that phrase embedding can be tuned with respect to the label. Second, even though we have no correct semantic phrase representation as the gold label, the phrases sharing the same meaning provide an indirect but feasible way.", |
| "cite_spans": [ |
| { |
| "start": 262, |
| "end": 283, |
| "text": "(Socher et al., 2011;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 284, |
| "end": 305, |
| "text": "Socher et al., 2013a;", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingually-constrained Recursive", |
| "sec_num": "3" |
| }, |
| { |
| "text": "x 1 x 2 x 3 x 4 y 1 =f(W (1) [x 1 ; x 2 ]+b) y 2 =f(W (1) [y 1 ; x 3 ]+b) y 3 =f(W (1) [y 2 ; x 4 ]+b)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingually-constrained Recursive", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Figure 2: A recursive auto-encoder for a fourword phrase. The empty nodes are the reconstructions of the input.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingually-constrained Recursive", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We will first briefly present the unsupervised phrase embedding, and then describe the semi-supervised framework. After that, we introduce the BRAE on the network structure, objective function and parameter inference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bilingually-constrained Recursive", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In phrase embedding using composition, the word vector representation is the basis and serves as the input to the neural network. After learning word embeddings with DNN (Bengio et al., 2003; Collobert and Weston, 2008; Mikolov et al., 2013) , each word in the vocabulary V corresponds to a vector x \u2208 R n , and all the vectors are stacked into an embedding matrix L \u2208 R n\u00d7|V | .", |
| "cite_spans": [ |
| { |
| "start": 170, |
| "end": 191, |
| "text": "(Bengio et al., 2003;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 192, |
| "end": 219, |
| "text": "Collobert and Weston, 2008;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 220, |
| "end": 241, |
| "text": "Mikolov et al., 2013)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Vector Representations", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "Given a phrase which is an ordered list of m words, each word has an index i into the columns of the embedding matrix L. The index i is used to retrieve the word's vector representation using a simple multiplication with a binary vector e which is zero in all positions except for the ith index:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Vector Representations", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "x i = Le i \u2208 R n (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Vector Representations", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "Note that n is usually set empirically, such as n = 50, 100, 200. Throughout this paper, n = 3 is used for better illustration as shown in Fig. 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 139, |
| "end": 145, |
| "text": "Fig. 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Word Vector Representations", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "Assuming we are given a phrase", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "w 1 w 2 \u2022 \u2022 \u2022 w m , it is first projected into a list of vectors (x 1 , x 2 , \u2022 \u2022 \u2022 , x m )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "using Eq. 1. The RAE learns the vector representation of the phrase by recursively combining two children vectors in a bottomup manner (Socher et al., 2011) . Fig. 2 illustrates an instance of a RAE applied to a binary tree, in which a standard auto-encoder (in box) is re-used at each node. The standard auto-encoder aims at learning an abstract representation of its input. For two children c 1 = x 1 and c 2 = x 2 , the autoencoder computes the parent vector y 1 as follows:", |
| "cite_spans": [ |
| { |
| "start": 135, |
| "end": 156, |
| "text": "(Socher et al., 2011)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 159, |
| "end": 165, |
| "text": "Fig. 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p = f (W (1) [c 1 ; c 2 ] + b (1) )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Where we multiply the parameter matrix W (1) \u2208 R n\u00d72n by the concatenation of two children [c 1 ; c 2 ] \u2208 R 2n\u00d71 . After adding a bias term b (1) , we apply an element-wise activation function such as f = tanh(\u2022), which is used in our experiments. In order to apply this auto-encoder to each pair of children, the representation of the parent p should have the same dimensionality as the c i 's.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "To assess how well the parent's vector represents its children, the standard auto-encoder reconstructs the children in a reconstruction layer:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "[c 1 ; c 2 ] = f (2) (W (2) p + b (2) )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Where c 1 and c 2 are reconstructed children, W (2) and b (2) are parameter matrix and bias term for reconstruction respectively, and", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 51, |
| "text": "(2)", |
| "ref_id": null |
| }, |
| { |
| "start": 58, |
| "end": 61, |
| "text": "(2)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "f (2) = tanh(\u2022).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "To obtain the optimal abstract representation of the inputs, the standard auto-encoder tries to minimize the reconstruction errors between the inputs and the reconstructed ones during training:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "E rec ([c 1 ; c 2 ]) = 1 2 ||[c 1 ; c 2 ] \u2212 [c 1 ; c 2 ]|| 2", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Given y 1 = p, we can use Eq. 2 again to compute y 2 by setting the children to be [c 1 ; c 2 ] = [y 1 ; x 3 ]. The same auto-encoder is re-used until the vector of the whole phrase is generated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "For unsupervised phrase embedding, the only objective is to minimize the sum of reconstruction errors at each node in the optimal binary tree:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "RAE \u03b8 (x) = argmin y\u2208A(x) s\u2208y E rec ([c 1 ; c 2 ] s ) (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Where x is the list of vectors of a phrase, and A(x) denotes all the possible binary trees that can be built from inputs x. A greedy algorithm (Socher et al., 2011) is used to generate the optimal binary tree y. The parameters \u03b8 = (W, b) are optimized over all the phrases in the training data.", |
| "cite_spans": [ |
| { |
| "start": 143, |
| "end": 164, |
| "text": "(Socher et al., 2011)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "RAE-based Phrase Embedding", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "The above RAE is completely unsupervised and can only induce general representations of the Figure 3 : An illustration of a semi-supervised RAE unit. Red nodes show the label distribution.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 92, |
| "end": 100, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Semi-supervised Phrase Embedding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "multi-word phrases. Several researchers extend the original RAEs to a semi-supervised setting so that the induced phrase embedding can predict a target label, such as polarity in sentiment analysis (Socher et al., 2011) , syntactic category in parsing (Socher et al., 2013a) and phrase reordering pattern in SMT .", |
| "cite_spans": [ |
| { |
| "start": 198, |
| "end": 219, |
| "text": "(Socher et al., 2011)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 252, |
| "end": 274, |
| "text": "(Socher et al., 2013a)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semi-supervised Phrase Embedding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the semi-supervised RAE for phrase embedding, the objective function over a (phrase, label) pair (x, t) includes the reconstruction error and the prediction error, as illustrated in Fig. 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 185, |
| "end": 191, |
| "text": "Fig. 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Semi-supervised Phrase Embedding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "E(x, t; \u03b8) = \u03b1E rec (x, t; \u03b8)+(1\u2212\u03b1)E pred (x, t; \u03b8)", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Semi-supervised Phrase Embedding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Where the hyper-parameter \u03b1 is used to balance the reconstruction and prediction error. For label prediction, the cross-entropy error is usually used to calculate E pred . By optimizing the above objective, the phrases in the vector embedding space will be grouped according to the labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semi-supervised Phrase Embedding", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We know from the semi-supervised phrase embedding that the learned vector representation can be well adapted to the given label. Therefore, we can imagine that learning semantic phrase embedding is reasonable if we are given gold vector representations of the phrases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The BRAE Model", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "However, no gold semantic phrase embedding exists. Fortunately, we know the fact that the two phrases should share the same semantic representation if they express the same meaning. We can make inference from this fact that if a model can learn the same embedding for any phrase pair sharing the same meaning, the learned embedding must encode the semantics of the phrases and the corresponding model is our desire.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The BRAE Model", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "As translation equivalents share the same semantic meaning, we employ high-quality phrase translation pairs as training corpus in this work. Accordingly, we propose the Bilinguallyconstrained Recursive Auto-encoders (BRAE), Figure 4 : An illustration of the bilingualconstrained recursive auto-encoders. The two phrases are translations with each other.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 224, |
| "end": 232, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The BRAE Model", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "whose basic goal is to minimize the semantic distance between the phrases and their translations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The BRAE Model", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Unlike previous methods, the BRAE model jointly learns two RAEs (Fig. 4 shows the network structure): one for source language and the other for target language. For a phrase pair (s, t), two kinds of errors are involved: 1. reconstruction error E rec (s, t; \u03b8): how well the learned vector representations p s and p t represent the phrase s and t respectively?", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 64, |
| "end": 71, |
| "text": "(Fig. 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Objective Function", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "E rec (s, t; \u03b8) = E rec (s; \u03b8) + E rec (t; \u03b8)", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "The Objective Function", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "2. semantic error E sem (s, t; \u03b8): what is the semantic distance between the learned vector representations p s and p t ?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Objective Function", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "Since word embeddings for two languages are learned separately and locate in different vector space, we do not enforce the phrase embeddings in two languages to be in the same semantic vector space. We suppose there is a transformation between the two semantic embedding spaces. Thus, the semantic distance is bidirectional: the distance between p t and the transformation of p s , and that between p s and the transformation of p t . As a result, the overall semantic error becomes: Finally, we calculate their Euclidean distance:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Objective Function", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "E sem (s, t; \u03b8) = E sem (s|t, \u03b8) + E sem (t|s, \u03b8) (8) Where E sem (s|t, \u03b8) = E sem (p t , f (W l s p s + b l s ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Objective Function", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "E sem (s|t, \u03b8) = 1 2 ||p t \u2212 f (W l s p s + b l s )|| 2", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "The Objective Function", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "E sem (t|s, \u03b8) can be calculated in exactly the same way. For the phrase pair (s, t), the joint error is: E(s, t; \u03b8) = \u03b1E rec (s, t; \u03b8) + (1 \u2212 \u03b1)E sem (s, t; \u03b8) (10) The hyper-parameter \u03b1 weights the reconstruction and semantic error. The final BRAE objective over the phrase pairs training set (S, T ) becomes:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Objective Function", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "J BRAE = 1 N (s,t)\u2208(S,T ) E(s, t; \u03b8) + \u03bb 2 ||\u03b8|| 2 (11)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Objective Function", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "Ideally, we want the learned BRAE model to make sure that the semantic error for the positive example (a source phrase s and its correct translation t) is much smaller than that for the negative example (the source phrase s and a bad translation t ). However, the current model cannot guarantee this since the above semantic error E sem (s|t, \u03b8) only accounts for positive ones. We thus enhance the semantic error with both positive and negative examples, and the corresponding max-semantic-margin error becomes:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Max-Semantic-Margin Error", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "E * sem (s|t, \u03b8) = max{0, E sem (s|t, \u03b8) \u2212 E sem (s|t , \u03b8) + 1}", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "Max-Semantic-Margin Error", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "It tries to minimize the semantic distance between translation equivalents and maximize the semantic distance between non-translation pairs simultaneously. Using the above error function, we need to construct a negative example for each positive example. Suppose we are given a positive example (s, t), the correct translation t can be converted into a bad translation t by replacing the words in t with randomly chosen target language words. Then, a negative example (s, t ) is available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Max-Semantic-Margin Error", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Like semi-supervised RAE, the parameters \u03b8 in our BRAE model can also be divided into three sets: \u03b8 L : word embedding matrix L for two languages (Section 3.1.1); \u03b8 rec : recursive auto-encoder parameter matrices W (1) , W (2) , and bias terms b (1) , b (2) for two languages (Section 3.1.2); \u03b8 sem : transformation matrix W l and bias term b l for two directions in semantic distance computation (Section 3.3.1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "To have a deep understanding of the parameters, we rewrite Eq. 10: E(s, t; \u03b8) = \u03b1(E rec (s; \u03b8) + E rec (t; \u03b8))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "+ (1 \u2212 \u03b1)(E * sem (s|t, \u03b8) + E * sem (t|s, \u03b8)) = (\u03b1E rec (s; \u03b8 s ) + (1 \u2212 \u03b1)E * sem (s|t, \u03b8 s )) + (\u03b1E rec (t; \u03b8 t ) + (1 \u2212 \u03b1)E * sem (t|s, \u03b8 t ))", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "We can see that the parameters \u03b8 can be divided into two classes: \u03b8 s for the source language and \u03b8 t for the target language. The above equation also indicates that the source-side parameters \u03b8 s can be optimized independently as long as the semantic representation p t of the target phrase t is given to compute E sem (s|t, \u03b8) with Eq. 9. It is similar for the target-side parameters \u03b8 t . Assuming the target phrase representation p t is available, the optimization of the source-side parameters is similar to that of semi-supervised RAE. We apply the Stochastic Gradient Descent (SGD) algorithm to optimize each parameter:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b8 s = \u03b8 s \u2212 \u03b7 \u2202J s \u2202\u03b8 s", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "In order to run SGD algorithm, we need to solve two problems: one for parameter initialization and the other for partial gradient calculation. In parameter initialization, \u03b8 rec and \u03b8 sem for the source language is randomly set according to a normal distribution. For the word embedding L s , there are two choices. First, L s is initialized randomly like other parameters. Second, the word embedding matrix L s is pre-trained with DNN (Bengio et al., 2003; Collobert and Weston, 2008; Mikolov et al., 2013) using large-scale unlabeled monolingual data. We prefer to the second one since this kind of word embedding has already encoded some semantics of the words. In this work, we employ the toolkit Word2Vec (Mikolov et al., 2013) to pre-train the word embedding for the source and target languages. The word embeddings will be fine-tuned in our BRAE model to capture much more semantics.", |
| "cite_spans": [ |
| { |
| "start": 436, |
| "end": 457, |
| "text": "(Bengio et al., 2003;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 458, |
| "end": 485, |
| "text": "Collobert and Weston, 2008;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 486, |
| "end": 507, |
| "text": "Mikolov et al., 2013)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 710, |
| "end": 732, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "The partial gradient for one instance is computed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2202J s \u2202\u03b8 s = \u2202E(s|t, \u03b8 s ) \u2202\u03b8 s + \u03bb\u03b8 s", |
| "eq_num": "(15)" |
| } |
| ], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "Where the source-side error given the target phrase representation includes reconstruction error and updated semantic error:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "E(s|t, \u03b8 s ) = \u03b1E rec (s; \u03b8 s ) + (1 \u2212 \u03b1)E * sem (s|t, \u03b8 s )", |
| "eq_num": "(16)" |
| } |
| ], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "Given the current \u03b8 s , we first construct the binary tree (as illustrated in Fig. 2 ) for any source-side phrase using the greedy algorithm (Socher et al., 2011) . Then, the derivatives for the parameters in the fixed binary tree will be calculated via backpropagation through structures (Goller and Kuchler, 1996) . Finally, the parameters will be updated using Eq. 14 and a new \u03b8 s is obtained. The target-side parameters \u03b8 t can be optimized in the same way as long as the source-side phrase representation p s is available. It seems a paradox that updating \u03b8 s needs p t while updating \u03b8 t needs p s . To solve this problem, we propose a co-training style algorithm which includes three steps:", |
| "cite_spans": [ |
| { |
| "start": 141, |
| "end": 162, |
| "text": "(Socher et al., 2011)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 289, |
| "end": 315, |
| "text": "(Goller and Kuchler, 1996)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 78, |
| "end": 84, |
| "text": "Fig. 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "1. Pre-training: applying unsupervised phrase embedding with standard RAE to pre-train the source-and target-side phrase representations p s and p t respectively (Section 2.1.2);", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "2. Fine-tuning: with the BRAE model, using target-side phrase representation p t to update the source-side parameters \u03b8 s and obtain the finetuned source-side phrase representation p s , and meanwhile using p s to update \u03b8 t and get the finetuned p t , and then calculate the joint error over the training corpus;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "3. Termination Check: if the joint error reaches a local minimum or the iterations reach the pre-defined number (25 is used in our experiments), we terminate the training procedure, otherwise we set p s = p s , p t = p t , and go to step 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Inference", |
| "sec_num": "3.3.3" |
| }, |
| { |
| "text": "With the semantic phrase embeddings and the vector space transformation function, we apply the BRAE to measure the semantic similarity between a source phrase and its translation candidates in the phrase-based SMT. Two tasks are involved in the experiments: phrase table pruning that discards entries whose semantic similarity is very low and decoding with the phrasal semantic similarities as additional new features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The hyper-parameters in the BRAE model include the dimensionality of the word embedding n in Eq. 1, the balance weight \u03b1 in Eq. 10, \u03bbs in Eq. 11, and the learning rate \u03b7 in Eq. 14.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyper-Parameter Settings", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For the dimensionality n, we have tried three settings n = 50, 100, 200 in our experiments. We empirically set the learning rate \u03b7 = 0.01. We draw \u03b1 from 0.05 to 0.5 with step 0.05, and \u03bbs from {10 \u22126 , 10 \u22125 , 10 \u22124 , 10 \u22123 , 10 \u22122 }. The overall error of the BRAE model is employed to guide the search procedure. Finally, we choose \u03b1 = 0.15, \u03bb L = 10 \u22122 , \u03bb rec = 10 \u22123 and \u03bb sem = 10 \u22123 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyper-Parameter Settings", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We have implemented a phrase-based translation system with a maximum entropy based reordering model using the bracketing transduction grammar (Wu, 1997; Xiong et al., 2006) .", |
| "cite_spans": [ |
| { |
| "start": 142, |
| "end": 152, |
| "text": "(Wu, 1997;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 153, |
| "end": 172, |
| "text": "Xiong et al., 2006)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SMT Setup", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The SMT evaluation is conducted on Chineseto-English translation. Accordingly, our BRAE model is trained on Chinese and English. The bilingual training data from LDC 2 contains 0.96M sentence pairs and 1.1M entity pairs with 27.7M Chinese words and 31.9M English words. A 5gram language model is trained on the Xinhua portion of the English Gigaword corpus and the English part of bilingual training data. The NIST MT03 is used as the development data. NIST MT04-06 and MT08 (news data) are used as the test data. Case-insensitive BLEU is employed as the evaluation metric. The statistical significance test is performed by the re-sampling approach (Koehn, 2004) .", |
| "cite_spans": [ |
| { |
| "start": 649, |
| "end": 662, |
| "text": "(Koehn, 2004)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SMT Setup", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In addition, we pre-train the word embedding with toolkit Word2Vec on large-scale monolingual data including the aforementioned data for SMT. The monolingual data contains 1.06B words for Chinese and 1.12B words for English. To obtain high-quality bilingual phrase pairs to train our BRAE model, we perform forced decoding for the bilingual training sentences and collect the phrase pairs used. After removing the duplicates, the remaining 1.12M bilingual phrase pairs (length ranging from 1 to 7) are obtained.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SMT Setup", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Pruning most of the phrase table without much impact on translation quality is very important for translation especially in environments where memory and time constraints are imposed. Many algorithms have been proposed to deal with this problem, such as significance pruning (Johnson et al., 2007; Tomeh et al., 2009) , relevance pruning (Eck et al., 2007) and entropy-based pruning (Ling et al., 2012; Zens et al., 2012) . These algorithms are based on corpus statistics including cooccurrence statistics, phrase pair usage and composition information. For example, the significance pruning, which is proven to be a very effective algorithm, computes the probability named p-value, that tests whether a source phrase s and a target phrase t co-occur more frequently in a bilingual corpus than they happen just by chance. The higher the p-value, the more likely of the phrase pair to be spurious.", |
| "cite_spans": [ |
| { |
| "start": 275, |
| "end": 297, |
| "text": "(Johnson et al., 2007;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 298, |
| "end": 317, |
| "text": "Tomeh et al., 2009)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 338, |
| "end": 356, |
| "text": "(Eck et al., 2007)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 383, |
| "end": 402, |
| "text": "(Ling et al., 2012;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 403, |
| "end": 421, |
| "text": "Zens et al., 2012)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Phrase Table Pruning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Our work has the same objective, but instead of using corpus statistics, we attempt to measure the quality of the phrase pair from the view of semantic meaning. Given a phrase pair (s, t), the BRAE model first obtains their semantic phrase representations (p s , p t ), and then transforms p s into target semantic space p s * , p t into source semantic space p t * . We finally get two similarities Sim(p s * , p t ) and Sim(p t * , p s ). Phrase pairs that have a low similarity are more likely to be noise and more prone to be pruned. In experiments, we discard the phrase pair whose similarity in two directions are smaller than a threshold 3 . Table 1 shows the comparison results between our BRAE-based pruning method and the significance pruning algorithm. We can see a common phenomenon in both of the algorithms: for the first few thresholds, the phrase table becomes smaller and smaller while the translation quality is not much decreased, but the performance jumps a lot at a certain threshold (16 for Significance pruning, 0.8 for BRAE-based one).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 649, |
| "end": 656, |
| "text": "Table 1", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Phrase Table Pruning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Specifically, the Significance algorithm can safely discard 64% of the phrase table at its threshold 12 with only 0.1 BLEU loss in the overall test. In contrast, our BRAE-based algorithm can remove 72% of the phrase table at its threshold 0.7 with only 0.06 BLEU loss in the overall evaluation. When the two algorithms using a similar portion of the phrase tuitive because it is directly based on the semantic similarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Phrase Table Pruning", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Besides using the semantic similarities to prune the phrase table, we also employ them as two informative features like the phrase translation probability to guide translation hypotheses selection during decoding. Typically, four translation probabilities are adopted in the phrase-based SMT, including phrase translation probability and lexical weights in both directions. The phrase translation probability is based on co-occurrence statistics and the lexical weights consider the phrase as bag-of-words. In contrast, our BRAE model focuses on compositional semantics from words to phrases. Therefore, the semantic similarities computed using our BRAE model are complementary to the existing four translation probabilities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decoding with Phrasal Semantic Similarities", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The semantic similarities in two directions Sim(p s * , p t ) and Sim(p t * , p s ) are integrated into our baseline phrase-based model. In order to investigate the influence of the dimensionality of the embedding space, we have tried three different settings n = 50, 100, 200. As shown in Table 2, no matter what n is, the BRAE model can significantly improve the translation quality in the overall test data. The largest improvement can be up to 1.7 BLEU score (MT06 for n = 50). It is interesting that with dimensionality growing, the translation performance is not consistently improved. We speculate that using n = 50 or n = 100 can already distinguish good translation candidates from bad ones.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 290, |
| "end": 297, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Decoding with Phrasal Semantic Similarities", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "To have a better intuition about the power of the BRAE model at learning semantic phrase embeddings, we show some examples in Table 3 . Given the BRAE model and the phrase training set, we search from the set the most semantically similar English phrases for any new input English phrase.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 126, |
| "end": 133, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis on Semantic Phrase Embedding", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "The input phrases contain different number of words. The table shows that the unsupervised RAE can at most capture the syntactic property when the phrases are short. For example, the unsupervised RAE finds do not want for the input phrase do not agree. When the phrase becomes longer, the unsupervised RAE cannot even capture the syntactic property. In contrast, our BRAE model learns the semantic meaning for each phrase no matter whether it is short or relatively long. This indicates that the proposed BRAE model is effective at learning semantic phrase embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis on Semantic Phrase Embedding", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "As the semantic phrase embedding can fully represent the phrase, we can go a step further in the phrase-based SMT and feed the semantic phrase embeddings to DNN in order to model the whole translation process (e.g. derivation structure prediction). We will explore this direction in our future work. Besides SMT, the semantic phrase embeddings can be used in other cross-lingual tasks, such as cross-lingual question answering, since the semantic similarity between phrases in different languages can be calculated accurately.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Applications of The BRAE model", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In addition to the cross-lingual applications, we believe the BRAE model can be applied in many Table 2 : Experimental results of decoding with phrasal semantic similarities. n is the embedding dimensionality. \"+\" means that the model significantly outperforms the baseline with p < 0.01. each country regards every citizen in this country each country has its all the people in the country each other , and people all over the country Table 3 : Semantically similar phrases in the training set for the new phrases.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 96, |
| "end": 103, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 436, |
| "end": 443, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Applications of The BRAE model", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "monolingual NLP tasks which depend on good phrase representations or semantic similarity between phrases, such as named entity recognition, parsing, textual entailment, question answering and paraphrase detection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Applications of The BRAE model", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In fact, the phrases having the same meaning are translation equivalents in different languages, but are paraphrases in one language. Therefore, our model can be easily adapted to learn semantic phrase embeddings using paraphrases. Our BRAE model still has some limitations. For example, as each node in the recursive autoencoder shares the same weight matrix, the BRAE model would become weak at learning the semantic representations for long sentences with tens of words. Improving the model to semantically embed sentences is left for our future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Extensions", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "This paper has explored the bilinguallyconstrained recursive auto-encoders in learning phrase embeddings, which can distinguish phrases with different semantic meanings. With the objective to minimize the semantic distance between translation equivalents and maximize the semantic distance between non-translation pairs simultaneously, the learned model can semantically embed any phrase in two languages and can transform the semantic space in one language to the other. Two end-to-end SMT tasks are involved to test the power of the proposed model at learning the semantic phrase embeddings. The experimental results show that the BRAE model is remarkably effective in phrase table pruning and decoding with phrasal semantic similarities.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We have also discussed many other potential applications and extensions of our BRAE model. In the future work, we will explore four directions. 1) we will try to model the decoding process with DNN based on our semantic embeddings of the basic translation units. 2) we are going to learn semantic phrase embeddings with the paraphrase corpus. 3) we will apply the BRAE model in other monolingual and cross-lingual tasks. 4) we plan to learn semantic sentence embeddings by automatically learning different weight matrices for different nodes in the BRAE model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "LDC category numbers: LDC2000T50, LDC2002L27, LDC2003E07, LDC2003E14, LDC2004T07, LDC2005T06, LDC2005T10 and LDC2005T34.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "To avoid the situation that all the translation candidates for a source phrase are pruned, we always keep the first 10 best according to the semantic similarity.4 In the future, we will compare the performance by enforcing the two algorithms to use the same portion of phrase table", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Nan Yang for sharing the baseline code and anonymous reviewers for their valuable comments. The research work has been partially funded by the Natural Science Founda ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Joint language and translation modeling with recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Quirk", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Zweig", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1044--1054", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Auli, Michel Galley, Chris Quirk, and Geof- frey Zweig. 2013. Joint language and translation modeling with recurrent neural networks. In Pro- ceedings of the 2013 Conference on Empirical Meth- ods in Natural Language Processing, pages 1044- 1054.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A neural probabilistic language model", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9jean", |
| "middle": [], |
| "last": "Ducharme", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Jauvin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "1137--1155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Jauvin. 2003. A neural probabilistic lan- guage model. Journal of Machine Learning Re- search, 3:1137-1155.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural probabilistic language models", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean-S\u00e9bastien", |
| "middle": [], |
| "last": "Sen\u00e9cal", |
| "suffix": "" |
| }, |
| { |
| "first": "Fr\u00e9deric", |
| "middle": [], |
| "last": "Morin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean-Luc", |
| "middle": [], |
| "last": "Gauvain", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Innovations in Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "137--186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, Holger Schwenk, Jean-S\u00e9bastien Sen\u00e9cal, Fr\u00e9deric Morin, and Jean-Luc Gauvain. 2006. Neural probabilistic language models. In In- novations in Machine Learning, pages 137-186.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A unified architecture for natural language processing: Deep neural networks with multitask learning", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 25th international conference on Machine learning", |
| "volume": "", |
| "issue": "", |
| "pages": "160--167", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert and Jason Weston. 2008. A unified architecture for natural language processing: Deep neural networks with multitask learning. In Pro- ceedings of the 25th international conference on Machine learning, pages 160-167.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Natural language processing (almost) from scratch", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Karlen", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [], |
| "last": "Kuksa", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2493--2537", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel Kuksa. 2011. Natural language processing (almost) from scratch. The Journal of Machine Learning Re- search, 12:2493-2537.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Context-dependent pre-trained deep neural networks for large-vocabulary speech recognition", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [ |
| "E" |
| ], |
| "last": "Dahl", |
| "suffix": "" |
| }, |
| { |
| "first": "Dong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Acero", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "IEEE Transactions on Audio, Speech, and Language Processing", |
| "volume": "20", |
| "issue": "1", |
| "pages": "30--42", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George E Dahl, Dong Yu, Li Deng, and Alex Acero. 2012. Context-dependent pre-trained deep neural networks for large-vocabulary speech recognition. IEEE Transactions on Audio, Speech, and Language Processing, 20(1):30-42.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Adaptation data selection using neural language models: Experiments in machine translation", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Duh", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Katsuhito", |
| "middle": [], |
| "last": "Sudoh", |
| "suffix": "" |
| }, |
| { |
| "first": "Hajime", |
| "middle": [], |
| "last": "Tsukada", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "678--683", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Duh, Graham Neubig, Katsuhito Sudoh, and Ha- jime Tsukada. 2013. Adaptation data selection us- ing neural language models: Experiments in ma- chine translation. In 51st Annual Meeting of the As- sociation for Computational Linguistics, pages 678- 683.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Estimating phrase pair relevance for translation model pruning", |
| "authors": [ |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Eck", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Vogal", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Waibel", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthias Eck, Stephen Vogal, and Alex Waibel. 2007. Estimating phrase pair relevance for translation model pruning. In MTSummit XI.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Learning semantic representations for the phrase translation model", |
| "authors": [ |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Yih", |
| "middle": [], |
| "last": "Wen-Tau", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1312.0482" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianfeng Gao, Xiaodong He, Wen-tau Yih, and Li Deng. 2013. Learning semantic representations for the phrase translation model. arXiv preprint arXiv:1312.0482.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Learning task-dependent distributed representations by backpropagation through structure", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Goller", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Kuchler", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "IEEE International Conference on Neural Networks", |
| "volume": "1", |
| "issue": "", |
| "pages": "347--352", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph Goller and Andreas Kuchler. 1996. Learn- ing task-dependent distributed representations by backpropagation through structure. In IEEE Inter- national Conference on Neural Networks, volume 1, pages 347-352.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Improving translation quality by discarding most of the phrasetable", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Howard Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Kuhn", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Howard Johnson, Joel Martin, George Foster, and Roland Kuhn. 2007. Improving translation quality by discarding most of the phrasetable. In Proceed- ings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Recurrent continuous translation models", |
| "authors": [ |
| { |
| "first": "Nal", |
| "middle": [], |
| "last": "Kalchbrenner", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1700--1709", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nal Kalchbrenner and Phil Blunsom. 2013. Recurrent continuous translation models. In Proceedings of the 2013 Conference on Empirical Methods in Nat- ural Language Processing, pages 1700-1709.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Learning convolutional feature hierarchies for visual recognition", |
| "authors": [ |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Sermanet", |
| "suffix": "" |
| }, |
| { |
| "first": "Y-Lan", |
| "middle": [], |
| "last": "Boureau", |
| "suffix": "" |
| }, |
| { |
| "first": "Karol", |
| "middle": [], |
| "last": "Gregor", |
| "suffix": "" |
| }, |
| { |
| "first": "Micha\u00ebl", |
| "middle": [], |
| "last": "Mathieu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann L", |
| "middle": [], |
| "last": "Cun", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1090--1098", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koray Kavukcuoglu, Pierre Sermanet, Y-Lan Boureau, Karol Gregor, Micha\u00ebl Mathieu, and Yann L Cun. 2010. Learning convolutional feature hierarchies for visual recognition. In Advances in neural informa- tion processing systems, pages 1090-1098.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Statistical significance tests for machine translation evaluation", |
| "authors": [ |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "388--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philipp Koehn. 2004. Statistical significance tests for machine translation evaluation. In Proceedings of EMNLP, pages 388-395.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Imagenet classification with deep convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoff", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "25", |
| "issue": "", |
| "pages": "1106--1114", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Krizhevsky, Ilya Sutskever, and Geoff Hinton. 2012. Imagenet classification with deep convolu- tional neural networks. In Advances in Neural Infor- mation Processing Systems 25, pages 1106-1114.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Recursive autoencoders for itg-based translation", |
| "authors": [ |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng Li, Yang Liu, and Maosong Sun. 2013. Recur- sive autoencoders for itg-based translation. In Pro- ceedings of the Conference on Empirical Methods in Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Entropy-based pruning for phrasebased machine translation", |
| "authors": [ |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Joao", |
| "middle": [], |
| "last": "Gra\u00e7a", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabel", |
| "middle": [], |
| "last": "Trancoso", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "962--971", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang Ling, Joao Gra\u00e7a, Isabel Trancoso, and Alan Black. 2012. Entropy-based pruning for phrase- based machine translation. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 962-971.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Additive neural networks for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Lemao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Taro", |
| "middle": [], |
| "last": "Watanabe", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiichiro", |
| "middle": [], |
| "last": "Sumita", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiejun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "791--801", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lemao Liu, Taro Watanabe, Eiichiro Sumita, and Tiejun Zhao. 2013. Additive neural networks for statistical machine translation. In 51st Annual Meet- ing of the Association for Computational Linguis- tics, pages 791-801.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Recurrent neural network based language model", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Karafi\u00e1t", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukas", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Cernock\u00fd", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "INTERSPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "1045--1048", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Martin Karafi\u00e1t, Lukas Burget, Jan Cernock\u00fd, and Sanjeev Khudanpur. 2010. Recur- rent neural network based language model. In IN- TERSPEECH, pages 1045-1048.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Cor- rado, and Jeffrey Dean. 2013. Distributed represen- tations of words and phrases and their composition- ality. In Proceedings of NIPS.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Learning continuous phrase representations and syntactic parsing with recursive neural networks", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NIPS-2010", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Christopher D Manning, and An- drew Y Ng. 2010. Learning continuous phrase representations and syntactic parsing with recursive neural networks. In Proceedings of the NIPS-2010", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Deep Learning and Unsupervised Feature Learning Workshop", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deep Learning and Unsupervised Feature Learning Workshop.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Semi-supervised recursive autoencoders for predicting sentiment distributions", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "H" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "151--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Jeffrey Pennington, Eric H Huang, Andrew Y Ng, and Christopher D Manning. 2011. Semi-supervised recursive autoencoders for predict- ing sentiment distributions. In Proceedings of the Conference on Empirical Methods in Natural Lan- guage Processing, pages 151-161.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Parsing with compositional vector grammars", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, John Bauer, Christopher D Manning, and Andrew Y Ng. 2013a. Parsing with composi- tional vector grammars. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Perelygin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Alex Perelygin, Jean Y Wu, Jason Chuang, Christopher D Manning, Andrew Y Ng, and Christopher Potts. 2013b. Recursive deep mod- els for semantic compositionality over a sentiment treebank. In Proceedings of the Conference on Em- pirical Methods in Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Complexity-based phrase-table filtering for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Nadi", |
| "middle": [], |
| "last": "Tomeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Cancedda", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Dymetman", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of Summit XII", |
| "volume": "", |
| "issue": "", |
| "pages": "144--151", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nadi Tomeh, Nicola Cancedda, and Marc Dymetman. 2009. Complexity-based phrase-table filtering for statistical machine translation. In Proceedings of Summit XII, pages 144-151.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Decoding with largescale neural language models improves translation", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinggong", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Victoria", |
| "middle": [], |
| "last": "Fossum", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1387--1392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Yinggong Zhao, Victoria Fossum, and David Chiang. 2013. Decoding with large- scale neural language models improves translation. In Proceedings of the 2013 Conference on Empiri- cal Methods in Natural Language Processing, pages 1387-1392.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Stochastic inversion transduction grammars and bilingual parsing of parallel corpora", |
| "authors": [ |
| { |
| "first": "Dekai", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Computational linguistics", |
| "volume": "23", |
| "issue": "3", |
| "pages": "377--403", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dekai Wu. 1997. Stochastic inversion transduction grammars and bilingual parsing of parallel corpora. Computational linguistics, 23(3):377-403.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Maximum entropy based phrase reordering model for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shouxun", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of ACL-COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "505--512", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deyi Xiong, Qun Liu, and Shouxun Lin. 2006. Maxi- mum entropy based phrase reordering model for sta- tistical machine translation. In Proceedings of ACL- COLING, pages 505-512.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Word alignment modeling with context dependent deep neural network", |
| "authors": [ |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shujie", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Nenghai", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nan Yang, Shujie Liu, Mu Li, Ming Zhou, and Neng- hai Yu. 2013. Word alignment modeling with con- text dependent deep neural network. In 51st Annual Meeting of the Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "A systematic comparison of phrase table pruning techniques", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Zens", |
| "suffix": "" |
| }, |
| { |
| "first": "Daisy", |
| "middle": [], |
| "last": "Stanton", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "972--983", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Zens, Daisy Stanton, and Peng Xu. 2012. A systematic comparison of phrase table pruning tech- niques. In Proceedings of the 2012 Joint Confer- ence on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 972-983.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Bilingual word embeddings for phrase-based machine translation", |
| "authors": [ |
| { |
| "first": "Will", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Zou", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1393--1398", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Will Y Zou, Richard Socher, Daniel Cer, and Christo- pher D Manning. 2013. Bilingual word embeddings for phrase-based machine translation. In Proceed- ings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1393-1398.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "text": "means the transformation of p s is performed as follows: we first multiply a parameter matrix W l s by p s , and after adding a bias term b l s we apply an element-wise activation function f = tanh(\u2022).", |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF2": { |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "num": null, |
| "content": "<table><tr><td colspan=\"6\">Source Reconstruction Error</td><td colspan=\"2\">Target Reconstruction Error</td></tr><tr><td>W s</td><td colspan=\"2\">(2)</td><td>W s</td><td>(label)</td><td colspan=\"3\">Source Prediction Error</td><td>W t</td><td>(2)</td></tr><tr><td colspan=\"2\">W s</td><td>(1)</td><td/><td/><td/><td>W t</td><td>(label)</td><td>W t</td><td>(1)</td></tr></table>" |
| }, |
| "TABREF3": { |
| "html": null, |
| "type_str": "table", |
| "text": "35% in BRAE and 36% in Significance), the BRAE-based algorithm outperforms the Significance algorithm on all the test sets except for MT04. It indicates that our BRAE model is a good alternative for phrase table pruning. Furthermore, our model is much more in-", |
| "num": null, |
| "content": "<table><tr><td>Method</td><td colspan=\"8\">Threshold PhraseTable MT03 MT04 MT05 MT06 MT08 ALL</td></tr><tr><td>Baseline</td><td/><td>100%</td><td>35.81</td><td>36.91</td><td>34.69</td><td>33.83</td><td>27.17</td><td>34.82</td></tr><tr><td/><td>0.4</td><td>52%</td><td>35.94</td><td>36.96</td><td>35.00</td><td>34.71</td><td>27.77</td><td>35.16</td></tr><tr><td/><td>0.5</td><td>44%</td><td>35.67</td><td>36.59</td><td>34.86</td><td>33.91</td><td>27.25</td><td>34.89</td></tr><tr><td>BRAE</td><td>0.6</td><td>35%</td><td>35.86</td><td>36.71</td><td>34.93</td><td>34.63</td><td>27.34</td><td>35.05</td></tr><tr><td/><td>0.7</td><td>28%</td><td>35.55</td><td>36.62</td><td>34.57</td><td>33.97</td><td>27.10</td><td>34.76</td></tr><tr><td/><td>0.8</td><td>20%</td><td>35.06</td><td>36.01</td><td>34.13</td><td>33.04</td><td>26.66</td><td>34.04</td></tr><tr><td/><td>8</td><td>48%</td><td>35.86</td><td>36.99</td><td>34.74</td><td>34.53</td><td>27.59</td><td>35.13</td></tr><tr><td>Significance</td><td>12 16</td><td>36% 25%</td><td>35.59 35.19</td><td>36.73 36.24</td><td>34.65 34.26</td><td>34.17 33.32</td><td>27.16 26.55</td><td>34.72 34.09</td></tr><tr><td/><td>20</td><td>18%</td><td>35.05</td><td>36.09</td><td>34.02</td><td>32.98</td><td>26.37</td><td>33.97</td></tr></table>" |
| }, |
| "TABREF4": { |
| "html": null, |
| "type_str": "table", |
| "text": "Comparison between BRAE-based pruning and Significance pruning of phrase table. Threshold means similarity in BRAE and negative-log-p-value in Significance. \"ALL\" combines the development and test sets. Bold numbers denote that the result is better than or comparable to that of baseline. n = 50 is used for embedding dimensionality.", |
| "num": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |