| { |
| "paper_id": "P16-1004", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:58:34.755326Z" |
| }, |
| "title": "Language to Logical Form with Neural Attention", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "li.dong@ed.ac.uk" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Semantic parsing aims at mapping natural language to machine interpretable meaning representations. Traditional approaches rely on high-quality lexicons, manually-built templates, and linguistic features which are either domain- or representation-specific. In this paper we present a general method based on an attention-enhanced encoder-decoder model. We encode input utterances into vector representations, and generate their logical forms by conditioning the output sequences or trees on the encoding vectors. Experimental results on four datasets show that our approach performs competitively without using hand-engineered features and is easy to adapt across domains and meaning representations.", |
| "pdf_parse": { |
| "paper_id": "P16-1004", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Semantic parsing aims at mapping natural language to machine interpretable meaning representations. Traditional approaches rely on high-quality lexicons, manually-built templates, and linguistic features which are either domain- or representation-specific. In this paper we present a general method based on an attention-enhanced encoder-decoder model. We encode input utterances into vector representations, and generate their logical forms by conditioning the output sequences or trees on the encoding vectors. Experimental results on four datasets show that our approach performs competitively without using hand-engineered features and is easy to adapt across domains and meaning representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Semantic parsing is the task of translating text to a formal meaning representation such as logical forms or structured queries. There has recently been a surge of interest in developing machine learning methods for semantic parsing (see the references in Section 2), due in part to the existence of corpora containing utterances annotated with formal meaning representations. Figure 1 shows an example of a question (left handside) and its annotated logical form (right handside), taken from JOBS (Tang and Mooney, 2001 ), a well-known semantic parsing benchmark. In order to predict the correct logical form for a given utterance, most previous systems rely on predefined templates and manually designed features, which often render the parsing model domain-or representation-specific. In this work, we aim to use a simple yet effective method to bridge the gap between natural language and logical form with minimal domain knowledge. Encoder-decoder architectures based on recurrent neural networks have been successfully applied to a variety of NLP tasks ranging from syntactic parsing , to machine translation (Kalchbrenner and Blunsom, 2013; Cho et al., 2014; Sutskever et al., 2014) , and image description generation (Karpathy and Fei-Fei, 2015; Vinyals et al., 2015b) . As shown in Figure 1 , we adapt the general encoder-decoder paradigm to the semantic parsing task. Our model learns from natural language descriptions paired with meaning representations; it encodes sentences and decodes logical forms using recurrent neural networks with long short-term memory (LSTM) units. We present two model variants, the first one treats semantic parsing as a vanilla sequence transduction task, whereas our second model is equipped with a hierarchical tree decoder which explicitly captures the compositional structure of logical forms. 
We also introduce an attention mechanism (Bahdanau et al., 2015; Luong et al., 2015b) allowing the model to learn soft alignments between natural language and logical forms and present an argument identification step to handle rare mentions of entities and numbers.", |
| "cite_spans": [ |
| { |
| "start": 498, |
| "end": 520, |
| "text": "(Tang and Mooney, 2001", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 1115, |
| "end": 1147, |
| "text": "(Kalchbrenner and Blunsom, 2013;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1148, |
| "end": 1165, |
| "text": "Cho et al., 2014;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1166, |
| "end": 1189, |
| "text": "Sutskever et al., 2014)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 1225, |
| "end": 1253, |
| "text": "(Karpathy and Fei-Fei, 2015;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1254, |
| "end": 1276, |
| "text": "Vinyals et al., 2015b)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 1881, |
| "end": 1904, |
| "text": "(Bahdanau et al., 2015;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1905, |
| "end": 1925, |
| "text": "Luong et al., 2015b)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 377, |
| "end": 385, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1291, |
| "end": 1299, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Evaluation results demonstrate that compared to previous methods our model achieves similar or better performance across datasets and meaning representations, despite using no hand-engineered domain-or representation-specific features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our work synthesizes two strands of research, namely semantic parsing and the encoder-decoder architecture with neural networks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The problem of learning semantic parsers has received significant attention, dating back to Woods (1973) . Many approaches learn from sentences paired with logical forms following various modeling strategies. Examples include the use of parsing models (Miller et al., 1996; Ge and Mooney, 2005; Lu et al., 2008; Zhao and Huang, 2015) , inductive logic programming (Zelle and Mooney, 1996; Tang and Mooney, 2000; Thomspon and Mooney, 2003) , probabilistic automata (He and Young, 2006) , string/tree-to-tree transformation rules (Kate et al., 2005) , classifiers based on string kernels (Kate and Mooney, 2006) , machine translation (Wong and Mooney, 2006; Wong and Mooney, 2007; Andreas et al., 2013) , and combinatory categorial grammar induction techniques (Zettlemoyer and Collins, 2005; Zettlemoyer and Collins, 2007; Kwiatkowski et al., 2010; Kwiatkowski et al., 2011) . Other work learns semantic parsers without relying on logicalfrom annotations, e.g., from sentences paired with conversational logs (Artzi and Zettlemoyer, 2011) , system demonstrations (Chen and Mooney, 2011; Goldwasser and Roth, 2011; , question-answer pairs (Clarke et al., 2010; Liang et al., 2013) , and distant supervision (Krishnamurthy and Mitchell, 2012; Cai and Yates, 2013; Reddy et al., 2014) .", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 104, |
| "text": "Woods (1973)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 252, |
| "end": 273, |
| "text": "(Miller et al., 1996;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 274, |
| "end": 294, |
| "text": "Ge and Mooney, 2005;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 295, |
| "end": 311, |
| "text": "Lu et al., 2008;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 312, |
| "end": 333, |
| "text": "Zhao and Huang, 2015)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 364, |
| "end": 388, |
| "text": "(Zelle and Mooney, 1996;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 389, |
| "end": 411, |
| "text": "Tang and Mooney, 2000;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 412, |
| "end": 438, |
| "text": "Thomspon and Mooney, 2003)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 464, |
| "end": 484, |
| "text": "(He and Young, 2006)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 528, |
| "end": 547, |
| "text": "(Kate et al., 2005)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 586, |
| "end": 609, |
| "text": "(Kate and Mooney, 2006)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 632, |
| "end": 655, |
| "text": "(Wong and Mooney, 2006;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 656, |
| "end": 678, |
| "text": "Wong and Mooney, 2007;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 679, |
| "end": 700, |
| "text": "Andreas et al., 2013)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 759, |
| "end": 790, |
| "text": "(Zettlemoyer and Collins, 2005;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 791, |
| "end": 821, |
| "text": "Zettlemoyer and Collins, 2007;", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 822, |
| "end": 847, |
| "text": "Kwiatkowski et al., 2010;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 848, |
| "end": 873, |
| "text": "Kwiatkowski et al., 2011)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1008, |
| "end": 1037, |
| "text": "(Artzi and Zettlemoyer, 2011)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1086, |
| "end": 1112, |
| "text": "Goldwasser and Roth, 2011;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1137, |
| "end": 1158, |
| "text": "(Clarke et al., 2010;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1159, |
| "end": 1178, |
| "text": "Liang et al., 2013)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1205, |
| "end": 1239, |
| "text": "(Krishnamurthy and Mitchell, 2012;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1240, |
| "end": 1260, |
| "text": "Cai and Yates, 2013;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1261, |
| "end": 1280, |
| "text": "Reddy et al., 2014)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our model learns from natural language descriptions paired with meaning representations. Most previous systems rely on high-quality lexicons, manually-built templates, and features which are either domain-or representationspecific. We instead present a general method that can be easily adapted to different domains and meaning representations. We adopt the general encoder-decoder framework based on neural networks which has been recently repurposed for various NLP tasks such as syntactic parsing , machine translation (Kalchbrenner and Blunsom, 2013; Cho et al., 2014; Sutskever et al., 2014) , image description generation (Karpathy and Fei-Fei, 2015; Vinyals et al., 2015b) , question answering (Hermann et al., 2015) , and summarization (Rush et al., 2015) . Mei et al. (2016) use a sequence-to-sequence model to map navigational instructions to actions.", |
| "cite_spans": [ |
| { |
| "start": 522, |
| "end": 554, |
| "text": "(Kalchbrenner and Blunsom, 2013;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 555, |
| "end": 572, |
| "text": "Cho et al., 2014;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 573, |
| "end": 596, |
| "text": "Sutskever et al., 2014)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 628, |
| "end": 656, |
| "text": "(Karpathy and Fei-Fei, 2015;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 657, |
| "end": 679, |
| "text": "Vinyals et al., 2015b)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 701, |
| "end": 723, |
| "text": "(Hermann et al., 2015)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 744, |
| "end": 763, |
| "text": "(Rush et al., 2015)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 766, |
| "end": 783, |
| "text": "Mei et al. (2016)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our model works on more well-defined meaning representations (such as Prolog and lambda calculus) and is conceptually simpler; it does not employ bidirectionality or multi-level alignments. Grefenstette et al. (2014) propose a different architecture for semantic parsing based on the combination of two neural network models. The first model learns shared representations from pairs of questions and their translations into knowledge base queries, whereas the second model generates the queries conditioned on the learned representations. However, they do not report empirical evaluation results.", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 216, |
| "text": "Grefenstette et al. (2014)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our aim is to learn a model which maps natural language input", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "q = x 1 \u2022 \u2022 \u2022 x |q| to a logical form representation of its meaning a = y 1 \u2022 \u2022 \u2022 y |a| .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The conditional probability p (a|q) is decomposed as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p (a|q) = |a| t=1 p (y t |y <t , q)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "y <t = y 1 \u2022 \u2022 \u2022 y t\u22121 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our method consists of an encoder which encodes natural language input q into a vector representation and a decoder which learns to generate y 1 , \u2022 \u2022 \u2022 , y |a| conditioned on the encoding vector. In the following we describe two models varying in the way in which p (a|q) is computed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "This model regards both input q and output a as sequences. As shown in Figure 2 , the encoder and decoder are two different L-layer recurrent neural networks with long short-term memory (LSTM) units which recursively process tokens one by one. The first |q| time steps belong to the encoder, while the following |a| time steps belong to the decoder. Let h l t \u2208 R n denote the hidden vector at time step t and layer l. h l t is then computed by:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 71, |
| "end": 79, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sequence-to-Sequence Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h l t = LSTM h l t\u22121 , h l\u22121 t", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Sequence-to-Sequence Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where LSTM refers to the LSTM function being used. In our experiments we follow the architecture described in Zaremba et al. (2015) , however other types of gated activation functions are possible (e.g., Cho et al. (2014) ). For the encoder, h 0 t = W q e(x t ) is the word vector of the current input token, with W q \u2208 R n\u00d7|Vq| being a parameter matrix, and e(\u2022) the index of the corresponding Figure 2 : Sequence-to-sequence (SEQ2SEQ) model with two-layer recurrent neural networks.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 131, |
| "text": "Zaremba et al. (2015)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 204, |
| "end": 221, |
| "text": "Cho et al. (2014)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 395, |
| "end": 403, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sequence-to-Sequence Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "LSTM LSTM LSTM LSTM LSTM LSTM LSTM LSTM LSTM LSTM LSTM LSTM", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Sequence Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "token. For the decoder, h 0 t = W a e(y t\u22121 ) is the word vector of the previous predicted word, where W a \u2208 R n\u00d7|Va| . Notice that the encoder and decoder have different LSTM parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Sequence Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Once the tokens of the input sequence x 1 , \u2022 \u2022 \u2022 , x |q| are encoded into vectors, they are used to initialize the hidden states of the first time step in the decoder. Next, the hidden vector of the topmost LSTM h L t in the decoder is used to predict the t-th output token as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Sequence Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p (y t |y <t , q) = softmax W o h L t e (y t ) (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Sequence Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where W o \u2208 R |Va|\u00d7n is a parameter matrix, and e (y t ) \u2208 {0, 1} |Va| a one-hot vector for computing y t 's probability from the predicted distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Sequence Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We augment every sequence with a \"start-of-sequence\" <s> and \"end-of-sequence\" </s> token. The generation process terminates once </s> is predicted. The conditional probability of generating the whole sequence p (a|q) is then obtained using Equation (1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Sequence Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The SEQ2SEQ model has a potential drawback in that it ignores the hierarchical structure of logical forms. As a result, it needs to memorize various pieces of auxiliary information (e.g., bracket pairs) to generate well-formed output. In the following we present a hierarchical tree decoder which is more faithful to the compositional nature of meaning representations. A schematic description of the model is shown in Figure 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 419, |
| "end": 427, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sequence-to-Tree Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The present model shares the same encoder with the sequence-to-sequence model described in Section 3.1 (essentially it learns to encode input q as vectors). However, its decoder is fundamentally different as it generates logical forms in a topdown manner. In order to represent tree structure, Figure 3 : Sequence-to-tree (SEQ2TREE) model with a hierarchical tree decoder.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 294, |
| "end": 302, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sequence-to-Tree Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "we define a \"nonterminal\" <n> token which indicates subtrees. As shown in Figure 3 , we preprocess the logical form \"lambda $0 e (and (>(departure time $0) 1600:ti) (from $0 dallas:ci))\" to a tree by replacing tokens between pairs of brackets with nonterminals. Special tokens <s> and <(> denote the beginning of a sequence and nonterminal sequence, respectively (omitted from Figure 3 due to lack of space). Token </s> represents the end of sequence. After encoding input q, the hierarchical tree decoder uses recurrent neural networks to generate tokens at depth 1 of the subtree corresponding to parts of logical form a. If the predicted token is <n>, we decode the sequence by conditioning on the nonterminal's hidden vector. This process terminates when no more nonterminals are emitted. In other words, a sequence decoder is used to hierarchically generate the tree structure.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 74, |
| "end": 82, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 377, |
| "end": 385, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sequence-to-Tree Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In contrast to the sequence decoder described in Section 3.1, the current hidden state does not only depend on its previous time step. In order to better utilize the parent nonterminal's information, we introduce a parent-feeding connection where the hidden vector of the parent nonterminal is concatenated with the inputs and fed into LSTM.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Tree Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As an example, Figure 4 shows the decoding tree corresponding to the logical form \"A B (C)\", where y 1 \u2022 \u2022 \u2022 y 6 are predicted tokens, and t 1 \u2022 \u2022 \u2022 t 6 denote different time steps. Span \"(C)\" corresponds to a subtree. Decoding in this example has two steps: once input q has been encoded, we first generate Figure 4 : A SEQ2TREE decoding example for the logical form \"A B (C)\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 15, |
| "end": 23, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 308, |
| "end": 316, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sequence-to-Tree Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "y 1 \u2022 \u2022 \u2022 y 4 at depth 1 until token </s> is t 1 t 2 t 3 t 4 t 5 t 6 y 1 =A y 3 =<n> <s> q y 6 =</s> <(> y 4 =</s> y 2 =B y 5 =C", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Tree Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "predicted; next, we generate y 5 , y 6 by conditioning on nonterminal t 3 's hidden vectors. The probability p (a|q) is the product of these two sequence decoding steps:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Tree Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "p (a|q) = p (y 1 y 2 y 3 y 4 |q) p (y 5 y 6 |y \u22643 , q) (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Tree Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where Equation 3 is used for the prediction of each output token.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence-to-Tree Model", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As shown in Equation 3, the hidden vectors of the input sequence are not directly used in the decoding process. However, it makes intuitively sense to consider relevant information from the input to better predict the current token. Following this idea, various techniques have been proposed to integrate encoder-side information (in the form of a context vector) at each time step of the decoder (Bahdanau et al., 2015; Luong et al., 2015b; Xu et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 397, |
| "end": 420, |
| "text": "(Bahdanau et al., 2015;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 421, |
| "end": 441, |
| "text": "Luong et al., 2015b;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 442, |
| "end": 458, |
| "text": "Xu et al., 2015)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "As shown in Figure 5 , in order to find relevant encoder-side context for the current hidden state h L t of decoder, we compute its attention score with the k-th hidden state in the encoder as:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "s t k = exp{h L k \u2022 h L t } |q| j=1 exp{h L j \u2022 h L t }", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "h L 1 , \u2022 \u2022 \u2022 , h L", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "|q| are the top-layer hidden vectors of the encoder. Then, the context vector is the weighted sum of the hidden vectors in the encoder:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "c t = |q| k=1 s t k h L k", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In lieu of Equation 3, we further use this context vector which acts as a summary of the encoder to compute the probability of generating y t as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "h att t = tanh W 1 h L t + W 2 c t (7) LSTM LSTM LSTM LSTM LSTM", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Attention Scores Figure 5 : Attention scores are computed by the current hidden vector and all the hidden vectors of encoder. Then, the encoder-side context vector c t is obtained in the form of a weighted sum, which is further used to predict y t .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 17, |
| "end": 25, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "p (y t |y <t , q) = softmax W o h att t e (y t ) (8)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where W o \u2208 R |Va|\u00d7n and W 1 , W 2 \u2208 R n\u00d7n are three parameter matrices, and e (y t ) is a one-hot vector used to obtain y t 's probability.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Mechanism", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Our goal is to maximize the likelihood of the generated logical forms given natural language utterances as input. So the objective function is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "minimize \u2212 (q,a)\u2208D log p (a|q)", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Model Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where D is the set of all natural language-logical form training pairs, and p (a|q) is computed as shown in Equation (1). The RMSProp algorithm (Tieleman and Hinton, 2012) is employed to solve this non-convex optimization problem. Moreover, dropout is used for regularizing the model (Zaremba et al., 2015) . Specifically, dropout operators are used between different LSTM layers and for the hidden layers before the softmax classifiers. This technique can substantially reduce overfitting, especially on datasets of small size.", |
| "cite_spans": [ |
| { |
| "start": 284, |
| "end": 306, |
| "text": "(Zaremba et al., 2015)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "At test time, we predict the logical form for an input utterance q by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "a = arg max a p a |q (10)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "where a represents a candidate output. However, it is impractical to iterate over all possible results to obtain the optimal prediction. According to Equation (1), we decompose the probability p (a|q) so that we can use greedy search (or beam search) to generate tokens one by one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Algorithm 1 describes the decoding process for SEQ2TREE. The time complexity of both decoders is O(|a|), where |a| is the length of output. The extra computation of SEQ2TREE compared with SEQ2SEQ is to maintain the nonterminal queue, which can be ignored because most of the time is spent on matrix operations. We implement the hierarchical tree decoder in a batch mode, so that it can fully utilize GPUs. Specifically, as shown in Algorithm 1, every time we pop multiple nonterminals from the queue and decode these nonterminals in one batch.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "The majority of semantic parsing datasets have been developed with question-answering in mind. In the typical application setting, natural language questions are mapped into logical forms and executed on a knowledge base to obtain an answer. Due to the nature of the question-answering task, many natural language utterances contain entities or numbers that are often parsed as arguments in the logical form. Some of them are unavoidably rare or do not appear in the training set at all (this is especially true for small-scale datasets). Conventional sequence encoders simply replace rare words with a special unknown word symbol (Luong et al., 2015a; Jean et al., 2015) , which would be detrimental for semantic parsing.", |
| "cite_spans": [ |
| { |
| "start": 631, |
| "end": 652, |
| "text": "(Luong et al., 2015a;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 653, |
| "end": 671, |
| "text": "Jean et al., 2015)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument Identification", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "We have developed a simple procedure for argument identification. Specifically, we identify entities and numbers in input questions and replace them with their type names and unique IDs. For instance, we pre-process the training example \"jobs with a salary of 40000\" and its logical form \"job(ANS), salary greater than(ANS, 40000, year)\" as \"jobs with a salary of num 0 \" and \"job(ANS), salary greater than(ANS, num 0 , year)\". We use the pre-processed examples as training data. At inference time, we also mask entities and numbers with their types and IDs. Once we obtain the decoding result, a post-processing step recovers all the markers type i to their corresponding logical constants.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument Identification", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "We compare our method against multiple previous systems on four datasets. We describe these datasets below, and present our experimental settings and results. Finally, we conduct model analysis in order to understand what the model learns. The code is available at https://github. com/donglixp/lang2logic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our model was trained on the following datasets, covering different domains and using different meaning representations. Examples for each domain are shown in Table 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 159, |
| "end": 166, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "JOBS This benchmark dataset contains 640 queries to a database of job listings. Specifically, questions are paired with Prolog-style queries. We used the same training-test split as Zettlemoyer and Collins (2005) which contains 500 training and 140 test instances. Values for the variables company, degree, language, platform, location, job area, and number are identified.", |
| "cite_spans": [ |
| { |
| "start": 182, |
| "end": 212, |
| "text": "Zettlemoyer and Collins (2005)", |
| "ref_id": "BIBREF50" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "GEO This is a standard semantic parsing benchmark which contains 880 queries to a database of U.S. geography. GEO has 880 instances split into a training set of 680 training examples and 200 test examples (Zettlemoyer and Collins, 2005) . We used the same meaning representation based on lambda-calculus as Kwiatkowski et al. (2011) . Values for the variables city, state, country, river, and number are identified. what is the population of the state with the largest area? (population:i (argmax $0 (state:t $0) (area:i $0)))", |
| "cite_spans": [ |
| { |
| "start": 205, |
| "end": 236, |
| "text": "(Zettlemoyer and Collins, 2005)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 307, |
| "end": 332, |
| "text": "Kwiatkowski et al. (2011)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "11.10 28.10 dallas to san francisco leaving after 4 in the afternoon please (lambda $0 e (and (>(departure time $0) 1600:ti) (from $0 dallas:ci) (to $0 san francisco:ci))) IFTTT 6.95 21.80", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ATIS", |
| "sec_num": null |
| }, |
| { |
| "text": "Turn on heater when temperature drops below 58 degree TRIGGER: Weather -Current temperature drops below -((Temperature (58)) (Degrees in (f))) ACTION: WeMo Insight Switch -Turn on -((Which switch? (\"\"))) Table 1 : Examples of natural language descriptions and their meaning representations from four datasets. The average length of input and output sequences is shown in the second column.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 204, |
| "end": 211, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "ATIS", |
| "sec_num": null |
| }, |
| { |
| "text": "recipes from the IFTTT website 1 . Recipes are simple programs with exactly one trigger and one action which users specify on the site. Whenever the conditions of the trigger are satisfied, the action is performed. Actions typically revolve around home security (e.g., \"turn on my lights when I arrive home\"), automation (e.g., \"text me if the door opens\"), well-being (e.g., \"remind me to drink water if I've been at a bar for more than two hours\"), and so on. Triggers and actions are selected from different channels (160 in total) representing various types of services, devices (e.g., Android), and knowledge sources (such as ESPN or Gmail). In the dataset, there are 552 trigger functions from 128 channels, and 229 action functions from 99 channels. We used Quirk et al.'s (2015) original split which contains 77, 495 training, 5, 171 development, and 4, 294 test examples. The IFTTT programs are represented as abstract syntax trees and are paired with natural language descriptions provided by users (see Table 1 ). Here, numbers and URLs are identified.", |
| "cite_spans": [ |
| { |
| "start": 765, |
| "end": 786, |
| "text": "Quirk et al.'s (2015)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1014, |
| "end": 1021, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "ATIS", |
| "sec_num": null |
| }, |
| { |
| "text": "Natural language sentences were lowercased; misspellings were corrected using a dictionary based on the Wikipedia list of common misspellings. Words were stemmed using NLTK (Bird et al., 2009) . For IFTTT, we filtered tokens, channels and functions which appeared less than five times in the training set. For the other datasets, we filtered input words which did not occur at least two times in the training set, but kept all tokens in the logical forms. Plain string matching was employed to identify augments as described in Section 3.6. More sophisticated approaches could be used, however we leave this future work.", |
| "cite_spans": [ |
| { |
| "start": 173, |
| "end": 192, |
| "text": "(Bird et al., 2009)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Model hyper-parameters were cross-validated 1 http://www.ifttt.com Method Accuracy COCKTAIL (Tang and Mooney, 2001) 79.4 PRECISE (Popescu et al., 2003) 88.0 ZC05 (Zettlemoyer and Collins, 2005) 79.3 DCS+L (Liang et al., 2013) 90.7 TISP (Zhao and Huang, 2015) 85 on the training set for JOBS and GEO. We used the standard development sets for ATIS and IFTTT. We used the RMSProp algorithm (with batch size set to 20) to update the parameters. The smoothing constant of RMSProp was 0.95. Gradients were clipped at 5 to alleviate the exploding gradient problem (Pascanu et al., 2013) . Parameters were randomly initialized from a uniform distribution U (\u22120.08, 0.08). A two-layer LSTM was used for IFTTT, while a one-layer LSTM was employed for the other domains. The dropout rate was selected from {0.2, 0.3, 0.4, 0.5}. Dimensions of hidden vector and word embedding were selected from {150, 200, 250}. Early stopping was used to determine the number of epochs. Input sentences were reversed before feeding into the encoder (Sutskever et al., 2014) . We use greedy search to generate logical forms during inference. Notice that two decoders with shared word embeddings were used to predict triggers and actions for IFTTT, and two softmax classifiers are used to classify channels and functions.", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 115, |
| "text": "(Tang and Mooney, 2001)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 129, |
| "end": 151, |
| "text": "(Popescu et al., 2003)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 162, |
| "end": 193, |
| "text": "(Zettlemoyer and Collins, 2005)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 205, |
| "end": 225, |
| "text": "(Liang et al., 2013)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 236, |
| "end": 258, |
| "text": "(Zhao and Huang, 2015)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 558, |
| "end": 580, |
| "text": "(Pascanu et al., 2013)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1022, |
| "end": 1046, |
| "text": "(Sutskever et al., 2014)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Settings", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We first discuss the performance of our model on JOBS, GEO, and ATIS, and then examine our results on IFTTT. Tables 2-4 present comparisons against a variety of systems previously described Method Accuracy SCISSOR (Ge and Mooney, 2005) 72.3 KRISP (Kate and Mooney, 2006) 71.7 WASP (Wong and Mooney, 2006) 74.8 \u03bb-WASP (Wong and Mooney, 2007) 86.6 LNLZ08 (Lu et al., 2008) 81.8 ZC05 (Zettlemoyer and Collins, 2005) 79.3 ZC07 (Zettlemoyer and Collins, 2007) 86.1 UBL (Kwiatkowski et al., 2010) 87.9 FUBL (Kwiatkowski et al., 2011) 88.6 KCAZ13 (Kwiatkowski et al., 2013) 89.0 DCS+L (Liang et al., 2013) 87.9 TISP (Zhao and Huang, 2015) 88.9 SEQ2SEQ 84.6 \u2212 attention 72.9 \u2212 argument 68.6 SEQ2TREE 87.1 \u2212 attention 76.8 Table 3 : Evaluation results on GEO. 10-fold crossvalidation is used for the systems shown in the top half of the table. The standard split of ZC05 is used for all other systems.", |
| "cite_spans": [ |
| { |
| "start": 214, |
| "end": 235, |
| "text": "(Ge and Mooney, 2005)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 247, |
| "end": 270, |
| "text": "(Kate and Mooney, 2006)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 281, |
| "end": 304, |
| "text": "(Wong and Mooney, 2006)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 317, |
| "end": 340, |
| "text": "(Wong and Mooney, 2007)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 353, |
| "end": 370, |
| "text": "(Lu et al., 2008)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 381, |
| "end": 412, |
| "text": "(Zettlemoyer and Collins, 2005)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 423, |
| "end": 454, |
| "text": "(Zettlemoyer and Collins, 2007)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 464, |
| "end": 490, |
| "text": "(Kwiatkowski et al., 2010)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 501, |
| "end": 527, |
| "text": "(Kwiatkowski et al., 2011)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 540, |
| "end": 566, |
| "text": "(Kwiatkowski et al., 2013)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 578, |
| "end": 598, |
| "text": "(Liang et al., 2013)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 609, |
| "end": 631, |
| "text": "(Zhao and Huang, 2015)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 714, |
| "end": 721, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Method Accuracy ZC07 (Zettlemoyer and Collins, 2007) 84.6 UBL (Kwiatkowski et al., 2010) 71.4 FUBL (Kwiatkowski et al., 2011) 82.8 GUSP-FULL (Poon, 2013) 74.8 GUSP++ (Poon, 2013) 83.5 TISP (Zhao and Huang, 2015) 84. in the literature. We report results with the full models (SEQ2SEQ, SEQ2TREE) and two ablation variants, i.e., without an attention mechanism (\u2212attention) and without argument identification (\u2212argument). We report accuracy which is defined as the proportion of the input sentences that are correctly parsed to their gold standard logical forms. Notice that DCS+L, KCAZ13 and GUSP output answers directly, so accuracy in this setting is defined as the percentage of correct answers. Overall, SEQ2TREE is superior to SEQ2SEQ. This is to be expected since SEQ2TREE explicitly models compositional structure. On the JOBS and GEO datasets which contain logical forms with nested structures, SEQ2TREE outperforms SEQ2SEQ by 2.9% and 2.5%, respectively. SEQ2TREE achieves better accuracy over SEQ2SEQ on ATIS too, however, the difference is smaller, since ATIS is a simpler domain without complex nested structures. We find that adding at- Figure 6 . Moreover, our results show that argument identification is critical for smallscale datasets. For example, about 92% of city names appear less than 4 times in the GEO training set, so it is difficult to learn reliable parameters for these words. In relation to previous work, the proposed models achieve comparable or better performance. Importantly, we use the same framework (SEQ2SEQ or SEQ2TREE) across datasets and meaning representations (Prolog-style logical forms in JOBS and lambda calculus in the other two datasets) without modification. Despite this relatively simple approach, we observe that SEQ2TREE ranks second on JOBS, and is tied for first place with ZC07 on ATIS. Figure 6 : Alignments (same color rectangles) produced by the attention mechanism (darker color represents higher attention score). 
Input sentences are reversed and stemmed. Model output is shown for SEQ2SEQ (a, b) and SEQ2TREE (c, d).", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 52, |
| "text": "(Zettlemoyer and Collins, 2007)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 62, |
| "end": 88, |
| "text": "(Kwiatkowski et al., 2010)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 99, |
| "end": 125, |
| "text": "(Kwiatkowski et al., 2011)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 141, |
| "end": 153, |
| "text": "(Poon, 2013)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 166, |
| "end": 178, |
| "text": "(Poon, 2013)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 189, |
| "end": 211, |
| "text": "(Zhao and Huang, 2015)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1149, |
| "end": 1157, |
| "text": "Figure 6", |
| "ref_id": null |
| }, |
| { |
| "start": 1842, |
| "end": 1850, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We illustrate examples of alignments produced by SEQ2SEQ in Figures 6a and 6b . Alignments produced by SEQ2TREE are shown in Figures 6c and 6d . Matrices of attention scores are computed using Equation (5) and are represented in grayscale. Aligned input words and logical form predicates are enclosed in (same color) rectangles whose overlapping areas contain the attention scores. Also notice that attention scores are computed by LSTM hidden vectors which encode context information rather than just the words in their current positions. The examples demonstrate that the attention mechanism can successfully model the correspondence between sentences and logical forms, capturing reordering (Figure 6b ), manyto-many ( Figure 6a) , and many-to-one alignments (Figures 6c,d ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 60, |
| "end": 77, |
| "text": "Figures 6a and 6b", |
| "ref_id": null |
| }, |
| { |
| "start": 125, |
| "end": 143, |
| "text": "Figures 6c and 6d", |
| "ref_id": null |
| }, |
| { |
| "start": 695, |
| "end": 705, |
| "text": "(Figure 6b", |
| "ref_id": null |
| }, |
| { |
| "start": 723, |
| "end": 733, |
| "text": "Figure 6a)", |
| "ref_id": null |
| }, |
| { |
| "start": 763, |
| "end": 776, |
| "text": "(Figures 6c,d", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "For IFTTT, we follow the same evaluation protocol introduced in Quirk et al. (2015) . The dataset is extremely noisy and measuring accuracy is problematic since predicted abstract syntax trees (ASTs) almost never exactly match the gold standard. Quirk et al. view an AST as a set of productions and compute balanced F1 instead which we also adopt. The first column in Table 5 shows the percentage of channels selected correctly for both triggers and actions. The second column measures accuracy for both channels and functions. The last column shows balanced F1 against the gold tree over all productions in the proposed derivation. We compare our model against posclass, the method introduced in Quirk et al. and several of their baselines. posclass is reminiscent of KRISP (Kate and Mooney, 2006) , it learns distributions over productions given input sentences represented as a bag of linguistic features. The retrieval baseline finds the closest description in the training data based on character string-edit-distance and returns the recipe for that training program. The phrasal method uses phrase-based machine translation to generate the recipe, whereas sync extracts synchronous grammar rules from the data, essentially recreating WASP (Wong and Mooney, 2006) . Finally, they use a binary classifier to predict whether a production should be present in the derivation tree corresponding to the description. Quirk et al. (2015) report results on the full test data and smaller subsets after noise filtering, e.g., when non-English and unintelligible descriptions are removed (Tables 5a and 5b) . They also ran their system on a high-quality subset of description-program pairs which were found in the gold standard and at least three humans managed to independently reproduce (Table 5c ). Across all subsets our models outperforms posclass and related baselines. Again we observe that SEQ2TREE consistently outperforms SEQ2SEQ, albeit with a small margin. 
Compared to the previous datasets, the attention mechanism and our argument iden-tification method yield less of an improvement. This may be due to the size of Quirk et al. (2015) and the way it was created -user curated descriptions are often of low quality, and thus align very loosely to their corresponding ASTs.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 83, |
| "text": "Quirk et al. (2015)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 769, |
| "end": 798, |
| "text": "KRISP (Kate and Mooney, 2006)", |
| "ref_id": null |
| }, |
| { |
| "start": 1245, |
| "end": 1268, |
| "text": "(Wong and Mooney, 2006)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 1416, |
| "end": 1435, |
| "text": "Quirk et al. (2015)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 2124, |
| "end": 2143, |
| "text": "Quirk et al. (2015)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 368, |
| "end": 375, |
| "text": "Table 5", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1583, |
| "end": 1601, |
| "text": "(Tables 5a and 5b)", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 1784, |
| "end": 1793, |
| "text": "(Table 5c", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Finally, we inspected the output of our model in order to identify the most common causes of errors which we summarize below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Under-Mapping The attention model used in our experiments does not take the alignment history into consideration. So, some question words, expecially in longer questions, may be ignored in the decoding process. This is a common problem for encoder-decoder models and can be addressed by explicitly modelling the decoding coverage of the source words (Tu et al., 2016; Cohn et al., 2016) . Keeping track of the attention history would help adjust future attention and guide the decoder towards untranslated source words.", |
| "cite_spans": [ |
| { |
| "start": 350, |
| "end": 367, |
| "text": "(Tu et al., 2016;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 368, |
| "end": 386, |
| "text": "Cohn et al., 2016)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Argument Identification Some mentions are incorrectly identified as arguments. For example, the word may is sometimes identified as a month when it is simply a modal verb. Moreover, some argument mentions are ambiguous. For instance, 6 o'clock can be used to express either 6 am or 6 pm. We could disambiguate arguments based on contextual information. The execution results of logical forms could also help prune unreasonable arguments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Rare Words Because the data size of JOBS, GEO, and ATIS is relatively small, some question words are rare in the training set, which makes it hard to estimate reliable parameters for them. One solution would be to learn word embeddings on unannotated text data, and then use these as pretrained vectors for question words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "In this paper we presented an encoder-decoder neural network model for mapping natural language descriptions to their meaning representations. We encode natural language utterances into vectors and generate their corresponding logical forms as sequences or trees using recurrent neural networks with long short-term memory units. Experimental results show that enhancing the model with a hierarchical tree decoder and an attention mechanism improves per-formance across the board. Extensive comparisons with previous methods show that our approach performs competitively, without recourse to domain-or representation-specific features. Directions for future work are many and varied. For example, it would be interesting to learn a model from question-answer pairs without access to target logical forms. Beyond semantic parsing, we would also like to apply our SEQ2TREE model to related structured prediction tasks such as constituency parsing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank Luke Zettlemoyer and Tom Kwiatkowski for sharing the ATIS dataset. The support of the European Research Council under award number 681760 \"Translating Multiple Modalities into Text\" is gratefully acknowledged.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Semantic parsing as machine translation", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Andreas", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "47--52", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Andreas, Andreas Vlachos, and Stephen Clark. 2013. Semantic parsing as machine translation. In Proceedings of the 51st ACL, pages 47-52, Sofia, Bulgaria.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Bootstrapping semantic parsers from conversations", |
| "authors": [ |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "421--432", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoav Artzi and Luke Zettlemoyer. 2011. Bootstrap- ping semantic parsers from conversations. In Pro- ceedings of the 2011 EMNLP, pages 421-432, Ed- inburgh, United Kingdom.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Weakly supervised learning of semantic parsers for mapping instructions to actions", |
| "authors": [ |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "TACL", |
| "issue": "", |
| "pages": "49--62", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoav Artzi and Luke Zettlemoyer. 2013. Weakly su- pervised learning of semantic parsers for mapping instructions to actions. TACL, 1(1):49-62.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of the ICLR, San Diego, California.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Natural Language Processing with Python", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "Ewan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Loper", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bird, Ewan Klein, and Edward Loper. 2009. Natural Language Processing with Python. O'Reilly Media.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Semantic parsing freebase: Towards open-domain semantic parsing", |
| "authors": [ |
| { |
| "first": "Qingqing", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Yates", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "2nd Joint Conference on Lexical and Computational Semantics", |
| "volume": "", |
| "issue": "", |
| "pages": "328--338", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qingqing Cai and Alexander Yates. 2013. Seman- tic parsing freebase: Towards open-domain seman- tic parsing. In 2nd Joint Conference on Lexical and Computational Semantics, pages 328-338, Atlanta, Georgia.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Learning to interpret natural language navigation instructions from observations", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 15th AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "859--865", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David L. Chen and Raymond J. Mooney. 2011. Learn- ing to interpret natural language navigation instruc- tions from observations. In Proceedings of the 15th AAAI, pages 859-865, San Francisco, California.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merrienboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Fethi", |
| "middle": [], |
| "last": "Bougares", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1724--1734", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart van Merrienboer, Caglar Gul- cehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. In Proceedings of the 2014 EMNLP, pages 1724-1734, Doha, Qatar.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Driving semantic parsing from the world's response", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Clarke", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Goldwasser", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [ |
| "Roth" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of CONLL", |
| "volume": "", |
| "issue": "", |
| "pages": "18--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Clarke, Dan Goldwasser, Ming-Wei Chang, and Dan Roth. 2010. Driving semantic parsing from the world's response. In Proceedings of CONLL, pages 18-27, Uppsala, Sweden.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Incorporating structural alignment biases into an attentional neural translation model", |
| "authors": [ |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Cong Duy Vu", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Vymolova", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaisheng", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Trevor Cohn, Cong Duy Vu Hoang, Ekaterina Vy- molova, Kaisheng Yao, Chris Dyer, and Gholamreza Haffari. 2016. Incorporating structural alignment biases into an attentional neural translation model. In Proceedings of the 2016 NAACL, San Diego, Cal- ifornia.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A statistical semantic parser that integrates syntax and semantics", |
| "authors": [ |
| { |
| "first": "Ruifang", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "9--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruifang Ge and Raymond J. Mooney. 2005. A statisti- cal semantic parser that integrates syntax and seman- tics. In Proceedings of CoNLL, pages 9-16, Ann Arbor, Michigan.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Learning from natural instructions", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Goldwasser", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 22nd IJ-CAI", |
| "volume": "", |
| "issue": "", |
| "pages": "1794--1800", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Goldwasser and Dan Roth. 2011. Learning from natural instructions. In Proceedings of the 22nd IJ- CAI, pages 1794-1800, Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A deep architecture for semantic parsing", |
| "authors": [ |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Nando De Freitas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hermann", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the ACL 2014 Workshop on Semantic Parsing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edward Grefenstette, Phil Blunsom, Nando de Freitas, and Karl Moritz Hermann. 2014. A deep architec- ture for semantic parsing. In Proceedings of the ACL 2014 Workshop on Semantic Parsing, Atlanta, Geor- gia.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Semantic processing using the hidden vector state model", |
| "authors": [ |
| { |
| "first": "Yulan", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Speech Communication", |
| "volume": "48", |
| "issue": "3-4", |
| "pages": "262--275", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yulan He and Steve Young. 2006. Semantic process- ing using the hidden vector state model. Speech Communication, 48(3-4):262-275.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Teaching machines to read and comprehend", |
| "authors": [ |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Moritz Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Kocisky", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Lasse", |
| "middle": [], |
| "last": "Espeholt", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Kay", |
| "suffix": "" |
| }, |
| { |
| "first": "Mustafa", |
| "middle": [], |
| "last": "Suleyman", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "28", |
| "issue": "", |
| "pages": "1684--1692", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Su- leyman, and Phil Blunsom. 2015. Teaching ma- chines to read and comprehend. In Advances in Neu- ral Information Processing Systems 28, pages 1684- 1692. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "On using very large target vocabulary for neural machine translation", |
| "authors": [ |
| { |
| "first": "S\u00e9bastien", |
| "middle": [], |
| "last": "Jean", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Memisevic", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of 53rd ACL and 7th IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S\u00e9bastien Jean, Kyunghyun Cho, Roland Memisevic, and Yoshua Bengio. 2015. On using very large tar- get vocabulary for neural machine translation. In Proceedings of 53rd ACL and 7th IJCNLP, pages 1- 10, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Recurrent continuous translation models", |
| "authors": [ |
| { |
| "first": "Nal", |
| "middle": [], |
| "last": "Kalchbrenner", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1700--1709", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nal Kalchbrenner and Phil Blunsom. 2013. Recurrent continuous translation models. In Proceedings of the 2013 EMNLP, pages 1700-1709, Seattle, Wash- ington.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Deep visualsemantic alignments for generating image descriptions", |
| "authors": [ |
| { |
| "first": "Andrej", |
| "middle": [], |
| "last": "Karpathy", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "3128--3137", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrej Karpathy and Li Fei-Fei. 2015. Deep visual- semantic alignments for generating image descrip- tions. In Proceedings of CVPR, pages 3128-3137, Boston, Massachusetts.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Using string-kernels for learning semantic parsers", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Rohit", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Kate", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st COLING and 44th ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "913--920", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rohit J. Kate and Raymond J. Mooney. 2006. Using string-kernels for learning semantic parsers. In Pro- ceedings of the 21st COLING and 44th ACL, pages 913-920, Sydney, Australia.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Learning to transform natural to formal languages", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Rohit", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuk", |
| "middle": [ |
| "Wah" |
| ], |
| "last": "Kate", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 20th AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "1062--1068", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rohit J. Kate, Yuk Wah Wong, and Raymond J. Mooney. 2005. Learning to transform natural to formal languages. In Proceedings of the 20th AAAI, pages 1062-1068, Pittsburgh, Pennsylvania.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Weakly supervised training of semantic parsers", |
| "authors": [ |
| { |
| "first": "Jayant", |
| "middle": [], |
| "last": "Krishnamurthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "754--765", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jayant Krishnamurthy and Tom Mitchell. 2012. Weakly supervised training of semantic parsers. In Proceedings of the 2012 EMNLP, pages 754-765, Jeju Island, Korea.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Inducing probabilistic CCG grammars from logical form with higher-order unification", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1223--1233", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Luke Zettlemoyer, Sharon Gold- water, and Mark Steedman. 2010. Inducing prob- abilistic CCG grammars from logical form with higher-order unification. In Proceedings of the 2010 EMNLP, pages 1223-1233, Cambridge, Mas- sachusetts.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Lexical generalization in CCG grammar induction for semantic parsing", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1512--1523", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Luke Zettlemoyer, Sharon Gold- water, and Mark Steedman. 2011. Lexical gener- alization in CCG grammar induction for semantic parsing. In Proceedings of the 2011 EMNLP, pages 1512-1523, Edinburgh, United Kingdom.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Scaling semantic parsers with on-the-fly ontology matching", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunsol", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1545--1556", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Eunsol Choi, Yoav Artzi, and Luke Zettlemoyer. 2013. Scaling semantic parsers with on-the-fly ontology matching. In Proceedings of the 2013 EMNLP, pages 1545-1556, Seattle, Washing- ton.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Learning dependency-based compositional semantics", |
| "authors": [ |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "I" |
| ], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Computational Linguistics", |
| "volume": "39", |
| "issue": "2", |
| "pages": "389--446", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Percy Liang, Michael I. Jordan, and Dan Klein. 2013. Learning dependency-based compositional seman- tics. Computational Linguistics, 39(2):389-446.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A generative model for parsing natural language to meaning representations", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wee", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Sun Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 2008 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "783--792", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Lu, Hwee Tou Ng, Wee Sun Lee, and Luke S. Zettlemoyer. 2008. A generative model for pars- ing natural language to meaning representations. In Proceedings of the 2008 EMNLP, pages 783-792, Honolulu, Hawaii.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Addressing the rare word problem in neural machine translation", |
| "authors": [ |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd ACL and 7th IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "11--19", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minh-Thang Luong, Ilya Sutskever, Quoc V Le, Oriol Vinyals, and Wojciech Zaremba. 2015a. Address- ing the rare word problem in neural machine trans- lation. In Proceedings of the 53rd ACL and 7th IJC- NLP, pages 11-19, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Effective approaches to attentionbased neural machine translation", |
| "authors": [ |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Hieu", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1412--1421", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thang Luong, Hieu Pham, and Christopher D. Man- ning. 2015b. Effective approaches to attention- based neural machine translation. In Proceedings of the 2015 EMNLP, pages 1412-1421, Lisbon, Portu- gal.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Listen, attend, and walk: Neural mapping of navigational instructions to action sequences", |
| "authors": [ |
| { |
| "first": "Hongyuan", |
| "middle": [], |
| "last": "Mei", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew R", |
| "middle": [], |
| "last": "Walter", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 30th AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hongyuan Mei, Mohit Bansal, and Matthew R Wal- ter. 2016. Listen, attend, and walk: Neural mapping of navigational instructions to action sequences. In Proceedings of the 30th AAAI, Phoenix, Arizona. to appear.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "A fully statistical approach to natural language interfaces", |
| "authors": [ |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Stallard", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Bobrow", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "55--61", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Scott Miller, David Stallard, Robert Bobrow, and Richard Schwartz. 1996. A fully statistical ap- proach to natural language interfaces. In ACL, pages 55-61.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "On the difficulty of training recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Razvan", |
| "middle": [], |
| "last": "Pascanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 30th ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "1310--1318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Razvan Pascanu, Tomas Mikolov, and Yoshua Bengio. 2013. On the difficulty of training recurrent neural networks. In Proceedings of the 30th ICML, pages 1310-1318, Atlanta, Georgia.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Grounded unsupervised semantic parsing", |
| "authors": [ |
| { |
| "first": "Hoifung", |
| "middle": [], |
| "last": "Poon", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "933--943", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hoifung Poon. 2013. Grounded unsupervised seman- tic parsing. In Proceedings of the 51st ACL, pages 933-943, Sofia, Bulgaria.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Towards a theory of natural language interfaces to databases", |
| "authors": [ |
| { |
| "first": "Ana-Maria", |
| "middle": [], |
| "last": "Popescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| }, |
| { |
| "first": "Henry", |
| "middle": [], |
| "last": "Kautz", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 8th IUI", |
| "volume": "", |
| "issue": "", |
| "pages": "149--157", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ana-Maria Popescu, Oren Etzioni, and Henry Kautz. 2003. Towards a theory of natural language inter- faces to databases. In Proceedings of the 8th IUI, pages 149-157, Miami, Florida.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Language to code: Learning semantic parsers for if-this-then-that recipes", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Quirk", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of 53rd ACL and 7th IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "878--888", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Quirk, Raymond Mooney, and Michel Galley. 2015. Language to code: Learning semantic parsers for if-this-then-that recipes. In Proceedings of 53rd ACL and 7th IJCNLP, pages 878-888, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Large-scale semantic parsing without question-answer pairs", |
| "authors": [ |
| { |
| "first": "Siva", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "TACL", |
| "volume": "2", |
| "issue": "", |
| "pages": "377--392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siva Reddy, Mirella Lapata, and Mark Steedman. 2014. Large-scale semantic parsing without question-answer pairs. TACL, 2(Oct):377-392.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A neural attention model for abstractive sentence summarization", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rush", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "379--389", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander M. Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sen- tence summarization. In Proceedings of the 2015 EMNLP, pages 379-389, Lisbon, Portugal.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "27", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural net- works. In Advances in Neural Information Pro- cessing Systems 27, pages 3104-3112. Curran As- sociates, Inc.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Automated construction of database interfaces: Intergrating statistical and relational learning for semantic parsing", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Lappoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the 2000 EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "133--141", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lappoon R. Tang and Raymond J. Mooney. 2000. Au- tomated construction of database interfaces: Inter- grating statistical and relational learning for seman- tic parsing. In Proceedings of the 2000 EMNLP, pages 133-141, Hong Kong, China.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Using multiple clause constructors in inductive logic programming for semantic parsing", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Lappoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the 12th ECML", |
| "volume": "", |
| "issue": "", |
| "pages": "466--477", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lappoon R. Tang and Raymond J. Mooney. 2001. Us- ing multiple clause constructors in inductive logic programming for semantic parsing. In Proceedings of the 12th ECML, pages 466-477, Freiburg, Ger- many.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Acquiring word-meaning mappings for natural language interfaces", |
| "authors": [ |
| { |
| "first": "Cynthia", |
| "middle": [ |
| "A" |
| ], |
| "last": "Thomspon", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Artifical Intelligence Research", |
| "volume": "18", |
| "issue": "", |
| "pages": "1--44", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cynthia A. Thomspon and Raymond J. Mooney. 2003. Acquiring word-meaning mappings for natural lan- guage interfaces. Journal of Artifical Intelligence Research, 18:1-44.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Lecture 6.5-RmsProp: Divide the gradient by a running average of its recent magnitude", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Tieleman", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Tieleman and G. Hinton. 2012. Lecture 6.5- RmsProp: Divide the gradient by a running average of its recent magnitude. Technical report.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Modeling coverage for neural machine translation", |
| "authors": [ |
| { |
| "first": "Zhaopeng", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengdong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaohua", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhaopeng Tu, Zhengdong Lu, Yang Liu, Xiaohua Liu, and Hang Li. 2016. Modeling coverage for neu- ral machine translation. In Proceedings of the 54th ACL, Berlin, Germany.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Grammar as a foreign language", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Terry", |
| "middle": [], |
| "last": "Koo", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "28", |
| "issue": "", |
| "pages": "2755--2763", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Lukasz Kaiser, Terry Koo, Slav Petrov, Ilya Sutskever, and Geoffrey Hinton. 2015a. Gram- mar as a foreign language. In Advances in Neu- ral Information Processing Systems 28, pages 2755- 2763. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Show and tell: A neural image caption generator", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Toshev", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru", |
| "middle": [], |
| "last": "Erhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "3156--3164", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. 2015b. Show and tell: A neural image caption generator. In Proceedings of CVPR, pages 3156-3164, Boston, Massachusetts.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Learning for semantic parsing with statistical machine translation", |
| "authors": [ |
| { |
| "first": "Yuk", |
| "middle": [ |
| "Wah" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 2006 NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "439--446", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuk Wah Wong and Raymond J. Mooney. 2006. Learning for semantic parsing with statistical ma- chine translation. In Proceedings of the 2006 NAACL, pages 439-446, New York, New York.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Learning synchronous grammars for semantic parsing with lambda calculus", |
| "authors": [ |
| { |
| "first": "Yuk", |
| "middle": [ |
| "Wah" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "960--967", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuk Wah Wong and Raymond J. Mooney. 2007. Learning synchronous grammars for semantic pars- ing with lambda calculus. In Proceedings of the 45th ACL, pages 960-967, Prague, Czech Republic.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Progress in natural language understanding: An application to lunar geology", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [ |
| "A" |
| ], |
| "last": "Woods", |
| "suffix": "" |
| } |
| ], |
| "year": 1973, |
| "venue": "National Computer Conference and Exposition", |
| "volume": "", |
| "issue": "", |
| "pages": "441--450", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "W. A. Woods. 1973. Progress in natural language un- derstanding: An application to lunar geology. In Proceedings of the June 4-8, 1973, National Com- puter Conference and Exposition, pages 441-450, New York, NY.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Show, attend and tell: Neural image caption generation with visual attention", |
| "authors": [ |
| { |
| "first": "Kelvin", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhudinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Rich", |
| "middle": [], |
| "last": "Zemel", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 32nd ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "2048--2057", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kelvin Xu, Jimmy Ba, Ryan Kiros, Kyunghyun Cho, Aaron Courville, Ruslan Salakhudinov, Rich Zemel, and Yoshua Bengio. 2015. Show, attend and tell: Neural image caption generation with visual atten- tion. In Proceedings of the 32nd ICML, pages 2048- 2057, Lille, France.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Recurrent neural network regularization", |
| "authors": [ |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wojciech Zaremba, Ilya Sutskever, and Oriol Vinyals. 2015. Recurrent neural network regularization. In Proceedings of the ICLR, San Diego, California.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Learning to parse database queries using inductive logic programming", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Zelle", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the 19th AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "1050--1055", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John M. Zelle and Raymond J. Mooney. 1996. Learn- ing to parse database queries using inductive logic programming. In Proceedings of the 19th AAAI, pages 1050-1055, Portland, Oregon.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Learning to map sentences to logical form: Structured classification with probabilistic categorial grammars", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 21st UAI", |
| "volume": "", |
| "issue": "", |
| "pages": "658--666", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luke S. Zettlemoyer and Michael Collins. 2005. Learning to map sentences to logical form: Struc- tured classification with probabilistic categorial grammars. In Proceedings of the 21st UAI, pages 658-666, Toronto, ON.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Online learning of relaxed CCG grammars for parsing to logical form", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the EMNLP-CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "678--687", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luke Zettlemoyer and Michael Collins. 2007. On- line learning of relaxed CCG grammars for parsing to logical form. In In Proceedings of the EMNLP- CoNLL, pages 678-687, Prague, Czech Republic.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Type-driven incremental semantic parsing with polymorphism", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1416--1421", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Zhao and Liang Huang. 2015. Type-driven in- cremental semantic parsing with polymorphism. In Proceedings of the 2015 NAACL, pages 1416-1421, Denver, Colorado.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "text": "Input utterances and their logical forms are encoded and decoded with neural networks. An attention layer is used to learn soft alignments.", |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "text": "This dataset has 5, 410 queries to a flight booking system. The standard split has 4, 480 training instances, 480 development instances, and 450 test instances. Sentences are paired with lambda-calculus expressions. Values for the variables date, time, city, aircraft code, airport, airline, and number are identified.IFTTT Quirk et al. (2015) created this dataset by extracting a large number of if-this-then-that do not require a bscs? answer(company(J,'microsoft'),job(J),not((req deg(J,'bscs')", |
| "type_str": "figure" |
| }, |
| "TABREF2": { |
| "text": "Evaluation results on JOBS.", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "text": "Evaluation results on ATIS.", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "text": "Evaluation results on IFTTT.", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>tention substantially improves performance on all</td></tr><tr><td>three datasets. This underlines the importance of</td></tr><tr><td>utilizing soft alignments between inputs and out-</td></tr><tr><td>puts. We further analyze what the attention layer</td></tr><tr><td>learns in</td></tr></table>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |