| { |
| "paper_id": "P17-1005", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:17:42.181552Z" |
| }, |
| "title": "Learning Structured Natural Language Representations for Semantic Parsing", |
| "authors": [ |
| { |
| "first": "Jianpeng", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh \u2021 IBM T.J. Watson Research", |
| "location": {} |
| }, |
| "email": "jianpeng.cheng@ed.ac.uk" |
| }, |
| { |
| "first": "Siva", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh \u2021 IBM T.J. Watson Research", |
| "location": {} |
| }, |
| "email": "siva.reddy@ed.ac.uk" |
| }, |
| { |
| "first": "Vijay", |
| "middle": [], |
| "last": "Saraswat", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh \u2021 IBM T.J. Watson Research", |
| "location": {} |
| }, |
| "email": "vsaraswa@us.ibm.com" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh \u2021 IBM T.J. Watson Research", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We introduce a neural semantic parser which is interpretable and scalable. Our model converts natural language utterances to intermediate, domain-general natural language representations in the form of predicate-argument structures, which are induced with a transition system and subsequently mapped to target domains. The semantic parser is trained end-to-end using annotated logical forms or their denotations. We achieve the state of the art on SPADES and GRAPHQUESTIONS and obtain competitive results on GEO-QUERY and WEBQUESTIONS. The induced predicate-argument structures shed light on the types of representations useful for semantic parsing and how these are different from linguistically motivated ones. 1", |
| "pdf_parse": { |
| "paper_id": "P17-1005", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We introduce a neural semantic parser which is interpretable and scalable. Our model converts natural language utterances to intermediate, domain-general natural language representations in the form of predicate-argument structures, which are induced with a transition system and subsequently mapped to target domains. The semantic parser is trained end-to-end using annotated logical forms or their denotations. We achieve the state of the art on SPADES and GRAPHQUESTIONS and obtain competitive results on GEO-QUERY and WEBQUESTIONS. The induced predicate-argument structures shed light on the types of representations useful for semantic parsing and how these are different from linguistically motivated ones. 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Semantic parsing is the task of mapping natural language utterances to machine interpretable meaning representations. Despite differences in the choice of meaning representation and model structure, most existing work conceptualizes semantic parsing following two main approaches. Under the first approach, an utterance is parsed and grounded to a meaning representation directly via learning a task-specific grammar (Zelle and Mooney, 1996; Zettlemoyer and Collins, 2005; Wong and Mooney, 2006; Kwiatkowksi et al., 2010; Liang et al., 2011; Berant et al., 2013; Flanigan et al., 2014; Pasupat and Liang, 2015; Groschwitz et al., 2015) . Under the second approach, the utterance is first parsed to an intermediate task-independent representation tied to a syntactic parser and then mapped to a grounded representation (Kwiatkowski et al., 2013; Reddy et al., , 2014 Krishnamurthy and Mitchell, 2015; Gardner and Krishnamurthy, 2017) . A merit of the two-stage approach is that it creates reusable intermediate interpretations, which potentially enables the handling of unseen words and knowledge transfer across domains (Bender et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 417, |
| "end": 441, |
| "text": "(Zelle and Mooney, 1996;", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 442, |
| "end": 472, |
| "text": "Zettlemoyer and Collins, 2005;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 473, |
| "end": 495, |
| "text": "Wong and Mooney, 2006;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 496, |
| "end": 521, |
| "text": "Kwiatkowksi et al., 2010;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 522, |
| "end": 541, |
| "text": "Liang et al., 2011;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 542, |
| "end": 562, |
| "text": "Berant et al., 2013;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 563, |
| "end": 585, |
| "text": "Flanigan et al., 2014;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 586, |
| "end": 610, |
| "text": "Pasupat and Liang, 2015;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 611, |
| "end": 635, |
| "text": "Groschwitz et al., 2015)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 818, |
| "end": 844, |
| "text": "(Kwiatkowski et al., 2013;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 845, |
| "end": 865, |
| "text": "Reddy et al., , 2014", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 866, |
| "end": 899, |
| "text": "Krishnamurthy and Mitchell, 2015;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 900, |
| "end": 932, |
| "text": "Gardner and Krishnamurthy, 2017)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1120, |
| "end": 1141, |
| "text": "(Bender et al., 2015)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The successful application of encoder-decoder models (Bahdanau et al., 2015; Sutskever et al., 2014) to a variety of NLP tasks has provided strong impetus to treat semantic parsing as a sequence transduction problem where an utterance is mapped to a target meaning representation in string format (Dong and Lapata, 2016; Jia and Liang, 2016; Ko\u010disk\u00fd et al., 2016) . Such models still fall under the first approach, however, in contrast to previous work (Zelle and Mooney, 1996; Zettlemoyer and Collins, 2005; Liang et al., 2011) they reduce the need for domain-specific assumptions, grammar learning, and more generally extensive feature engineering. But this modeling flexibility comes at a cost since it is no longer possible to interpret how meaning composition is performed. Such knowledge plays a critical role in understand modeling limitations so as to build better semantic parsers. Moreover, without any taskspecific prior knowledge, the learning problem is fairly unconstrained, both in terms of the possible derivations to consider and in terms of the target output which can be ill-formed (e.g., with extra or missing brackets).", |
| "cite_spans": [ |
| { |
| "start": 53, |
| "end": 76, |
| "text": "(Bahdanau et al., 2015;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 77, |
| "end": 100, |
| "text": "Sutskever et al., 2014)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 297, |
| "end": 320, |
| "text": "(Dong and Lapata, 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 321, |
| "end": 341, |
| "text": "Jia and Liang, 2016;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 342, |
| "end": 363, |
| "text": "Ko\u010disk\u00fd et al., 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 453, |
| "end": 477, |
| "text": "(Zelle and Mooney, 1996;", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 478, |
| "end": 508, |
| "text": "Zettlemoyer and Collins, 2005;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 509, |
| "end": 528, |
| "text": "Liang et al., 2011)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we propose a neural semantic parser that alleviates the aforementioned problems. Our model falls under the second class of approaches where utterances are first mapped to an intermediate representation containing natural language predicates. However, rather than using an external parser (Reddy et al., 2014 or manually specified CCG grammars (Kwiatkowski et al., 2013) , we induce intermediate representations in the form of predicate-argument structures from data. This is achieved with a transition-based approach which by design yields recursive semantic structures, avoiding the problem of generating ill-formed meaning representations. Compared to most existing semantic parsers which employ a CKY style bottom-up parsing strategy (Krishnamurthy and Mitchell, 2012; Cai and Yates, 2013; Berant et al., 2013; Berant and Liang, 2014) , the transition-based approach we proposed does not require feature decomposition over structures and thereby enables the exploration of rich, non-local features. The output of the transition system is then grounded (e.g., to a knowledge base) with a neural mapping model under the assumption that grounded and ungrounded structures are isomorphic. 2 As a result, we obtain a neural model that jointly learns to parse natural language semantics and induce a lexicon that helps grounding.", |
| "cite_spans": [ |
| { |
| "start": 302, |
| "end": 321, |
| "text": "(Reddy et al., 2014", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 357, |
| "end": 383, |
| "text": "(Kwiatkowski et al., 2013)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 751, |
| "end": 785, |
| "text": "(Krishnamurthy and Mitchell, 2012;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 786, |
| "end": 806, |
| "text": "Cai and Yates, 2013;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 807, |
| "end": 827, |
| "text": "Berant et al., 2013;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 828, |
| "end": 851, |
| "text": "Berant and Liang, 2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The whole network is trained end-to-end on natural language utterances paired with annotated logical forms or their denotations. We conduct experiments on four datasets, including GEOQUERY (which has logical forms; Zelle and Mooney 1996), SPADES (Bisk et al., 2016) , WEB-QUESTIONS (Berant et al., 2013) , and GRAPH-QUESTIONS (Su et al., 2016 ) (which have denotations). Our semantic parser achieves the state of the art on SPADES and GRAPHQUESTIONS, while obtaining competitive results on GEOQUERY and WEBQUESTIONS. A side-product of our modeling framework is that the induced intermediate representations can contribute to rationalizing neural predictions (Lei et al., 2016) . Specifically, they can shed light on the kinds of representations (especially predicates) useful for semantic parsing. Evaluation of the induced predicate-argument relations against syntax-based ones reveals that they are interpretable and meaningful compared to heuristic baselines, but they sometimes deviate from linguistic conventions.", |
| "cite_spans": [ |
| { |
| "start": 246, |
| "end": 265, |
| "text": "(Bisk et al., 2016)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 282, |
| "end": 303, |
| "text": "(Berant et al., 2013)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 326, |
| "end": 342, |
| "text": "(Su et al., 2016", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 658, |
| "end": 676, |
| "text": "(Lei et al., 2016)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Problem Formulation Let K denote a knowledge base or more generally a reasoning system, and x an utterance paired with a grounded meaning representation G or its denotation y. Our problem is to learn a semantic parser that maps x to G via an intermediate ungrounded representation U . When G is executed against K, it outputs denota- tion y.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preliminaries", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We represent grounded meaning representations in FunQL (Kate et al., 2005 ) amongst many other alternatives such as lambda calculus (Zettlemoyer and Collins, 2005) , \u03bb-DCS (Liang, 2013) or graph queries (Holzschuher and Peinl, 2013; Harris et al., 2013) . FunQL is a variable-free query language, where each predicate is treated as a function symbol that modifies an argument list. For example, the FunQL representation for the utterance which states do not border texas is:", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 73, |
| "text": "FunQL (Kate et al., 2005", |
| "ref_id": null |
| }, |
| { |
| "start": 132, |
| "end": 163, |
| "text": "(Zettlemoyer and Collins, 2005)", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 172, |
| "end": 185, |
| "text": "(Liang, 2013)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 203, |
| "end": 232, |
| "text": "(Holzschuher and Peinl, 2013;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 233, |
| "end": 253, |
| "text": "Harris et al., 2013)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounded Meaning Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "answer(exclude(state(all), next to(texas)))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounded Meaning Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "where next to is a domain-specific binary predicate that takes one argument (i.e., the entity texas) and returns a set of entities (e.g., the states bordering Texas) as its denotation. all is a special predicate that returns a collection of entities. exclude is a predicate that returns the difference between two input sets. An advantage of FunQL is that the resulting s-expression encodes semantic compositionality and derivation of the logical forms. This property makes FunQL logical forms convenient to be predicted with recurrent neural networks (Vinyals et al., 2015; Choe and Charniak, 2016; . However, FunQL is less expressive than lambda calculus, partially due to the elimination of variables. A more compact logical formulation which our method also applies to is \u03bb-DCS (Liang, 2013) . In the absence of anaphora and composite binary predicates, conversion algorithms exist between FunQL and \u03bb-DCS. However, we leave this to future work.", |
| "cite_spans": [ |
| { |
| "start": 552, |
| "end": 574, |
| "text": "(Vinyals et al., 2015;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 575, |
| "end": 599, |
| "text": "Choe and Charniak, 2016;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 782, |
| "end": 795, |
| "text": "(Liang, 2013)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounded Meaning Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "We also use FunQL to express ungrounded meaning representations. The latter consist primarily of natural language predicates and domain-general predicates. Assuming for simplicity that domaingeneral predicates share the same vocabulary in ungrounded and grounded representations, the ungrounded representation for the example utterance is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ungrounded Meaning Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "answer(exclude(states(all), border(texas)))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ungrounded Meaning Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "where states and border are natural language predicates. In this work we consider five types of domain-general predicates illustrated in Table 1 . Notice that domain-general predicates are often implicit, or represent extra-sentential knowledge. For example, the predicate all in the above utterance represents all states in the domain which are not mentioned in the utterance but are critical for working out the utterance denotation. Finally, note that for certain domain-general predicates, it also makes sense to extract natural language rationales (e.g., not is indicative for exclude). But we do not find this helpful in experiments.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 137, |
| "end": 144, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ungrounded Meaning Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "In this work we constrain ungrounded representations to be structurally isomorphic to grounded ones. In order to derive the target logical forms, all we have to do is replacing predicates in the ungrounded representations with symbols in the knowledge base.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ungrounded Meaning Representation", |
| "sec_num": null |
| }, |
| { |
| "text": "In this section, we discuss our neural model which maps utterances to target logical forms. The semantic parsing task is decomposed in two stages: we first explain how an utterance is converted to an intermediate representation (Section 3.1), and then describe how it is grounded to a knowledge base (Section 3.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modeling", |
| "sec_num": "3" |
| }, |
| { |
| "text": "At this stage, utterances are mapped to intermediate representations with a transition-based algorithm. In general, the transition system generates the representation by following a derivation tree (which contains a set of applied rules) and some canonical generation order (e.g., depth-first). For FunQL, a simple solution exists since the representation itself encodes the derivation. Consider again answer(exclude(states(all), border(texas))) which is tree structured. Each predicate (e.g., border) can be visualized as a non-terminal node of the tree and each entity (e.g., texas) as a terminal. The predicate all is a special case which acts as a terminal directly. We can generate the tree with a top-down, depth first transition system reminiscent of recurrent neural network grammars (RN-NGs; . Similar to RNNG, our algorithm uses a buffer to store input tokens in the utterance and a stack to store partially completed trees. A major difference in our semantic parsing scenario is that tokens in the buffer are not fetched in a sequential order or removed from the buffer. This is because the lexical alignment between an utterance and its semantic representation is hidden. Moreover, some predicates cannot be clearly anchored to a token span. Therefore, we allow the generation algorithm to pick tokens and combine logical forms in arbitrary orders, conditioning on the entire set of sentential features. Alternative solutions in the traditional semantic parsing literature include a floating chart parser (Pasupat and Liang, 2015) which allows to construct logical predicates out of thin air.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our transition system defines three actions, namely NT, TER, and RED, explained below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "NT(X) generates a Non-Terminal predicate. This predicate is either a natural language expression such as border, or one of the domain-general predicates exemplified in Table 1 (e.g., exclude). The type of predicate is determined by the placeholder X and once generated, it is pushed onto the stack and represented as a non-terminal followed by an open bracket (e.g., 'border('). The open bracket will be closed by a reduce operation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 168, |
| "end": 175, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "TER(X) generates a TERminal entity or the special predicate all. Note that the terminal choice does not include variable (e.g., $0, $1), since FunQL is a variable-free language which sufficiently captures the semantics of the datasets we work with. The framework could be extended to generate directly acyclic graphs by incorporating variables with additional transition actions for handling variable mentions and co-reference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "RED stands for REDuce and is used for subtree completion. It recursively pops elements from the stack until an open non-terminal node is encountered. The non-terminal is popped as well, after which a composite term representing the entire subtree, e.g., border(texas), is pushed back to the stack. If a RED action results in having no more open non-terminals left on the stack, the transition system terminates. Table 2 shows the transition actions used to generate our running example.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 412, |
| "end": 419, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The model generates the ungrounded representation U conditioned on utterance x by recursively calling one of the above three actions. Note that U is defined by a sequence of actions (denoted Sentence: which states do not border texas Non-terminal symbols in buffer: which, states, do, not, border by a) and a sequence of term choices (denoted by u) as shown in Table 2 . The conditional probability p(U |x) is factorized over time steps as:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 361, |
| "end": 368, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p(U |x) = p(a, u|x) = T t=1 p(a t |a <t , x)p(u t |a <t , x) I(at =RED) (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where I is an indicator function.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To predict the actions of the transition system, we encode the input buffer with a bidirectional LSTM (Hochreiter and Schmidhuber, 1997) and the output stack with a stack-LSTM (Dyer et al., 2015) . At each time step, the model uses the representation of the transition system e t to predict an action:", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 136, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 176, |
| "end": 195, |
| "text": "(Dyer et al., 2015)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(a t |a <t , x) \u221d exp(W a \u2022 e t )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where e t is the concatenation of the buffer representation b t and the stack representation s t . While the stack representation s t is easy to retrieve as the top state of the stack-LSTM, obtaining the buffer representation b t is more involved. This is because we do not have an explicit buffer representation due to the non-projectivity of semantic parsing. We therefore compute at each time step an adaptively weighted representation of b t (Bahdanau et al., 2015) conditioned on the stack representation s t . This buffer representation is then concatenated with the stack representation to form the system representation e t . When the predicted action is either NT or TER, an ungrounded term u t (either a predicate or an entity) needs to be chosen from the candidate list depending on the specific placeholder X. To select a domain-general term, we use the same representation of the transition system e t to compute a probability distribution over candidate terms:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p(u GENERAL t |a <t , x) \u221d exp(W p \u2022 e t ) (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To choose a natural language term, we directly compute a probability distribution of all natural language terms (in the buffer) conditioned on the stack representation s t and select the most relevant term (Jia and Liang, 2016) :", |
| "cite_spans": [ |
| { |
| "start": 206, |
| "end": 227, |
| "text": "(Jia and Liang, 2016)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p(u NL t |a <t , x) \u221d exp(s t )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "When the predicted action is RED, the completed subtree is composed into a single representation on the stack. For the choice of composition function, we use a single-layer neural network as in Dyer et al. (2015) , which takes as input the concatenated representation of the predicate and argument of the subtree.", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 212, |
| "text": "Dyer et al. (2015)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Ungrounded Representations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Since we constrain the network to learn ungrounded structures that are isomorphic to the target meaning representation, converting ungrounded representations to grounded ones becomes a simple lexical mapping problem. For simplicity, hereafter we do not differentiate natural language and domain-general predicates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Grounded Representations", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To map an ungrounded term u t to a grounded term g t , we compute the conditional probability of g t given u t with a bi-linear neural network:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Grounded Representations", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "p(g t |u t ) \u221d exp u t \u2022 W ug \u2022 g t (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Grounded Representations", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where u t is the contextual representation of the ungrounded term given by the bidirectional LSTM, g t is the grounded term embedding, and W ug is the weight matrix. The above grounding step can be interpreted as learning a lexicon: the model exclusively relies on the intermediate representation U to predict the target meaning representation G without taking into account any additional features based on the utterance. In practice, U may provide sufficient contextual background for closed domain semantic parsing where an ungrounded predicate often maps to a single grounded predicate, but is a relatively impoverished representation for parsing large open-domain knowledge bases like Freebase. In this case, we additionally rely on a discriminative reranker which ranks the grounded representations derived from ungrounded representations (see Section 3.4).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generating Grounded Representations", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "When the target meaning representation is available, we directly compare it against our predictions and back-propagate. When only denotations are available, we compare surrogate meaning representations against our predictions (Reddy et al., 2014) . Surrogate representations are those with the correct denotations. When there exist multiple surrogate representations, 3 we select one randomly and back-propagate. The global effect of the above update rule is close to maximizing the marginal likelihood of denotations, which differs from recent work on weakly-supervised semantic parsing based on reinforcement learning (Neelakantan et al., 2017).", |
| "cite_spans": [ |
| { |
| "start": 226, |
| "end": 246, |
| "text": "(Reddy et al., 2014)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Consider utterance x with ungrounded meaning representation U , and grounded meaning representation G. Both U and G are defined with a sequence of transition actions (same for U and G) and a sequence of terms (different for U and G). Recall that a = [a 1 , \u2022 \u2022 \u2022 , a n ] denotes the transition action sequence defining U and G; let u = [u 1 , \u2022 \u2022 \u2022 , u k ] denote the ungrounded terms (e.g., predicates), and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "g = [g 1 , \u2022 \u2022 \u2022 , g k ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "the grounded terms. We aim to maximize the likelihood of the grounded meaning representation p(G|x) over all training examples. This likelihood can be decomposed into the likelihood of the grounded action sequence p(a|x) and the grounded term sequence p(g|x), which we optimize separately.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For the grounded action sequence (which by design is the same as the ungrounded action sequence and therefore the output of the transition system), we can directly maximize the log likelihood log p(a|x) for all examples:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "L a = x\u2208T log p(a|x) = x\u2208T n t=1 log p(a t |x) (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where T denotes examples in the training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For the grounded term sequence g, since the intermediate ungrounded terms are latent, we maximize the expected log likelihood of the grounded terms u [p(u|x) log p(g|u, x)] for all examples, which is a lower bound of the log likelihood log p(g|x):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "L g = x\u2208T u [p(u|x) log p(g|u, x)] = x\u2208T u p(u|x) k t=1 log p(g t |u t ) (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The final objective is the combination of L a and L g , denoted as L G = L a + L g . We optimize this objective with the method described in Lei et al. (2016) .", |
| "cite_spans": [ |
| { |
| "start": 141, |
| "end": 158, |
| "text": "Lei et al. (2016)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Objective", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "As discussed above, for open domain semantic parsing, solely relying on the ungrounded representation would result in an impoverished model lacking sentential context useful for disambiguation decisions. For all Freebase experiments, we followed previous work (Berant et al., 2013; Berant and Liang, 2014; Reddy et al., 2014) in additionally training a discriminative ranker to re-rank grounded representations globally.", |
| "cite_spans": [ |
| { |
| "start": 260, |
| "end": 281, |
| "text": "(Berant et al., 2013;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 282, |
| "end": 305, |
| "text": "Berant and Liang, 2014;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 306, |
| "end": 325, |
| "text": "Reddy et al., 2014)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reranker", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "The discriminative ranker is a maximumentropy model (Berant et al., 2013) . The objective is to maximize the log likelihood of the correct answer y given x by summing over all grounded candidates G with denotation y (i.e., [[G] ] K = y):", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 73, |
| "text": "(Berant et al., 2013)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 223, |
| "end": 227, |
| "text": "[[G]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reranker", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L y = (x,y)\u2208T log [[G]] K =y p(G|x) (8) p(G|x) \u221d exp{f (G, x)}", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Reranker", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where f (G, x) is a feature function that maps pair (G, x) into a feature vector. We give details on the features we used in Section 4.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reranker", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In this section, we verify empirically that our semantic parser derives useful meaning representations. We give details on the evaluation datasets and baselines used for comparison. We also describe implementation details and the features used in the discriminative ranker.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We evaluated our model on the following datasets which cover different domains, and use different types of training data, i.e., pairs of natural language utterances and grounded meanings or question-answer pairs. GEOQUERY (Zelle and Mooney, 1996) contains 880 questions and database queries about US geography. The utterances are compositional, but the language is simple and vocabulary size small. The majority of questions include at most one entity. SPADES (Bisk et al., 2016) contains 93,319 questions derived from CLUEWEB09 (Gabrilovich et al., 2013) sentences. Specifically, the questions were created by randomly removing an entity, thus producing sentence-denotation pairs (Reddy et al., 2014) . The sentences include two or more entities and although they are not very compositional, they constitute a large-scale dataset for neural network training. WEBQUESTIONS (Berant et al., 2013) contains 5,810 question-answer pairs. Similar to SPADES, it is based on Freebase and the questions are not very compositional. However, they are real questions asked by people on the Web. Finally, GRAPHQUESTIONS (Su et al., 2016) contains 5,166 question-answer pairs which were created by showing 500 Freebase graph queries to Amazon Mechanical Turk workers and asking them to paraphrase them into natural language.", |
| "cite_spans": [ |
| { |
| "start": 460, |
| "end": 479, |
| "text": "(Bisk et al., 2016)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 529, |
| "end": 555, |
| "text": "(Gabrilovich et al., 2013)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 681, |
| "end": 701, |
| "text": "(Reddy et al., 2014)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 873, |
| "end": 894, |
| "text": "(Berant et al., 2013)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1107, |
| "end": 1124, |
| "text": "(Su et al., 2016)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Amongst the four datasets described above, GEO-QUERY has annotated logical forms which we directly use for training. For the other three datasets, we treat surrogate meaning representations which lead to the correct answer as gold standard. The surrogates were selected from a subset of candidate Freebase graphs, which were obtained by entity linking. Entity mentions in SPADES have been automatically annotated with Freebase entities (Gabrilovich et al., 2013) . For WEBQUESTIONS and GRAPHQUESTIONS, we follow the procedure described in . We identify po-tential entity spans using seven handcrafted partof-speech patterns and associate them with Freebase entities obtained from the Freebase/KG API. 4 We use a structured perceptron trained on the entities found in WEBQUESTIONS and GRAPHQUES-TIONS to select the top 10 non-overlapping entity disambiguation possibilities. We treat each possibility as a candidate input utterance, and use the perceptron score as a feature in the discriminative reranker, thus leaving the final disambiguation to the semantic parser.", |
| "cite_spans": [ |
| { |
| "start": 436, |
| "end": 462, |
| "text": "(Gabrilovich et al., 2013)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 701, |
| "end": 702, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Apart from the entity score, the discriminative ranker uses the following basic features. The first feature is the likelihood score of a grounded representation aggregating all intermediate representations. The second set of features include the embedding similarity between the relation and the utterance, as well as the similarity between the relation and the question words. The last set of features includes the answer type as indicated by the last word in the Freebase relation (Xu et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 483, |
| "end": 500, |
| "text": "(Xu et al., 2016)", |
| "ref_id": "BIBREF50" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We used the Adam optimizer for training with an initial learning rate of 0.001, two momentum parameters [0.99, 0.999], and batch size 1. The dimensions of the word embeddings, LSTM states, entity embeddings and relation embeddings are [50, 100, 100, 100] . The word embeddings were initialized with Glove embeddings (Pennington et al., 2014) . All other embeddings were randomly initialized.", |
| "cite_spans": [ |
| { |
| "start": 235, |
| "end": 239, |
| "text": "[50,", |
| "ref_id": null |
| }, |
| { |
| "start": 240, |
| "end": 244, |
| "text": "100,", |
| "ref_id": null |
| }, |
| { |
| "start": 245, |
| "end": 249, |
| "text": "100,", |
| "ref_id": null |
| }, |
| { |
| "start": 250, |
| "end": 254, |
| "text": "100]", |
| "ref_id": null |
| }, |
| { |
| "start": 316, |
| "end": 341, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation Details", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Experimental results on the four datasets are summarized in Tables 3-6 . We present comparisons of our system which we call SCANNER (as a shorthand for SymboliC meANiNg rEpResentation) against a variety of models previously described in the literature.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 60, |
| "end": 70, |
| "text": "Tables 3-6", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "GEOQUERY results are shown in Table 5 . The first block contains symbolic systems, whereas neural models are presented in the second block. We report accuracy which is defined as the proportion of the utterance that are correctly parsed to their gold standard logical forms. All previous neural systems (Dong and Lapata, 2016; Jia and Liang, 2016) treat semantic parsing as a sequence transduction problem and use LSTMs to directly map utterances to logical forms. SCAN-NER yields performance improvements over these Models F1 Berant et al. (2013) 35.7 Yao and Van Durme (2014) 33.0 Berant and Liang (2014) 39.9 Bast and Haussmann (2015) 49.4 Berant and Liang (2015) 49.7 50.3 Bordes et al. (2014) 39.2 Dong et al. (2015) 40.8 Yih et al. 201552.5 Xu et al. (2016) 53.3 Neural Baseline 48.3 SCANNER 49.4 (Berant et al., 2013) 10.80 PARASEMPRE (Berant and Liang, 2014) 12.79 JACANA (Yao and Van Durme, 2014) 5.08 Neural Baseline 16.24 SCANNER 17.02 Table 4 : GRAPHQUESTIONS results. Numbers for comparison systems are from Su et al. (2016) .", |
| "cite_spans": [ |
| { |
| "start": 303, |
| "end": 326, |
| "text": "(Dong and Lapata, 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 327, |
| "end": 347, |
| "text": "Jia and Liang, 2016)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 524, |
| "end": 547, |
| "text": "F1 Berant et al. (2013)", |
| "ref_id": null |
| }, |
| { |
| "start": 553, |
| "end": 577, |
| "text": "Yao and Van Durme (2014)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 583, |
| "end": 606, |
| "text": "Berant and Liang (2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 643, |
| "end": 666, |
| "text": "Berant and Liang (2015)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 677, |
| "end": 697, |
| "text": "Bordes et al. (2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 703, |
| "end": 721, |
| "text": "Dong et al. (2015)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 747, |
| "end": 763, |
| "text": "Xu et al. (2016)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 803, |
| "end": 824, |
| "text": "(Berant et al., 2013)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 842, |
| "end": 866, |
| "text": "(Berant and Liang, 2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 880, |
| "end": 905, |
| "text": "(Yao and Van Durme, 2014)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 1021, |
| "end": 1037, |
| "text": "Su et al. (2016)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 30, |
| "end": 37, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 947, |
| "end": 954, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "systems when using comparable data sources for training. Jia and Liang (2016) achieve better results with synthetic data that expands GEO-QUERY; we could adopt their approach to improve model performance, however, we leave this to future work. Table 6 reports SCANNER's performance on SPADES. For all Freebase related datasets we use average F1 (Berant et al., 2013) as our evaluation metric. Previous work on this dataset has used a semantic parsing framework similar to ours where natural language is converted to an intermediate syntactic representation and then grounded to Freebase. Specifically, Bisk et al. (2016) evaluate the effectiveness of four different CCG parsers on the semantic parsing task when varying the amount of supervision required. As can be seen, SCANNER outperforms all CCG variants (from unsupervised to fully supervised) without having access to any manually annotated derivations or lexicons. For fair comparison, we also built a neural baseline that encodes an utterance with a recurrent neural network and then predicts a grounded meaning representation directly (Ture and Jojic, 2016; Yih et al., 2016) . Again, we observe that SCANNER outperforms this baseline.", |
| "cite_spans": [ |
| { |
| "start": 345, |
| "end": 366, |
| "text": "(Berant et al., 2013)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 602, |
| "end": 620, |
| "text": "Bisk et al. (2016)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1094, |
| "end": 1116, |
| "text": "(Ture and Jojic, 2016;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 1117, |
| "end": 1134, |
| "text": "Yih et al., 2016)", |
| "ref_id": "BIBREF53" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 244, |
| "end": 251, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Results on WEBQUESTIONS are summarized in Table 3 . SCANNER obtains performance on par with the best symbolic systems (see the first block in the table). It is important to note that Bast and Haussmann (2015) develop a question answering system, which contrary to ours can-Models Accuracy Zettlemoyer and Collins (2005) 79.3 Zettlemoyer and Collins (2007) 86.1 Kwiatkowksi et al. (2010) 87.9 Kwiatkowski et al. (2011) 88.6 Kwiatkowski et al. (2013) 88.0 Zhao and Huang (2015) 88.9 Liang et al. (2011) 91.1 Dong and Lapata (2016) 84.6 Jia and Liang (2016) 85.0 Jia and Liang (2016) with extra data 89.1 SCANNER 86.7 (Bisk et al., 2016) 24.8 Semi-supervised CCG (Bisk et al., 2016) 28.4 Neural baseline 28.6 Supervised CCG (Bisk et al., 2016) 30.9 Rule-based system (Bisk et al., 2016) 31.4 SCANNER 31.5 Table 6 : SPADES results.", |
| "cite_spans": [ |
| { |
| "start": 289, |
| "end": 319, |
| "text": "Zettlemoyer and Collins (2005)", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 325, |
| "end": 355, |
| "text": "Zettlemoyer and Collins (2007)", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 361, |
| "end": 386, |
| "text": "Kwiatkowksi et al. (2010)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 392, |
| "end": 417, |
| "text": "Kwiatkowski et al. (2011)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 423, |
| "end": 448, |
| "text": "Kwiatkowski et al. (2013)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 454, |
| "end": 475, |
| "text": "Zhao and Huang (2015)", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 481, |
| "end": 500, |
| "text": "Liang et al. (2011)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 506, |
| "end": 528, |
| "text": "Dong and Lapata (2016)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 534, |
| "end": 554, |
| "text": "Jia and Liang (2016)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 560, |
| "end": 580, |
| "text": "Jia and Liang (2016)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 615, |
| "end": 634, |
| "text": "(Bisk et al., 2016)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 660, |
| "end": 679, |
| "text": "(Bisk et al., 2016)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 721, |
| "end": 740, |
| "text": "(Bisk et al., 2016)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 764, |
| "end": 783, |
| "text": "(Bisk et al., 2016)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 42, |
| "end": 49, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 802, |
| "end": 809, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "not produce meaning representations whereas Berant and Liang (2015) propose a sophisticated agenda-based parser which is trained borrowing ideas from imitation learning. SCANNER is conceptually similar to who also learn a semantic parser via intermediate representations which they generate based on the output of a dependency parser. SCANNER performs competitively despite not having access to any linguistically-informed syntactic structures. The second block in Table 3 reports the results of several neural systems. Xu et al. (2016) represent the state of the art on WEBQUESTIONS. Their system uses Wikipedia to prune out erroneous candidate answers extracted from Freebase. Our model would also benefit from a similar post-processing step. As in previous experiments, SCANNER outperforms the neural baseline, too. Finally, Table 4 presents our results on GRAPHQUESTIONS. We report F1 for SCANNER, the neural baseline model, and three symbolic systems presented in Su et al. (2016) . SCANNER achieves a new state of the art on this dataset with a gain of 4.23 F1 points over the best previously reported model.", |
| "cite_spans": [ |
| { |
| "start": 520, |
| "end": 536, |
| "text": "Xu et al. (2016)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 969, |
| "end": 985, |
| "text": "Su et al. (2016)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 465, |
| "end": 472, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 828, |
| "end": 835, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Since a central feature of our parser is that it learns intermediate representations with natural language predicates, we conducted additional experiments in order to inspect their quality. For GEOQUERY Table 7 . The first row shows the percentage of exact matches between the predicted representations and the human annotations. The second row refers to the percentage of structure matches, where the predicted representations have the same structure as the human annotations, but may not use the same lexical terms. Among structurally correct predictions, we additionally compute how many tokens are correct, as shown in the third row. As can be seen, the induced meaning representations overlap to a large extent with the human gold standard. We also evaluated the intermediate representations created by SCANNER on the other three (Freebase) datasets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 203, |
| "end": 210, |
| "text": "Table 7", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis of Intermediate Representations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Since creating a manual gold standard for these large datasets is time-consuming, we compared the induced representations against the output of a syntactic parser. Specifically, we converted the questions to event-argument structures with EASY-CCG (Lewis and Steedman, 2014) , a high coverage and high accuracy CCG parser. EASYCCG extracts predicate-argument structures with a labeled F-score of 83.37%. For further comparison, we built a simple baseline which identifies predicates based on the output of the Stanford POStagger following the ordering VBD VBN VB VBP VBZ MD.", |
| "cite_spans": [ |
| { |
| "start": 248, |
| "end": 274, |
| "text": "(Lewis and Steedman, 2014)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Intermediate Representations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "As shown in Table 8 , on SPADES and WE-BQUESTIONS, the predicates learned by our model match the output of EASYCCG more closely than the heuristic baseline. But for GRAPHQUESTIONS which contains more compositional questions, the mismatch is higher. However, since the key idea of our model is to capture salient meaning for the task at hand rather than strictly obey syntax, we would not expect the Table 8 : Evaluation of predicates induced by SCANNER against EASYCCG. We report F1(%) across datasets. For SPADES, we also provide a breakdown for various utterance types.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 8", |
| "ref_id": null |
| }, |
| { |
| "start": 399, |
| "end": 406, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis of Intermediate Representations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "predicates induced by our system to entirely agree with those produced by the syntactic parser. To further analyze how the learned predicates differ from syntax-based ones, we grouped utterances in SPADES into four types of linguistic constructions: coordination (conj), control and raising (control), prepositional phrase attachment (pp), and subordinate clauses (subord). Table 8 also shows the breakdown of matching scores per linguistic construction, with the number of utterances in each type. In Table 9 , we provide examples of predicates identified by SCANNER, indicating whether they agree or not with the output of EASYCCG. As a reminder, the task in SPADES is to predict the entity masked by a blank symbol ( ). As can be seen in Table 8 , the matching score is relatively high for utterances involving coordination and prepositional phrase attachments.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 374, |
| "end": 381, |
| "text": "Table 8", |
| "ref_id": null |
| }, |
| { |
| "start": 502, |
| "end": 509, |
| "text": "Table 9", |
| "ref_id": "TABREF9" |
| }, |
| { |
| "start": 741, |
| "end": 748, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis of Intermediate Representations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The model will often identify informative predicates (e.g., nouns) which do not necessarily agree with linguistic intuition. For example, in the utterance wilhelm maybach and his son started maybach in 1909 (see Table 9 ), SCANNER identifies the predicateargument structure son(wilhelm maybach) rather than started(wilhelm maybach). We also observed that the model struggles with control and subordinate constructions. It has difficulty distinguishing control from raising predicates as exemplified in the utterance ceo john thain agreed to leave from Table 9 , where it identifies the raising predicate agreed. For subordinate clauses, SCANNER tends to take shortcuts identifying as predicates words closest to the blank symbol.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 212, |
| "end": 219, |
| "text": "Table 9", |
| "ref_id": "TABREF9" |
| }, |
| { |
| "start": 552, |
| "end": 559, |
| "text": "Table 9", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis of Intermediate Representations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We presented a neural semantic parser which converts natural language utterances to grounded meaning representations via intermediate predicate-argument structures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our model conj the boeing company was founded in 1916 and is headquartered in , illinois . nstar was founded in 1886 and is based in boston , . the is owned and operated by zuffa , llc , headquarted in las vegas , nevada . hugh attended and then shifted to uppingham school in england .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "was incorporated in 1947 and is based in new york city . the ifbb was formed in 1946 by president ben weider and his brother . wilhelm maybach and his son started maybach in 1909 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "was founded in 1996 and is headquartered in chicago . control threatened to kidnap russ . has also been confirmed to play captain haddock . hoffenberg decided to leave .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "is reportedly trying to get impregnated by djimon now . for right now , are inclined to trust obama to do just that .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "agreed to purchase wachovia corp . ceo john thain agreed to leave . so nick decided to create . salva later went on to make the non clown-based horror . eddie dumped debbie to marry when carrie was 2 . pp is the home of the university of tennessee . chu is currently a physics professor at . youtube is based in , near san francisco , california . mathematica is a product of . jobs will retire from . the nab is a strong advocacy group in . this one starred robert reed , known mostly as .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "is positively frightening as detective bud white . subord the is a national testing board that is based in toronto . is a corporation that is wholly owned by the city of edmonton . unborn is a scary movie that stars .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "'s third wife was actress melina mercouri , who died in 1994 . sure , there were who liked the shah .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "founded the , which is now also a designated terrorist group .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "is an online bank that ebay owns . zoya akhtar is a director , who has directed the upcoming movie . imelda staunton , who plays , is genius .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "is the important president that american ever had . plus mitt romney is the worst governor that has had . An assumption our model imposes is that ungrounded and grounded representations are structurally isomorphic. An advantage of this assumption is that tokens in the ungrounded and grounded representations are strictly aligned. This allows the neural network to focus on parsing and lexical mapping, sidestepping the challenging structure mapping problem which would result in a larger search space and higher variance. On the negative side, the structural isomorphism assumption restricts the expressiveness of the model, especially since one of the main benefits of adopting a two-stage parser is the potential of capturing domain-independent semantic information via the intermediate representation. While it would be challenging to handle drastically non-isomorphic structures in the current model, it is possible to perform local structure matching, i.e., when the mapping between natural language and domainspecific predicates is many-to-one or one-to-many.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For instance, Freebase does not contain a relation representing daughter, using instead two relations representing female and child. Previous work (Kwiatkowski et al., 2013) models such cases by introducing collapsing (for many-to-one mapping) and expansion (for one-to-many mapping) operators. Within our current framework, these two types of structural mismatches can be handled with semi-Markov assumptions (Sarawagi and Cohen, 2005; Kong et al., 2016) in the parsing (i.e., predicate selection) and the grounding steps, respectively. Aside from relaxing strict isomorphism, we would also like to perform crossdomain semantic parsing where the first stage of the semantic parser is shared across domains.", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 173, |
| "text": "(Kwiatkowski et al., 2013)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 410, |
| "end": 436, |
| "text": "(Sarawagi and Cohen, 2005;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 437, |
| "end": 455, |
| "text": "Kong et al., 2016)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our code is available at https://github.com/ cheng6076/scanner.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We discuss the merits and limitations of this assumption in Section 5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The average Freebase surrogate representations obtained with highest denotation match (F1) is 1.4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://developers.google.com/ freebase/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank three anonymous reviewers, members of the Edinburgh ILCC and the IBM Watson, and Abulhair Saparov for feedback. The support of the European Research Council under award number 681760 \"Translating Multiple Modalities into Text\" is gratefully acknowledged.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Neural machine translation by jointly 52 learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ICLR 2015", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly 52 learning to align and translate. In Proceedings of ICLR 2015. San Diego, California.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "More accurate question answering on Freebase", |
| "authors": [ |
| { |
| "first": "Hannah", |
| "middle": [], |
| "last": "Bast", |
| "suffix": "" |
| }, |
| { |
| "first": "Elmar", |
| "middle": [], |
| "last": "Haussmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 24th ACM International on Conference on Information and Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "1431--1440", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hannah Bast and Elmar Haussmann. 2015. More ac- curate question answering on Freebase. In Proceed- ings of the 24th ACM International on Conference on Information and Knowledge Management. ACM, pages 1431-1440.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Layers of interpretation: On grammar and compositionality", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Emily", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Bender", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Flickinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Woodley", |
| "middle": [], |
| "last": "Oepen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Packard", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Copestake", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 11th International Conference on Computational Semantics", |
| "volume": "", |
| "issue": "", |
| "pages": "239--249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily M Bender, Dan Flickinger, Stephan Oepen, Woodley Packard, and Ann Copestake. 2015. Lay- ers of interpretation: On grammar and composition- ality. In Proceedings of the 11th International Con- ference on Computational Semantics. London, UK, pages 239-249.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Semantic parsing on Freebase from question-answer pairs", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Berant", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Chou", |
| "suffix": "" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Frostig", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1533--1544", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. 2013. Semantic parsing on Freebase from question-answer pairs. In Proceedings of the 2013 Conference on Empirical Methods in Natural Lan- guage Processing. Seattle, Washington, pages 1533- 1544.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Semantic parsing via paraphrasing", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Berant", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1415--1425", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Berant and Percy Liang. 2014. Semantic parsing via paraphrasing. In Proceedings of the 52nd Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers). Balti- more, Maryland, pages 1415-1425.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Imitation learning of agenda-based semantic parsers", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Berant", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "3", |
| "issue": "", |
| "pages": "545--558", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Berant and Percy Liang. 2015. Imitation learning of agenda-based semantic parsers. Trans- actions of the Association for Computational Lin- guistics 3:545-558.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Evaluating induced CCG parsers on grounded semantic parsing", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| }, |
| { |
| "first": "Siva", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Blitzer", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2022--2027", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Bisk, Siva Reddy, John Blitzer, Julia Hock- enmaier, and Mark Steedman. 2016. Evaluating in- duced CCG parsers on grounded semantic parsing. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing. Austin, Texas, pages 2022-2027.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Question answering with subgraph embeddings", |
| "authors": [ |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "615--620", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antoine Bordes, Sumit Chopra, and Jason Weston. 2014. Question answering with subgraph embed- dings. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP). Doha, Qatar, pages 615-620.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Large-scale semantic parsing via schema matching and lexicon extension", |
| "authors": [ |
| { |
| "first": "Qingqing", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Yates", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "423--433", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qingqing Cai and Alexander Yates. 2013. Large-scale semantic parsing via schema matching and lexicon extension. In Proceedings of the 51st Annual Meet- ing of the Association for Computational Linguis- tics (Volume 1: Long Papers). Sofia, Bulgaria, pages 423-433.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Parsing as language modeling", |
| "authors": [ |
| { |
| "first": "Kook", |
| "middle": [], |
| "last": "Do", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Choe", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2331--2336", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Do Kook Choe and Eugene Charniak. 2016. Parsing as language modeling. In Proceedings of the 2016 Conference on Empirical Methods in Natural Lan- guage Processing. Austin, Texas, pages 2331-2336.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Language to logical form with neural attention", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Dong and Mirella Lapata. 2016. Language to logi- cal form with neural attention. In Proceedings of the 54th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Question answering over Freebase with multicolumn convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "260--269", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Dong, Furu Wei, Ming Zhou, and Ke Xu. 2015. Question answering over Freebase with multi- column convolutional neural networks. In Proceed- ings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th Interna- tional Joint Conference on Natural Language Pro- cessing (Volume 1: Long Papers). Beijing, China, pages 260-269.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Transitionbased dependency parsing with stack long shortterm memory", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "" |
| }, |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Austin", |
| "middle": [], |
| "last": "Matthews", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "334--343", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Dyer, Miguel Ballesteros, Wang Ling, Austin Matthews, and Noah A. Smith. 2015. Transition- based dependency parsing with stack long short- term memory. In Proceedings of the 53rd Annual Meeting of the Association for Computational Lin- guistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers). Beijing, China, pages 334-343.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Recurrent neural network grammars", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adhiguna", |
| "middle": [], |
| "last": "Kuncoro", |
| "suffix": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "199--209", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Dyer, Adhiguna Kuncoro, Miguel Ballesteros, and Noah A. Smith. 2016. Recurrent neural net- work grammars. In Proceedings of the 2016 Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies. San Diego, California, pages 199-209.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A discriminative graph-based parser for the abstract meaning representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Flanigan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1426--1436", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Flanigan, Sam Thomson, Jaime Carbonell, Chris Dyer, and Noah A. Smith. 2014. A discrim- inative graph-based parser for the abstract meaning representation. In Proceedings of the 52nd Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers). Baltimore, Mary- land, pages 1426-1436.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "FACC1: Freebase annotation of ClueWeb corpora, version 1 (release date 2013-06-26)", |
| "authors": [ |
| { |
| "first": "Evgeniy", |
| "middle": [], |
| "last": "Gabrilovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Ringgaard", |
| "suffix": "" |
| }, |
| { |
| "first": "Amarnag", |
| "middle": [], |
| "last": "Subramanya", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evgeniy Gabrilovich, Michael Ringgaard, and Amar- nag Subramanya. 2013. FACC1: Freebase anno- tation of ClueWeb corpora, version 1 (release date 2013-06-26, format version 1, correction level 0) .", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Open-Vocabulary Semantic Parsing with both Distributional Statistics and Formal Knowledge", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Jayant", |
| "middle": [], |
| "last": "Krishnamurthy", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 31st AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "3195--3201", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Gardner and Jayant Krishnamurthy. 2017. Open- Vocabulary Semantic Parsing with both Distribu- tional Statistics and Formal Knowledge. In Pro- ceedings of the 31st AAAI Conference on Artificial Intelligence. San Francisco, California, pages 3195- 3201.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Graph parsing with s-graph grammars", |
| "authors": [ |
| { |
| "first": "Jonas", |
| "middle": [], |
| "last": "Groschwitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Koller", |
| "suffix": "" |
| }, |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "Teichmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonas Groschwitz, Alexander Koller, and Christoph Te- ichmann. 2015. Graph parsing with s-graph gram- mars. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Performance of graph query languages: comparison of cypher, gremlin and native access in Neo4j", |
| "authors": [ |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Holzschuher", |
| "suffix": "" |
| }, |
| { |
| "first": "Ren\u00e9", |
| "middle": [], |
| "last": "Peinl", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Joint EDBT/ICDT 2013 Workshops", |
| "volume": "", |
| "issue": "", |
| "pages": "195--204", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Florian Holzschuher and Ren\u00e9 Peinl. 2013. Perfor- mance of graph query languages: comparison of cypher, gremlin and native access in Neo4j. In Pro- ceedings of the Joint EDBT/ICDT 2013 Workshops. ACM, pages 195-204.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Data recombination for neural semantic parsing", |
| "authors": [ |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robin Jia and Percy Liang. 2016. Data recombination for neural semantic parsing. In Proceedings of the 54th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers).", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Learning to Transform Natural to Formal Languages", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Rohit", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuk", |
| "middle": [ |
| "Wah" |
| ], |
| "last": "Kate", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings for the 20th National Conference on Artificial Intelligence. Pittsburgh", |
| "volume": "", |
| "issue": "", |
| "pages": "1062--1068", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rohit J. Kate, Yuk Wah Wong, and Raymond J. Mooney. 2005. Learning to Transform Natural to Formal Languages. In Proceedings for the 20th Na- tional Conference on Artificial Intelligence. Pitts- burgh, Pennsylvania, pages 1062-1068.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Segmental recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Lingpeng", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ICLR 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lingpeng Kong, Chris Dyer, and Noah A Smith. 2016. Segmental recurrent neural networks. In Proceed- ings of ICLR 2016. San Juan, Puerto Rico.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Semantic parsing with semi-supervised sequential autoencoders", |
| "authors": [ |
| { |
| "first": "Karl Moritz", |
| "middle": [], |
| "last": "Hermann", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1078--1087", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karl Moritz Hermann. 2016. Semantic parsing with semi-supervised sequential autoencoders. In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing. Austin, Texas, pages 1078-1087.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Weakly supervised training of semantic parsers", |
| "authors": [ |
| { |
| "first": "Jayant", |
| "middle": [], |
| "last": "Krishnamurthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning. Jeju Island", |
| "volume": "", |
| "issue": "", |
| "pages": "754--765", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jayant Krishnamurthy and Tom Mitchell. 2012. Weakly supervised training of semantic parsers. In Proceedings of the 2012 Joint Conference on Empir- ical Methods in Natural Language Processing and Computational Natural Language Learning. Jeju Is- land, Korea, pages 754-765.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Learning a Compositional Semantics for Freebase with an Open Predicate Vocabulary", |
| "authors": [ |
| { |
| "first": "Jayant", |
| "middle": [], |
| "last": "Krishnamurthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "3", |
| "issue": "", |
| "pages": "257--270", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jayant Krishnamurthy and Tom M. Mitchell. 2015. Learning a Compositional Semantics for Freebase with an Open Predicate Vocabulary. Transactions of the Association for Computational Linguistics 3:257-270.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Inducing probabilistic CCG grammars from logical form with higherorder unification", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowksi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1223--1233", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowksi, Luke Zettlemoyer, Sharon Goldwa- ter, and Mark Steedman. 2010. Inducing probabilis- tic CCG grammars from logical form with higher- order unification. In Proceedings of the 2010 Con- ference on Empirical Methods in Natural Language Processing. Cambridge, MA, pages 1223-1233.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Scaling Semantic Parsers with On-the-Fly Ontology Matching", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunsol", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of Empirical Methods on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1545--1556", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Eunsol Choi, Yoav Artzi, and Luke Zettlemoyer. 2013. Scaling Semantic Parsers with On-the-Fly Ontology Matching. In Proceedings of Empirical Methods on Natural Language Process- ing. pages 1545-1556.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Lexical generalization in CCG grammar induction for semantic parsing", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1512--1523", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Luke Zettlemoyer, Sharon Goldwa- ter, and Mark Steedman. 2011. Lexical generaliza- tion in CCG grammar induction for semantic pars- ing. In Proceedings of the 2011 Conference on Em- pirical Methods in Natural Language Processing. Edinburgh, Scotland, pages 1512-1523.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Rationalizing neural predictions", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jaakkola", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "107--117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2016. Rationalizing neural predictions. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing. Austin, Texas, pages 107- 117.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A* CCG parsing with a supertag-factored model", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "990--1000", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis and Mark Steedman. 2014. A* CCG pars- ing with a supertag-factored model. In Proceed- ings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP). Doha, Qatar, pages 990-1000.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Lambda dependency-based compositional semantics", |
| "authors": [ |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1309.4408" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Percy Liang. 2013. Lambda dependency-based compositional semantics. arXiv preprint arXiv:1309.4408 .", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Learning dependency-based compositional semantics", |
| "authors": [ |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "590--599", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Percy Liang, Michael Jordan, and Dan Klein. 2011. Learning dependency-based compositional seman- tics. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies. Portland, Oregon, pages 590-599.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "The stanford corenlp natural language processing toolkit", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenny", |
| "middle": [], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mcclosky", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of 52nd Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "55--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven Bethard, and David McClosky. 2014. The stanford corenlp natural language pro- cessing toolkit. In Proceedings of 52nd Annual Meeting of the Association for Computational Lin- guistics: System Demonstrations. Baltimore, Mary- land, pages 55-60.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Learning a natural language interface with neural programmer", |
| "authors": [ |
| { |
| "first": "Arvind", |
| "middle": [], |
| "last": "Neelakantan", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Abadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ICLR 2017", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arvind Neelakantan, Quoc V Le, Martin Abadi, An- drew McCallum, and Dario Amodei. 2017. Learn- ing a natural language interface with neural pro- grammer. In Proceedings of ICLR 2017. Toulon, France.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Compositional semantic parsing on semi-structured tables", |
| "authors": [ |
| { |
| "first": "Panupong", |
| "middle": [], |
| "last": "Pasupat", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1470--1480", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Panupong Pasupat and Percy Liang. 2015. Compo- sitional semantic parsing on semi-structured tables. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers). Beijing, China, pages 1470-1480.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 Con- ference on Empirical Methods in Natural Language Processing (EMNLP). Doha, Qatar, pages 1532- 1543.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Large-scale semantic parsing without questionanswer pairs", |
| "authors": [ |
| { |
| "first": "Siva", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "377--392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siva Reddy, Mirella Lapata, and Mark Steedman. 2014. Large-scale semantic parsing without question- answer pairs. Transactions of the Association for Computational Linguistics 2:377-392.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Transforming dependency structures to logical forms for semantic parsing", |
| "authors": [ |
| { |
| "first": "Siva", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "" |
| }, |
| { |
| "first": "Oscar", |
| "middle": [], |
| "last": "T\u00e4ckstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "4", |
| "issue": "", |
| "pages": "127--140", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siva Reddy, Oscar T\u00e4ckstr\u00f6m, Michael Collins, Tom Kwiatkowski, Dipanjan Das, Mark Steedman, and Mirella Lapata. 2016. Transforming dependency structures to logical forms for semantic parsing. Transactions of the Association for Computational Linguistics 4:127-140.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Semimarkov conditional random fields for information extraction", |
| "authors": [ |
| { |
| "first": "Sunita", |
| "middle": [], |
| "last": "Sarawagi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "William W Cohen", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "17", |
| "issue": "", |
| "pages": "1185--1192", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sunita Sarawagi and William W Cohen. 2005. Semi- markov conditional random fields for information extraction. In Advances in Neural Information Pro- cessing Systems 17, MIT Press, pages 1185-1192.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "On generating characteristic-rich question sets for qa evaluation", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Huan", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Sadler", |
| "suffix": "" |
| }, |
| { |
| "first": "Mudhakar", |
| "middle": [], |
| "last": "Srivatsa", |
| "suffix": "" |
| }, |
| { |
| "first": "Izzeddin", |
| "middle": [], |
| "last": "Gur", |
| "suffix": "" |
| }, |
| { |
| "first": "Zenghui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Xifeng", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "562--572", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yu Su, Huan Sun, Brian Sadler, Mudhakar Srivatsa, Izzeddin Gur, Zenghui Yan, and Xifeng Yan. 2016. On generating characteristic-rich question sets for qa evaluation. In Proceedings of the 2016 Confer- ence on Empirical Methods in Natural Language Processing. Austin, Texas, pages 562-572.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "27", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural net- works. In Advances in Neural Information Process- ing Systems 27, MIT Press, pages 3104-3112.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Simple and effective question answering with recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Ferhan", |
| "middle": [], |
| "last": "Ture", |
| "suffix": "" |
| }, |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Jojic", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.05029" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ferhan Ture and Oliver Jojic. 2016. Simple and ef- fective question answering with recurrent neural net- works. arXiv preprint arXiv:1606.05029 .", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Grammar as a foreign language", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Terry", |
| "middle": [], |
| "last": "Koo", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in Neural Information Processing Systems 28", |
| "volume": "", |
| "issue": "", |
| "pages": "2773--2781", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, \u0141ukasz Kaiser, Terry Koo, Slav Petrov, Ilya Sutskever, and Geoffrey Hinton. 2015. Gram- mar as a foreign language. In Advances in Neu- ral Information Processing Systems 28. MIT Press, pages 2773-2781.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Learning for semantic parsing with statistical machine translation", |
| "authors": [ |
| { |
| "first": "Yuk", |
| "middle": [ |
| "Wah" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Human Language Technology Conference of the NAACL, Main Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "439--446", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuk Wah Wong and Raymond Mooney. 2006. Learn- ing for semantic parsing with statistical machine translation. In Proceedings of the Human Language Technology Conference of the NAACL, Main Con- ference. New York City, USA, pages 439-446.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Question answering on Freebase via relation extraction and textual evidence", |
| "authors": [ |
| { |
| "first": "Kun", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Siva", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yansong", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Songfang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongyan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2326--2336", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kun Xu, Siva Reddy, Yansong Feng, Songfang Huang, and Dongyan Zhao. 2016. Question answering on Freebase via relation extraction and textual evi- dence. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers). Berlin, Germany, pages 2326- 2336.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Information extraction over structured data: Question answering with Freebase", |
| "authors": [ |
| { |
| "first": "Xuchen", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "956--966", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuchen Yao and Benjamin Van Durme. 2014. Infor- mation extraction over structured data: Question an- swering with Freebase. In Proceedings of the 52nd Annual Meeting of the Association for Computa- tional Linguistics (Volume 1: Long Papers). Balti- more, Maryland, pages 956-966.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Semantic parsing via staged query graph generation: Question answering with knowledge base", |
| "authors": [ |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Wen-Tau Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1321--1331", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wen-tau Yih, Ming-Wei Chang, Xiaodong He, and Jianfeng Gao. 2015. Semantic parsing via staged query graph generation: Question answering with knowledge base. In Proceedings of the 53rd Annual Meeting of the Association for Computational Lin- guistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers). Beijing, China, pages 1321-1331.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "The value of semantic parse labeling for knowledge base question answering", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Wen-Tau Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Meek", |
| "suffix": "" |
| }, |
| { |
| "first": "Jina", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Suh", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "201--206", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wen-tau Yih, Matthew Richardson, Chris Meek, Ming- Wei Chang, and Jina Suh. 2016. The value of se- mantic parse labeling for knowledge base question answering. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguistics (Volume 2: Short Papers). Berlin, Germany, pages 201-206.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Learning to Parse Database Queries Using Inductive Logic Programming", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Zelle", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the 13th National Conference on Artificial Intelligence. Portland, Oregon", |
| "volume": "", |
| "issue": "", |
| "pages": "1050--1055", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John M. Zelle and Raymond J. Mooney. 1996. Learn- ing to Parse Database Queries Using Inductive Logic Programming. In Proceedings of the 13th National Conference on Artificial Intelligence. Portland, Ore- gon, pages 1050-1055.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Online learning of relaxed CCG grammars for parsing to logical form", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "678--687", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luke Zettlemoyer and Michael Collins. 2007. Online learning of relaxed CCG grammars for parsing to logical form. In Proceedings of the 2007 Joint Con- ference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL). Prague, Czech Repub- lic, pages 678-687.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Learning to Map Sentences to Logical Form: Structured Classification with Probabilistic Categorial Grammars", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of 21st Conference in Uncertainty in Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "658--666", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luke S. Zettlemoyer and Michael Collins. 2005. Learning to Map Sentences to Logical Form: Struc- tured Classification with Probabilistic Categorial Grammars. In Proceedings of 21st Conference in Uncertainty in Artificial Intelligence. Edinburgh, Scotland, pages 658-666.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Type-driven incremental semantic parsing with polymorphism", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1416--1421", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Zhao and Liang Huang. 2015. Type-driven in- cremental semantic parsing with polymorphism. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies. Denver, Colorado, pages 1416-1421.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "List of domain-general predicates.", |
| "content": "<table/>" |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "Terminal symbols in buffer: texas Stack Action NT choice TER choice all answer ( exclude ( states ( all RED answer ( exclude ( states ( all ) NT border answer ( exclude ( states ( all ) , border ( TER texas answer ( exclude ( states ( all ) , border ( texas RED answer ( exclude ( states ( all ) , border ( texas ) RED answer ( exclude ( states ( all ) , border ( texas ) ) RED answer ( exclude ( states ( all ) , border ( texas ) ) )", |
| "content": "<table><tr><td/><td>NT</td><td>answer</td></tr><tr><td>answer (</td><td>NT</td><td>exclude</td></tr><tr><td>answer ( exclude (</td><td/><td/></tr></table>" |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "Actions taken by the transition system for generating the ungrounded meaning representation of the example utterance. Symbols in red indicate domain-general predicates.", |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "WEBQUESTIONS results.", |
| "content": "<table><tr><td>Models</td><td>F1</td></tr><tr><td>SEMPRE</td><td/></tr></table>" |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "GEOQUERY results.", |
| "content": "<table><tr><td>Models</td><td>F1</td></tr><tr><td>Unsupervised CCG</td><td/></tr></table>" |
| }, |
| "TABREF7": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "GEOQUERY evaluation of ungrounded meaning representations. We report accuracy against a manually created gold standard.", |
| "content": "<table><tr><td>which contains only 280 test examples, we manu-</td></tr><tr><td>ally annotated intermediate representations for the</td></tr><tr><td>test instances and evaluated the learned represen-</td></tr><tr><td>tations against them. The experimental setup aims</td></tr><tr><td>to show how humans can participate in improving</td></tr><tr><td>the semantic parser with feedback at the interme-</td></tr><tr><td>diate stage. In terms of evaluation, we use three</td></tr><tr><td>metrics shown in</td></tr></table>" |
| }, |
| "TABREF9": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "Informative predicates identified by SCANNER in various types of utterances. Yellow predicates were identified by both SCANNER and EASYCCG, red predicates by SCANNER alone, and green predicates by EASYCCG alone. essentially jointly learns how to parse natural language semantics and the lexicons that help grounding. Compared to previous neural semantic parsers, our model is more interpretable as the intermediate structures are useful for inspecting what the model has learned and whether it matches linguistic intuition.", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |