| { |
| "paper_id": "P18-1040", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:42:43.059564Z" |
| }, |
| "title": "Discourse Representation Structure Parsing", |
| "authors": [ |
| { |
| "first": "Jiangming", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh", |
| "location": { |
| "addrLine": "10 Crichton Street", |
| "postCode": "EH8 9AB", |
| "settlement": "Edinburgh" |
| } |
| }, |
| "email": "jiangming.liu@ed.ac.uk" |
| }, |
| { |
| "first": "Shay", |
| "middle": [ |
| "B" |
| ], |
| "last": "Cohen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh", |
| "location": { |
| "addrLine": "10 Crichton Street", |
| "postCode": "EH8 9AB", |
| "settlement": "Edinburgh" |
| } |
| }, |
| "email": "scohen@inf.ed.ac.uk" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh", |
| "location": { |
| "addrLine": "10 Crichton Street", |
| "postCode": "EH8 9AB", |
| "settlement": "Edinburgh" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We introduce an open-domain neural semantic parser which generates formal meaning representations in the style of Discourse Representation Theory (DRT; Kamp and Reyle 1993). We propose a method which transforms Discourse Representation Structures (DRSs) to trees and develop a structure-aware model which decomposes the decoding process into three stages: basic DRS structure prediction, condition prediction (i.e., predicates and relations), and referent prediction (i.e., variables). Experimental results on the Groningen Meaning Bank (GMB) show that our model outperforms competitive baselines by a wide margin.", |
| "pdf_parse": { |
| "paper_id": "P18-1040", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We introduce an open-domain neural semantic parser which generates formal meaning representations in the style of Discourse Representation Theory (DRT; Kamp and Reyle 1993). We propose a method which transforms Discourse Representation Structures (DRSs) to trees and develop a structure-aware model which decomposes the decoding process into three stages: basic DRS structure prediction, condition prediction (i.e., predicates and relations), and referent prediction (i.e., variables). Experimental results on the Groningen Meaning Bank (GMB) show that our model outperforms competitive baselines by a wide margin.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Semantic parsing is the task of mapping natural language to machine interpretable meaning representations. A variety of meaning representations have been adopted over the years ranging from functional query language (FunQL; Kate et al. 2005) to dependency-based compositional semantics (\u03bb-DCS; Liang et al. 2011) , lambda calculus (Zettlemoyer and Collins, 2005) , abstract meaning representations (Banarescu et al., 2013) , and minimal recursion semantics (Copestake et al., 2005) .", |
| "cite_spans": [ |
| { |
| "start": 216, |
| "end": 223, |
| "text": "(FunQL;", |
| "ref_id": null |
| }, |
| { |
| "start": 224, |
| "end": 241, |
| "text": "Kate et al. 2005)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 294, |
| "end": 312, |
| "text": "Liang et al. 2011)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 331, |
| "end": 362, |
| "text": "(Zettlemoyer and Collins, 2005)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 398, |
| "end": 422, |
| "text": "(Banarescu et al., 2013)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 457, |
| "end": 481, |
| "text": "(Copestake et al., 2005)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Existing semantic parsers are for the most part data-driven using annotated examples consisting of utterances and their meaning representations (Zelle and Mooney, 1996; Wong and Mooney, 2006; Zettlemoyer and Collins, 2005) . The successful application of encoder-decoder models (Sutskever et al., 2014; Bahdanau et al., 2015) to a variety of NLP tasks has provided strong impetus to treat semantic parsing as a sequence transduction problem where an utterance is mapped to a target meaning representation in string format (Dong and Lapata, 2016; Jia and Liang, 2016; Ko\u010disk\u1ef3 et al., 2016) . The fact that meaning representations do not naturally conform to a lin-ear ordering has also prompted efforts to develop recurrent neural network architectures tailored to tree or graph-structured decoding (Dong and Lapata, 2016; Cheng et al., 2017; Yin and Neubig, 2017; Alvarez-Melis and Jaakkola, 2017; Rabinovich et al., 2017; Buys and Blunsom, 2017) Most previous work focuses on building semantic parsers for question answering tasks, such as querying a database to retrieve an answer (Zelle and Mooney, 1996; Cheng et al., 2017) , or conversing with a flight booking system (Dahl et al., 1994) . As a result, parsers trained on query-based datasets work on restricted domains (e.g., restaurants, meetings; Wang et al. 2015) , with limited vocabularies, exhibiting limited compositionality, and a small range of syntactic and semantic constructions. In this work, we focus on open-domain semantic parsing and develop a general-purpose system which generates formal meaning representations in the style of Discourse Representation Theory (DRT; Kamp and Reyle 1993) .", |
| "cite_spans": [ |
| { |
| "start": 144, |
| "end": 168, |
| "text": "(Zelle and Mooney, 1996;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 169, |
| "end": 191, |
| "text": "Wong and Mooney, 2006;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 192, |
| "end": 222, |
| "text": "Zettlemoyer and Collins, 2005)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 278, |
| "end": 302, |
| "text": "(Sutskever et al., 2014;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 303, |
| "end": 325, |
| "text": "Bahdanau et al., 2015)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 522, |
| "end": 545, |
| "text": "(Dong and Lapata, 2016;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 546, |
| "end": 566, |
| "text": "Jia and Liang, 2016;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 567, |
| "end": 588, |
| "text": "Ko\u010disk\u1ef3 et al., 2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 798, |
| "end": 821, |
| "text": "(Dong and Lapata, 2016;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 822, |
| "end": 841, |
| "text": "Cheng et al., 2017;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 842, |
| "end": 863, |
| "text": "Yin and Neubig, 2017;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 864, |
| "end": 897, |
| "text": "Alvarez-Melis and Jaakkola, 2017;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 898, |
| "end": 922, |
| "text": "Rabinovich et al., 2017;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 923, |
| "end": 946, |
| "text": "Buys and Blunsom, 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1083, |
| "end": 1107, |
| "text": "(Zelle and Mooney, 1996;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 1108, |
| "end": 1127, |
| "text": "Cheng et al., 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1173, |
| "end": 1192, |
| "text": "(Dahl et al., 1994)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1305, |
| "end": 1322, |
| "text": "Wang et al. 2015)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1641, |
| "end": 1661, |
| "text": "Kamp and Reyle 1993)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "DRT is a popular theory of meaning representation designed to account for a variety of linguistic phenomena, including the interpretation of pronouns and temporal expressions within and across sentences. Advantageously, it supports meaning representations for entire texts rather than isolated sentences which in turn can be translated into firstorder logic. The Groningen Meaning Bank (GMB; ) provides a large collection of English texts annotated with Discourse Representation Structures (see Figure 1 for an example). GMB integrates various levels of semantic annotation (e.g., anaphora, named entities, thematic roles, rhetorical relations) into a unified formalism providing expressive meaning representations for open-domain texts.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 495, |
| "end": 503, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We treat DRT parsing as a structure prediction problem. We develop a method to transform DRSs to tree-based representations which can be further linearized to bracketed string format. We examine a series of encoder-decoder models (Bahdanau et al., 2015) differing in the way tree-", |
| "cite_spans": [ |
| { |
| "start": 230, |
| "end": 253, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "x 1 , e 1 , \u03c0 1 statement(x 1 ), say(e 1 ), Cause(e 1 , x 1 ), Topic(e 1 ,\u03c0 1 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u03c0 1 : k 1 : x 2 thing(x 2 ) \u21d2 x 3 , s 1 , x 4 , x 5 , e 2 Topic(s 1 , x 3 ), dead(s 1 ), man(x 3 ), of(x 2 , x 3 ), magazine(x 4 ), on(x 5 ,x 4 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "vest(x 5 ), wear(e 2 ), Agent(e 2 , x 2 ), Theme(e 2 , x 5 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "k 2 : x 6 thing(x 6 ) \u21d2 x 7 , s 2 , x 8 , x 9 , e 3 Topic(s 2 , x 7 ), dead(s 2 ), man(x 7 ), of(x 6 , x 7 ), |x 8 | = 2, hand(x 9 ), in(x 8 , x 9 ), grenade(x 8 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "carry(e 3 ), Agent(e 3 , x 6 ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Theme(e 3 , x 8 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "continuation(k 1 , k 2 ), parallel(k 1 , k 2 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Figure 1: DRT meaning representation for the sentence The statement says each of the dead men wore magazine vests and carried two hand grenades.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "structured logical forms are generated and show that a structure-aware decoder is paramount to open-domain semantic parsing. Our proposed model decomposes the decoding process into three stages. The first stage predicts the structure of the meaning representation omitting details such as predicates or variable names. The second stage fills in missing predicates and relations (e.g., thing, Agent) conditioning on the natural language input and the previously predicted structure. Finally, the third stage predicts variable names based on the input and the information generated so far. Decomposing decoding into these three steps reduces the complexity of generating logical forms since the model does not have to predict deeply nested structures, their variables, and predicates all at once. Moreover, the model is able to take advantage of the GMB annotations more efficiently, e.g., examples with similar structures can be effectively used in the first stage despite being very different in their lexical make-up. Finally, a piecemeal mode of generation yields more accurate predictions; since the output of every decoding step serves as input to the next one, the model is able to refine its predictions taking progressively more global context into account. Experimental results on the GMB show that our three-stage decoder outperforms a vanilla encoder-decoder model and a related variant which takes shallow structure into account, by a wide margin.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our contributions in this work are three-fold: an open-domain semantic parser which yields discourse representation structures; a novel end-toend neural model equipped with a structured decoder which decomposes the parsing process into three stages; a DRS-to-tree conversion method which transforms DRSs to tree-based representations allowing for the application of structured de-coders as well as sequential modeling. We release our code 1 and tree formatted version of the GMB in the hope of driving further research in opendomain semantic parsing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section we provide a brief overview of the representational semantic formalism used in the GMB. We refer the reader to and Kamp and Reyle (1993) for more details.", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 152, |
| "text": "Kamp and Reyle (1993)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Discourse Representation Theory (DRT; Kamp and Reyle 1993) is a general framework for representing the meaning of sentences and discourse which can handle multiple linguistic phenomena including anaphora, presuppositions, and temporal expressions. The basic meaning-carrying units in DRT are Discourse Representation Structures (DRSs), which are recursive formal meaning structures that have a model-theoretic interpretation and can be translated into first-order logic (Kamp and Reyle, 1993) . Basic DRSs consist of discourse referents (e.g., x, y) representing entities in the discourse and discourse conditions (e.g., man(x), magazine(y)) representing information about discourse referents. Following conventions in the DRT literature, we visualize DRSs in a box-like format (see Figure 1 ).", |
| "cite_spans": [ |
| { |
| "start": 470, |
| "end": 492, |
| "text": "(Kamp and Reyle, 1993)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 783, |
| "end": 791, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "GMB adopts a variant of DRT that uses a neo-Davidsonian analysis of events (Kipper et al., 2008) , i.e., events are first-order entities characterized by one-place predicate symbols (e.g., say(e 1 ) in Figure 1 ). In addition, it follows Projective Discourse Representation Theory (PDRT; Venhuizen et al. 2013) an extension of DRT specifically developed to account for the interpretation of presuppositions and related projection phenomena (e.g., conventional implicatures). In PDRT, each basic DRS introduces a label, which can be bound by a pointer indicating the interpretation site of semantic content. To account for the rhetorical structure of texts, GMB adopts Segmented Discourse Representation Theory (SDRT; Asher and Lascarides 2003) . In SDRT, discourse segments are linked with rhetorical relations reflecting different characteristics of textual coherence, such as temporal order and communicative intentions (see continuation(k 1 , k 2 ) in Figure 1 ).", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 96, |
| "text": "(Kipper et al., 2008)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 717, |
| "end": 743, |
| "text": "Asher and Lascarides 2003)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 202, |
| "end": 210, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 955, |
| "end": 963, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "More formally, DRSs are expressions of type exp e (denoting individuals or discourse referents) and exp t (i.e., truth values): exp e ::= re f , exp t ::", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "= drs | sdrs ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "discourse referents re f are in turn classified into six categories, namely common referents (x n ), event referents (e n ), state referents (s n ), segment referents (k n ), proposition referents (\u03c0 n ), and time referents (t n ). drs and sdrs denote basic and segmented DRSs, respectively:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "drs ::= pvar : ( pvar , re f ) * ( pvar , condition ) * ,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "sdrs :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": ":= k 1 : exp t , k 2 : exp t coo(k 1 , k 2 ) | k 1 : exp t k 2 : exp t sub(k 1 , k 2 ) ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Basic DRSs consist of a set of referents ( re f ) and conditions ( condition ), whereas segmented DRSs are recursive structures that combine two exp t by means of coordinating (coo) or subordinating (sub) relations. DRS conditions can be basic or complex:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "condition ::= basic | complex ,", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Basic conditions express properties of discourse referents or relations between them:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "basic ::= sym 1 ( exp e ) | sym 2 ( exp e , exp e ) | exp e = exp e | exp e = num | timex( exp e , sym 0 ) | named( exp e , sym 0 , class).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where sym n denotes n-place predicates, num denotes cardinal numbers, timex expresses temporal information (e.g., timex(x 7 , 2005) denotes the year 2005), and class refers to named entity classes (e.g., location). Complex conditions are unary or binary. Unary conditions have one DRS as argument and represent negation (\u00ac) and modal operators expressing necessity (2) and possibility (3). Condition re f : exp t represents verbs with propositional content (e.g., factive verbs). Binary conditions are conditional statements (\u2192) and questions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "complex", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "::= unary | binary ,", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "unary :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": ":= \u00ac exp t | 2 exp t |3 exp t | re f : exp t binary ::= exp t \u2192 exp t | exp t \u2228 exp t | exp t ? exp t 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The Groningen Meaning Bank Corpus", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Corpus Creation DRSs in GMB were obtained from Boxer (Bos, 2008 (Bos, , 2015 , and then refined using expert linguists and crowdsourcing methods. Boxer constructs DRSs based on a pipeline of tools involving POS-tagging, named entity recognition, and parsing. Specifically, it relies on the syntactic analysis of the C&C parser (Clark and Curran, 2007) , a general-purpose parser using the framework of Combinatory Categorial Grammar (CCG; Steedman 2001) . DRSs are obtained from CCG parses, with semantic composition being guided by the CCG syntactic derivation. Documents in the GMB were collected from a variety of sources including Voice of America (a newspaper published by the US Federal Government), the Open American National Corpus, Aesop's fables, humorous stories and jokes, and country descriptions from the CIA World Factbook. The dataset consists of 10,000 documents each annotated with a DRS. Various statistics on the GMB are shown in Table 1 . recommend sections 20-99 for training, 10-19 for tuning, and 00-09 for testing.", |
| "cite_spans": [ |
| { |
| "start": 53, |
| "end": 63, |
| "text": "(Bos, 2008", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 64, |
| "end": 76, |
| "text": "(Bos, , 2015", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 327, |
| "end": 351, |
| "text": "(Clark and Curran, 2007)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 439, |
| "end": 453, |
| "text": "Steedman 2001)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 950, |
| "end": 957, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discourse Representation Theory", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As mentioned earlier, DRSs in the GMB are displayed in a box-like format which is intuitive and easy to read but not particularly amenable to structure modeling. In this section we discuss how DRSs were post-processed and simplified into a tree-based format, which served as input to our models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "The GMB provides DRS annotations per-document. Our initial efforts have focused on sentence-level DRS parsing which is undoubtedly a necessary first step for more global semantic representations. It is relatively straightforward to obtain sentence-level DRSs from document-level annotations since referents and conditions are indexed to tokens. We match each sentence in a document with the DRS whose content bears the same indices as the tokens occurring in the sentence. This matching process yields 52,268 sentences for training (sections 20-99), 5,172 sentences for development (sections 10-19), and 5,440 sentences for testing (sections 00-09).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "In order to simplify the representation, we omit referents in the top part of the DRS (e.g., x 1 , e 1 and \u03c0 1 in Figure 1 ) but preserve them in conditions without any information loss. Also we ignore pointers to DRSs since this information is implicitly captured through the typing and co-indexing of referents. Definition (1) is simplified to:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 114, |
| "end": 122, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "drs ::= DRS( condition * ),", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "where DRS() denotes a basic DRS. We also modify discourse referents to SDRSs (e.g., k 1 , k 2 in Figure 1 ) which we regard as elements bearing scope over expressions exp t and add a 2-place predicate sym 2 to describe the discourse relation between them. So, definition (3) becomes:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 97, |
| "end": 105, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "sdrs ::=SDRS(( re f ( exp t )) * (8) ( sym 2 ( re f , re f )) * ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "where SDRS() denotes a segmented DRS, and re f are segment referents. We treat cardinal numbers num and sym 0 in relation timex as constants. We introduce the binary predicate \"card\" to represent cardinality (e.g., |x 8 | = 2 is card(x 8 , NUM)). We also simplify exp e = exp e to eq( exp e , exp e ) using the binary relation \"eq\" (e.g., x 1 = x 2 becomes eq(x 1 , x 2 )). Moreover, we ignore class in named and transform named( exp e , sym 0 , class) into sym 1 ( exp e ) (e.g., named(x 2 , mongolia, geo) becomes mongolia(x 2 )). Consequently, basic conditions (see definition (5)) are simplified to:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "basic ::= sym 1 ( exp e )| sym 2 ( exp e , exp e )", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "Analogously, we treat unary and binary conditions as scoped functions, and definition (6) becomes:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "unary ::= \u00ac | 2 | 3 | re f ( exp t ) binary ::= \u2192 | \u2228 | ?( exp t , exp t ),", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "Following the transformations described above, the DRS in Figure 1 is converted into the tree in DRS statement(x 1 ) say(e 1 ) Cause(e 1 ,x 1 ) Topic(e 1 ,\u03c0 1 ) 2, which can be subsequently linearized into a PTB-style bracketed sequence. It is important to note that the conversion does not diminish the complexity of DRSs. The average tree width in the training set is 10.39 and tree depth is 4.64.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 58, |
| "end": 66, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "\u03c0 1 SDRS k 1 DRS =\u21d2 DRS thing(x 2 ) DRS Topic(s 1 ,x 3 ) . . . Theme(e 2 , x 5 ) k 2 DRS =\u21d2 DRS thing(x 6 ) DRS Topic(s 2 ,x 7 ) . . . Theme(e 3 ,x 8 ) continuation(k 1 , k 2 ) parallel(k 1 , k 2 ) DRS(statement(x1) say(e1) Cause(e1, x1) Topic(e1, \u03c01) \u03c01(SDRS(k1 (DRS (=\u21d2(DRS(thing(x2)) DRS (Topic(s1, x3) dead(s1) man(x3) of(x2, x3) magazine(x4) on(x 5 , x4) vest(x 5 ) wear(e2) Agent(e2, x2) Theme(e2, x 5 ))))) k2(DRS =\u21d2(DRS(thing(x 6 )) DRS(Topic(s2, x7) dead(s2) man(x7) of(x 6 , x7) card(x8,NUM) hand(x9) in(x8, x9) carry(e3) Agent(e3, x6) Theme(e3, x8))))) continuation(k1, k2) parallel(k1, k2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "DRS-to-Tree Conversion", |
| "sec_num": null |
| }, |
| { |
| "text": "We present below three encoder-decoder models which are increasingly aware of the structure of the DRT meaning representations. The models take as input a natural language sentence X represented as w 1 , w 2 ,. . . , w n , and generate a sequence Y = (y 1 , y 2 , ..., y m ), which is a linearized tree (see Figure 2 bottom), where n is the length of the sentence, and m the length of the generated DRS sequence. We aim to estimate p(Y |X), the conditional probability of the semantic parse tree Y given natural language input X:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 308, |
| "end": 316, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Semantic Parsing Models", |
| "sec_num": "4" |
| }, |
| { |
| "text": "p(Y |X) = \u220f j p(y j |Y j\u22121 1 , X n 1 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Parsing Models", |
| "sec_num": "4" |
| }, |
| { |
| "text": "An encoder is used to represent the natural language input X into vector representations. Each token in a sentence is represented by a vector x k which is the concatenation of randomly initialized embeddings e w i , pre-trained word embeddings\u0113 w i , and lemma embeddings e l i :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "x k = tanh([e w i ;\u0113 w i ; e l i ] * W 1 + b 1 ), where W 1 \u2208 R D and D is a shorthand for (d w + d p + d l ) \u00d7 d input (sub-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "scripts w, p, and l denote the dimensions of word embeddings, pre-trained embeddings, and lemma embeddings, respectively); b 1 \u2208 R d input and the symbol ; denotes concatenation. Embeddings e w i and e l i are randomly initialized and tuned during training, while\u0113 w i are fixed. We use a bidirectional recurrent neural network with long short-term memory units (bi-LSTM; Hochreiter and Schmidhuber 1997) to encode natural language sentences:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "[h e 1 : h e n ] = bi-LSTM(x 1 : x n ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where h e i denotes the hidden representation of the encoder, and x i refers to the input representation of the ith token in the sentence. Table 2 summarizes the notation used throughout this paper.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 139, |
| "end": 146, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Encoder", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We employ a sequential decoder (Bahdanau et al., 2015 ) as our baseline model with the architecture shown in Figure 3(a) . Our decoder is a (forward) LSTM, which is conditionally initialized with the hidden state of the encoder, i.e., we set h d 0 = h e n and c d 0 = c e n , where c is a memory cell:", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 53, |
| "text": "(Bahdanau et al., 2015", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 109, |
| "end": 120, |
| "text": "Figure 3(a)", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "h d j = LSTM(e y j\u22121 ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where h d j denotes the hidden representation of y j , e y j are randomly initialized embeddings tuned during training, and y 0 denotes the start of sequence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The decoder uses the contextual representation of the encoder together with the embedding of the previously predicted token to output the next token from the vocabulary V :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "s j = [h ct j ; e y j\u22121 ] * W 2 + b 2 , where W 2 \u2208 R (d enc +d y )\u00d7|V | , b 2 \u2208 R |V | , d enc and d y", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "are the dimensions of the encoder hidden unit and output representation, respectively, and h ct j is obtained using an attention mechanism:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "h ct j = n \u2211 i=1 \u03b2 ji h e i ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where the weight \u03b2 ji is computed by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u03b2 ji = e f (h d j ,h e i ) \u2211 k e f (h d j ,h e k ) ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "and f is the dot-product function. We obtain the probability distribution over the output tokens as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "p j = p(y j |Y j\u22121 1 , X n 1 ) = SOFTMAX(s j ) Symbol Description X; Y", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "sequence of words; outputs w i ; y i the ith word; output X j i ; Y j i word; output sequence from position i to j e w i ; e y i random embedding of word w i ; of output y \u012b e w i fixed pretrained embedding of word w i e l i random embedding for lemma ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "l i d w dimension", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Decoder", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The baseline decoder treats all conditions in a DRS uniformly and has no means of distinguishing between conditions corresponding to tokens in a sentence (e.g., the predicate say(e 1 ) refers to the verb said) and semantic relations (e.g., Cause(e 1 , x 1 )). Our second decoder attempts to take this into account by distinguishing conditions which are local and correspond to words in a sentence from items which are more global and express semantic content (see Figure 3(b) ). Specifically, we model sentence specific conditions using a copying mechanism, and all other conditions G which do not correspond to sentential tokens (e.g., thematic roles, rhetorical relations) with an insertion mechanism.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 464, |
| "end": 475, |
| "text": "Figure 3(b)", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Shallow Structure Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Each token in a sentence is assigned a copying score o ji :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shallow Structure Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "o ji = h d j W 3 h e i ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shallow Structure Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where subscript ji denotes the ith token at jth time step, and W 3 \u2208 R d dec \u00d7d enc . All other conditions G are assigned an insertion score:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shallow Structure Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "s j = [h ct j ; e y j\u22121 ] * W 4 + b 4 ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shallow Structure Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where W 4 \u2208 R (d enc +d y )\u00d7|G| , b 4 \u2208 R |G| , and h ct j are the same with the baseline decoder. We obtain the probability distribution over output tokens as: ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shallow Structure Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "p j = p(y j |Y j\u22121 1 , X n 1 ) = SOFTMAX([o j ; s j ])", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Shallow Structure Decoder", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "As explained previously, our structure prediction problem is rather challenging: the length of a bracketed DRS is nearly five times longer than its corresponding sentence. As shown in Figure 1, a bracketed DRS, y 1 , y 2 , . .., y n consists of three parts: internal structure\u0176 =\u0177 1 ,\u0177 2 , ...\u0177 t (e.g., DRS( \u03c0 1 ( SDRS(k 1 (DRS(\u2192(DRS( )DRS( ))) k 2 ( DRS(\u2192( DRS( ) DRS ( ) ) ) ) ) ) )), condi-tions\u0232 =\u0233 1 ,\u0233 2 , ...,\u0233 r (e.g., statement, say, Topic), and referents\u1e8e =\u1e8f 1 ,\u1e8f 2 , ...,\u1e8f v (e.g., x 1 , e 1 , \u03c0 1 ), where t + r * 2 + v = n. 2 Our third decoder (see Figure 3(c) ) first predicts the structural make-up of the DRS, then the conditions, and finally their referents in an end-to-end framework. The probability distribution of structured output Y given natural language input X is rewritten as:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 184, |
| "end": 224, |
| "text": "Figure 1, a bracketed DRS, y 1 , y 2 , .", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 563, |
| "end": 574, |
| "text": "Figure 3(c)", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deep Structure Decoder", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "p(Y |X) = p(\u0176 ,\u0232 ,\u1e8e |X) = \u220f j p(\u0177 j |\u0176 j\u22121 1 , X) \u00d7 \u220f j p(\u0233 j |\u0232 j\u22121 1 ,\u0176 j 1 , X) \u00d7 \u220f j p(\u1e8f j |\u1e8e j\u22121 1 ,\u0232 j 1 ,\u0176 j 1 , X) (11) where\u0176 j\u22121 1 ,\u0232 j\u22121 1 , and\u1e8e j\u22121 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Structure Decoder", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "denote the tree structure, conditions, and referents predicted so far.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Structure Decoder", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "2 Each condition has one and only one right bracket.\u0176 j 1 denotes the structure predicted before conditions\u0233 j ;\u0176 j 1 and\u0232 j 1 are the structures and conditions predicted before referents\u1e8f j . We next discuss how each decoder is modeled.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Structure Decoder", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "To model basic DRS structure we apply the shallow decoder discussed in Section 4.3 and also shown in Figure 3(c.1) . Tokens in such structures correspond to parent nodes in a tree; in other words, they are all inserted from G, and subsequently predicted tokens are only scored with the insert score, i.e.,\u015d i = s i . The hidden units of the decoder are:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 101, |
| "end": 114, |
| "text": "Figure 3(c.1)", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "h d j = LSTM(e\u0177 j\u22121 ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "And the probabilistic distribution over structure denoting tokens is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "p(y j |Y j\u22121 1 , X) = SOFTMAX(\u015d j )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "Condition Prediction DRS conditions are generated by taking previously predicted structures into account, e.g., when \"DRS(\" or \"SDRS(\" are predicted, their conditions will be generated next. By mapping j to (k, m k ), the sequence of conditions can be rewritten as\u0233 1 , . . . ,\u0233 j , . . . ,\u0233 r = y (1,1) ,\u0233 (1,2) , . . . ,\u0233 (k,m k ) , . . . , where\u0233 (k,m k ) is m k th condition of structure token\u0177 k . The corresponding hidden units\u0125 d k act as conditional input to the decoder. Structure denoting tokens (e.g., \"DRS(\" or \"SDRS(\") are fed into the decoder one by one to generate the corresponding conditions as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "e\u0233 (k,0) =\u0125 d k * W 5 + b 5 ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "where W 5 \u2208 R d dec \u00d7d y and b 5 \u2208 R d y . The hidden unit of the conditions decoder is computed as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "h d j =h d (k,m k ) = LSTM(e\u0233 (k,m k \u22121) ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "Given hidden unith d j , we obtain the copy score\u014d j and insert scores j . The probabilistic distribution over conditions is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "p(\u0233 j |\u0232 j\u22121 1 ,\u0176 j 1 , X) = SOFTMAX([\u014d j ;s j ])", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "Referent Prediction Referents are generated based on the structure and conditions of the DRS. Each condition has at least one referent. Similar to condition prediction, the sequence of referents can be rewritten as\u1e8f 1 , . . . ,\u1e8f j , . . . ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "\u1e8f v = y (1,1) ,\u1e8f (1,2) , . . . ,\u1e8f (k,m k ) , .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": ". . The hidden units of the conditions decoder are fed into the referent decoder", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "e\u1e8f (k,0) =h d k * W 6 + b 6 , where W 6 \u2208 R d dec \u00d7d y , b 6 \u2208 R d y .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "The hidden unit of the referent decoder is computed as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "h d j =\u1e23 d (k,m k ) = LSTM(e\u1e8f (k,m k \u22121) ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "All referents are inserted from G, given hidden unit\u1e23 d j (we only obtain the insert score\u1e61 j ). The probabilistic distribution over predicates is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "p(\u1e8f j |\u1e8e j\u22121 1 ,\u0232 j 1 ,\u0176 j 1 , X) = SOFTMAX(\u1e61 j ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that a single LSTM is adopted for structure, condition and referent prediction. The mathematical symbols are summarized in Table 2 .",
| "cite_spans": [],
| "ref_spans": [
| {
| "start": 128,
| "end": 135,
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Structure Prediction", |
| "sec_num": null |
| }, |
| { |
| "text": "The models are trained to minimize a cross-entropy loss objective with \u2113 2 regularization:",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "L(\u03b8) = \u2212 \u2211 j log p j + \u03bb 2 ||\u03b8|| 2 ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "where \u03b8 is the set of parameters, and \u03bb is a regularization hyper-parameter (\u03bb = 10 \u22126 ). We used stochastic gradient descent with Adam (Kingma and Ba, 2014) to adjust the learning rate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Settings Our experiments were carried out on the GMB following the tree conversion process discussed in Section 3. We adopted the training, development, and testing partitions recommended in . We compared the three models introduced in Section 4, namely the baseline sequence decoder, the shallow structured decoder and the deep structure decoder. We used the same empirical hyper-parameters for all three models. The dimensions of word and lemma embeddings were 64 and 32, respectively. The dimensions of hidden vectors were 256 for the encoder and 128 for the decoder. The encoder used two hidden layers, whereas the decoder only one. The dropout rate was 0.1. Pre-trained word embeddings (100 dimensions) were generated with Word2Vec trained on the AFP portion of the English Gigaword corpus. 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Evaluation Due to the complex nature of our structured prediction task, we cannot expect model output to exactly match the gold standard. For instance, the numbering of the referents may be different, but nevertheless valid, or the order of the children of a tree node (e.g., \"DRS(india(x 1 ) say(e 1 ))\" and \"DRS(say(e 1 ) india(x 1 ))\" are the same). We thus use F 1 instead of exact match accuracy. Specifically, we report D-match 4 a metric designed to evaluate scoped meaning representations and released as part of the distribution of the Parallel Meaning Bank corpus (Abzianidze et al., 2017) . D-match is based on Smatch 5 , a metric used to evaluate AMR graphs ; it calculates F 1 on discourse representation graphs (DRGs), i.e., triples of nodes, arcs, and their referents, applying multiple restarts to obtain a good referent (node) mapping between graphs. We converted DRSs (predicted and goldstandard) into DRGs following the top-down procedure described in Algorithm 1. 6 ISCONDI-TION returns true if the child is a condition (e.g., india(x 1 )), where three arcs are created, one is connected to a parent node and the other two are connected to arg1 and arg2, respectively (lines 7-12). ISQUANTIFIER returns true if the child is a quantifier (e.g., \u03c0 1 , \u00ac and 2) and three arcs are created; one is connected to the parent node, one to the referent that is created if and only Algorithm 1 DRS to DRG Conversion Input: T, tree-like DRS Output: G, a set of edges 1: n b \u2190 0; n c \u2190 0; G \u2190 \u00d8 2: stack \u2190 []; R \u2190 \u00d8 3: procedure TRAVELDRS(parent) 4:", |
| "cite_spans": [ |
| { |
| "start": 574, |
| "end": 599, |
| "text": "(Abzianidze et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "stack.append(b n b ); n b \u2190 n b + 1 5:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "node p \u2190 stack.top 6:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "for child in parent do 7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "if ISCONDITION(child) then 8:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "G \u2190 G \u222a {node p child.rel \u2212 \u2212\u2212\u2212\u2212 \u2192 c n c } 9: G \u2190 G \u222a {c n c arg1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2212\u2212\u2192 child.arg1}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "10:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "G \u2190 G \u222a {c n c arg2 \u2212\u2212\u2192 child.arg2} 11:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "n c \u2190 n c + 1 12:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "ADDREFERENT(node p , child) 13:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "else if ISQUANTIFIER(child) then 14:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "G \u2190 G \u222a {node p child.class \u2212 \u2212\u2212\u2212\u2212\u2212 \u2192 c n c } 15: G \u2190 G \u222a {c n c arg1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2212\u2212\u2192 child.arg1}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "16:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "G \u2190 G \u222a {c n c arg1 \u2212\u2212\u2192 b n b +1 } 17: n c \u2190 n c + 1 18:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "if ISPROPSEG(child) then 19:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "ADDREFERENT ( if the child is a proposition or segment (e.g., \u03c0 1 and k 1 ), and one is connected to the next DRS or SDRS nodes (lines 13-20). The algorithm will recursively travel all DRS or SDRS nodes (line 21). Furthermore, arcs are introduced to connect DRS or SDRS nodes to the referents that first appear in a condition (lines 26-35).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "When comparing two DRGs, we calculate the F 1 over their arcs. For example consider the two DRGs (a) and (b) shown in Figure 4 . Let {b 0 : b 0 , x 1 : x 2 , x 2 : x 3 , c 0 : c 0 , c 1 : c 2 , c 2 : c 3 } denote the node alignment between them. The number of matching arcs is eight, the number of arcs in the gold DRG is nine, and the number of arcs in the predicted DRG is 12. So recall is 8/9, precision is 8/12, and F 1 is 76.19. 6 Results Table 3 compares our three models on the development set. As can be seen, the shallow structured decoder performs better than the baseline decoder, and the proposed deep structure decoder outperforms both of them. Ablation experiments show that without pre-trained word embeddings or word lemma embeddings, the model generally performs worse. Compared to lemma embeddings, pretrained word embeddings contribute more. Table 4 shows our results on the test set. To assess the degree to which the various decoders contribute to DRS parsing, we report results when predicting the full DRS structure (second block), when ignoring referents (third block), and when ignoring both referents and conditions (fourth block). Overall, we observe that the shallow structure model improves precision over the baseline with a slight loss in recall, while the deep structure model performs best by a large margin. When referents are not taken into account (compare the second and third blocks in Table 4 ), performance improves across the board. When conditions are additionally omitted, we observe further performance gains. This is hardly surprising, since errors propagate from one stage to the next when predicting full DRS structures. Further analysis revealed that the parser performs slightly better on (copy) conditions which correspond to natural language tokens compared to (insert) conditions (e.g., Topic, Agent) which are generated from global semantic content (83.22 vs 80.63 F 1 ). \nThe parser is also better on sentences which do not represent SDRSs (79.12 vs 68.36 F 1 ) which is expected given that they usually correspond to more elaborate structures. We also found that rhetorical relations (linking segments) are predicted fairly accurately, especially if they are frequently attested (e.g., Continuation, Parallel), while the parser has difficulty with relations denoting contrast. Figure 5 shows F 1 performance for the three parsers on sentences of different length. We observe a similar trend for all models: as sentence length increases, model performance decreases. The baseline and shallow models do not perform well on short sentences which despite containing fewer words, can still represent complex meaning which is challenging to capture sequentially. On the other hand, the performance of the deep model is relatively stable. LSTMs in this case function relatively well, as they are faced with the easier task of predicting meaning in different stages (starting with a tree skeleton which is progressively refined). We provide examples of model output in the supplementary material.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 118, |
| "end": 126, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 444, |
| "end": 451, |
| "text": "Table 3", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 861, |
| "end": 868, |
| "text": "Table 4", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 1424, |
| "end": 1431, |
| "text": "Table 4", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 2331, |
| "end": 2339, |
| "text": "Figure 5", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Tree-structured Decoding A few recent approaches develop structured decoders which make use of the syntax of meaning representations. Dong and Lapata (2016) and Alvarez-Melis and Jaakkola (2017) generate trees in a top-down fashion, while in other work (Xiao et al., 2016; Krishnamurthy et al., 2017) the decoder generates from a grammar that guarantees that predicted logical forms are well-typed. In a similar vein, Yin and Neubig (2017) generate abstract syntax trees (ASTs) based on the application of production rules defined by the grammar. Rabinovich et al. (2017) introduce a modular decoder whose various components are dynamically composed according to the generated tree structure. In comparison, our model does not use grammar information explic-itly. We first decode the structure of the DRS, and then fill in details pertaining to its semantic content. Our model is not strictly speaking top-down, we generate partial trees sequentially, and then expand non-terminal nodes, ensuring that when we generate the children of a node, we have already obtained the structure of the entire tree.", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 156, |
| "text": "Dong and Lapata (2016)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 253, |
| "end": 272, |
| "text": "(Xiao et al., 2016;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 273, |
| "end": 300, |
| "text": "Krishnamurthy et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 547, |
| "end": 571, |
| "text": "Rabinovich et al. (2017)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Wide-coverage Semantic Parsing Our model is trained on the GMB , a richly annotated resource in the style of DRT which provides a unique opportunity for bootstrapping wide-coverage semantic parsers. Boxer (Bos, 2008) was a precursor to the GMB, the first semantic parser of this kind, which deterministically maps CCG derivations onto formal meaning representations. Le and Zuidema (2012) were the first to train a semantic parser on an early release of the GMB (2,000 documents; Basile et al. 2012), however, they abandon lambda calculus in favor of a graph based representation. The latter is closely related to AMR, a general-purpose meaning representation language for broad-coverage text. In AMR the meaning of a sentence is represented as a rooted, directed, edge-labeled and leaf-labeled graph. AMRs do not resemble classical meaning representations and do not have a model-theoretic interpretation. However, see Bos (2016) and Artzi et al. (2015) for translations to first-order logic.", |
| "cite_spans": [ |
| { |
| "start": 205, |
| "end": 216, |
| "text": "(Bos, 2008)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 367, |
| "end": 388, |
| "text": "Le and Zuidema (2012)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 920, |
| "end": 930, |
| "text": "Bos (2016)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 935, |
| "end": 954, |
| "text": "Artzi et al. (2015)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We introduced a new end-to-end model for opendomain semantic parsing. Experimental results on the GMB show that our decoder is able to recover discourse representation structures to a good degree (77.54 F 1 ), albeit with some simplifications. In the future, we plan to model document-level representations which are more in line with DRT and the GMB annotations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "8" |
| }, |
| { |
| "text": "https://github.com/EdinburghNLP/EncDecDRSparsing", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The models are trained on a single GPU without batches. 4 https://github.com/RikVN/D-match 5 https://github.com/snowblink14/smatch 6 We refer the interested reader to the supplementary material for more details.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Acknowledgments We thank the anonymous reviewers for their feedback and Johan Bos for answering several questions relating to the GMB. We gratefully acknowledge the support of the European Research Council (Lapata, Liu; award number 681760) and the EU H2020 project SUMMA (Cohen, Liu; grant agreement 688139).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "acknowledgement", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "The parallel meaning bank: Towards a multilingual corpus of translations annotated with compositional meaning representations", |
| "authors": [ |
| { |
| "first": "Lasha", |
| "middle": [], |
| "last": "Abzianidze", |
| "suffix": "" |
| }, |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Bjerva", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [], |
| "last": "Evang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hessel", |
| "middle": [], |
| "last": "Haagsma", |
| "suffix": "" |
| }, |
| { |
| "first": "Rik", |
| "middle": [], |
| "last": "Van Noord", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Ludmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Duc-Duy", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter", |
| "volume": "2", |
| "issue": "", |
| "pages": "242--247", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lasha Abzianidze, Johannes Bjerva, Kilian Evang, Hessel Haagsma, Rik van Noord, Pierre Ludmann, Duc-Duy Nguyen, and Johan Bos. 2017. The paral- lel meaning bank: Towards a multilingual corpus of translations annotated with compositional meaning representations. In Proceedings of the 15th Confer- ence of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 242-247, Valencia, Spain.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Tree-structured decoding with doubly-recurrent neural networks", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Alvarez", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Melis", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [ |
| "S" |
| ], |
| "last": "Jaakkola", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 5th International Conference on Learning Representation (ICLR)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Alvarez-Melis and Tommi S. Jaakkola. 2017. Tree-structured decoding with doubly-recurrent neural networks. In Proceedings of the 5th In- ternational Conference on Learning Representation (ICLR), Toulon, France.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Broad-coverage CCG semantic parsing with AMR", |
| "authors": [ |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1699--1710", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoav Artzi, Kenton Lee, and Luke Zettlemoyer. 2015. Broad-coverage CCG semantic parsing with AMR. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 1699-1710, Lisbon, Portugal.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Logics of conversation", |
| "authors": [ |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Asher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Lascarides", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nicholas Asher and Alex Lascarides. 2003. Logics of conversation. Cambridge University Press.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 4th International Conference on Learning Representations (ICLR)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings of the 4th International Conference on Learning Rep- resentations (ICLR), San Diego, California.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Abstract meaning representation for sembanking", |
| "authors": [ |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Banarescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Bonial", |
| "suffix": "" |
| }, |
| { |
| "first": "Shu", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Madalina", |
| "middle": [], |
| "last": "Georgescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kira", |
| "middle": [], |
| "last": "Griffitt", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulf", |
| "middle": [], |
| "last": "Hermjakob", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Schneider", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Discourse", |
| "volume": "", |
| "issue": "", |
| "pages": "178--186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laura Banarescu, Claire Bonial, Shu Cai, Madalina Georgescu, Kira Griffitt, Ulf Hermjakob, Kevin Knight, Philipp Koehn, Martha Palmer, and Nathan Schneider. 2013. Abstract meaning representation for sembanking. In Proceedings of the 7th Linguis- tic Annotation Workshop and Interoperability with Discourse, pages 178-186, Sofia, Bulgaria.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Developing a large semantically annotated corpus", |
| "authors": [ |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [], |
| "last": "Evang", |
| "suffix": "" |
| }, |
| { |
| "first": "Noortje", |
| "middle": [], |
| "last": "Venhuizen", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 8th International Conference on Language Resources and Evaluation (LREC'12)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valerio Basile, Johan Bos, Kilian Evang, and Noortje Venhuizen. 2012. Developing a large semantically annotated corpus. In Proceedings of the 8th Interna- tional Conference on Language Resources and Eval- uation (LREC'12), Istanbul, Turkey.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Wide-coverage semantic analysis with Boxer", |
| "authors": [ |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 2008 Conference on Semantics in Text Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "277--286", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johan Bos. 2008. Wide-coverage semantic analysis with Boxer. In Proceedings of the 2008 Conference on Semantics in Text Processing, pages 277-286.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Open-domain semantic parsing with Boxer", |
| "authors": [ |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 20th Nordic Conference of Computational Linguistics (NODALIDA 2015)", |
| "volume": "", |
| "issue": "", |
| "pages": "301--304", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johan Bos. 2015. Open-domain semantic parsing with Boxer. In Proceedings of the 20th Nordic Con- ference of Computational Linguistics (NODALIDA 2015), pages 301-304. Link\u00f6ping University Elec- tronic Press, Sweden.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Expressive power of abstract meaning representations", |
| "authors": [ |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Computational Linguistics", |
| "volume": "42", |
| "issue": "3", |
| "pages": "527--535", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johan Bos. 2016. Expressive power of abstract mean- ing representations. Computational Linguistics, 42(3):527-535.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The groningen meaning bank", |
| "authors": [ |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| }, |
| { |
| "first": "Valerio", |
| "middle": [], |
| "last": "Basile", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [], |
| "last": "Evang", |
| "suffix": "" |
| }, |
| { |
| "first": "Noortje", |
| "middle": [], |
| "last": "Venhuizen", |
| "suffix": "" |
| }, |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Bjerva", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Handbook of Linguistic Annotation", |
| "volume": "2", |
| "issue": "", |
| "pages": "463--496", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johan Bos, Valerio Basile, Kilian Evang, Noortje Ven- huizen, and Johannes Bjerva. 2017. The gronin- gen meaning bank. In Nancy Ide and James Puste- jovsky, editors, Handbook of Linguistic Annotation, volume 2, pages 463-496. Springer.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Robust incremental neural semantic graph parsing", |
| "authors": [ |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Buys", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1215--1226", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jan Buys and Phil Blunsom. 2017. Robust incremen- tal neural semantic graph parsing. In Proceedings of the 55th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers), pages 1215-1226, Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Smatch: an evaluation metric for semantic feature structures", |
| "authors": [ |
| { |
| "first": "Shu", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "748--752", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shu Cai and Kevin Knight. 2013. Smatch: an evalua- tion metric for semantic feature structures. In Pro- ceedings of the 51st Annual Meeting of the Associa- tion for Computational Linguistics (Volume 2: Short Papers), pages 748-752, Sofia, Bulgaria.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Learning structured natural language representations for semantic parsing", |
| "authors": [ |
| { |
| "first": "Jianpeng", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Siva", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "" |
| }, |
| { |
| "first": "Vijay", |
| "middle": [], |
| "last": "Saraswat", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "44--55", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianpeng Cheng, Siva Reddy, Vijay Saraswat, and Mirella Lapata. 2017. Learning structured natural language representations for semantic parsing. In Proceedings of the 55th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 44-55, Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Widecoverage efficient statistical parsing with CCG and log-linear models", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Curran", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computational Linguistics", |
| "volume": "33", |
| "issue": "4", |
| "pages": "493--552", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Clark and James Curran. 2007. Wide- coverage efficient statistical parsing with CCG and log-linear models. Computational Linguistics, 33(4):493-552.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Minimal recursion semantics: An introduction", |
| "authors": [ |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Copestake", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Flickinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Carl", |
| "middle": [], |
                    "last": "Pollard",
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [ |
| "A" |
| ], |
| "last": "Sag", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Research on Language and Computation", |
| "volume": "2", |
| "issue": "", |
| "pages": "281--332", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
            "raw_text": "Ann Copestake, Dan Flickinger, Carl Pollard, and Ivan A. Sag. 2005. Minimal recursion semantics: An introduction. Research on Language and Com- putation, 2-3(3):281-332.",
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Expanding the scope of the atis task: the atis-3 corpus", |
| "authors": [ |
| { |
| "first": "Deborah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Dahl", |
| "suffix": "" |
| }, |
| { |
| "first": "Madeleine", |
| "middle": [], |
| "last": "Bates", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Fisher", |
| "suffix": "" |
| }, |
| { |
| "first": "Kate", |
| "middle": [], |
| "last": "Hunicke-Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Christine Pao David", |
| "middle": [], |
| "last": "Pallett", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rudnicky", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Shriberg", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Proceedings of the workshop on ARPA Human Language Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "43--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deborah A. Dahl, Madeleine Bates, Michael Brown, William Fisher, Kate Hunicke-Smith, Christine Pao David Pallett, Alexander Rudnicky, and Elizabeth Shriberg. 1994. Expanding the scope of the atis task: the atis-3 corpus. In Proceedings of the workshop on ARPA Human Language Technology, pages 43-48, Plainsboro, New Jersey.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Language to logical form with neural attention", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "33--43", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Dong and Mirella Lapata. 2016. Language to logi- cal form with neural attention. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 33-43, Berlin, Germany.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Data recombination for neural semantic parsing", |
| "authors": [ |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "12--22", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robin Jia and Percy Liang. 2016. Data recombination for neural semantic parsing. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 12-22, Berlin, Germany.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "From discourse to logic; an introduction to modeltheoretic semantics of natural language, formal logic and DRT", |
| "authors": [ |
| { |
| "first": "Hans", |
| "middle": [], |
| "last": "Kamp", |
| "suffix": "" |
| }, |
| { |
| "first": "Uwe", |
| "middle": [], |
| "last": "Reyle", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hans Kamp and Uwe Reyle. 1993. From discourse to logic; an introduction to modeltheoretic semantics of natural language, formal logic and DRT.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Learning to transform natural to formal languages", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Rohit", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuk", |
| "middle": [ |
| "Wah" |
| ], |
| "last": "Kate", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 20th National Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "1062--1068", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rohit J. Kate, Yuk Wah Wong, and Raymond J. Mooney. 2005. Learning to transform natural to for- mal languages. In Proceedings of the 20th National Conference on Artificial Intelligence, pages 1062- 1068, Pittsburgh, PA.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 3rd International Conference on Learning Representations (ICLR)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. In Proceed- ings of the 3rd International Conference on Learn- ing Representations (ICLR), Banff, Canada.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "A large-scale classification of english verbs. Language Resources and Evaluation", |
| "authors": [ |
| { |
| "first": "Karin", |
| "middle": [], |
| "last": "Kipper", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| }, |
| { |
| "first": "Neville", |
| "middle": [], |
| "last": "Ryant", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "42", |
| "issue": "", |
| "pages": "21--40", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karin Kipper, Anna Korhonen, Neville Ryant, and Martha Palmer. 2008. A large-scale classification of english verbs. Language Resources and Evaluation, 42(1):21-40.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Semantic parsing with semi-supervised sequential autoencoders", |
| "authors": [ |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Ko\u010disk\u1ef3", |
| "suffix": "" |
| }, |
| { |
| "first": "G\u00e1bor", |
| "middle": [], |
| "last": "Melis", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1078--1087", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom\u00e1\u0161 Ko\u010disk\u1ef3, G\u00e1bor Melis, Edward Grefenstette, Chris Dyer, Wang Ling, Phil Blunsom, and Karl Moritz Hermann. 2016. Semantic parsing with semi-supervised sequential autoencoders. In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing, pages 1078- 1087, Austin, Texas.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Neural semantic parsing with type constraints for semi-structured tables", |
| "authors": [ |
| { |
| "first": "Jayant", |
| "middle": [], |
| "last": "Krishnamurthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Dasigi", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1516--1526", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jayant Krishnamurthy, Pradeep Dasigi, and Matt Gard- ner. 2017. Neural semantic parsing with type con- straints for semi-structured tables. In Proceedings of the 2017 Conference on Empirical Methods in Natu- ral Language Processing, pages 1516-1526, Copen- hagen, Denmark.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Learning compositional semantics for open domain semantic parsing", |
| "authors": [ |
| { |
| "first": "Phong", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Willem", |
| "middle": [], |
| "last": "Zuidema", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 24th International Conference on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "1535--1552", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Phong Le and Willem Zuidema. 2012. Learning com- positional semantics for open domain semantic pars- ing. In Proceedings of the 24th International Con- ference on Computational Linguistics (COLING), pages 1535-1552, Mumbai, India.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Learning dependency-based compositional semantics", |
| "authors": [ |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "590--599", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Percy Liang, Michael Jordan, and Dan Klein. 2011. Learning dependency-based compositional seman- tics. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Hu- man Language Technologies, pages 590-599, Port- land, Oregon.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Found in translation: Reconstructing phylogenetic language trees from translations", |
| "authors": [ |
| { |
| "first": "Ella", |
| "middle": [], |
| "last": "Rabinovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Ordan", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuly", |
| "middle": [], |
| "last": "Wintner", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "530--540", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ella Rabinovich, Noam Ordan, and Shuly Wintner. 2017. Found in translation: Reconstructing phylo- genetic language trees from translations. In Pro- ceedings of the 55th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 530-540, Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "The Syntactic Process", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Steedman. 2001. The Syntactic Process. The MIT Press.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "27", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. In Z. Ghahramani, M. Welling, C. Cortes, N. D. Lawrence, and K. Q. Weinberger, editors, Ad- vances in Neural Information Processing Systems 27, pages 3104-3112. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Parsimonious semantic representations with projection pointers", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Noortje", |
| "suffix": "" |
| }, |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Venhuizen", |
| "suffix": "" |
| }, |
| { |
| "first": "Harm", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Brouwer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 10th International Conference on Computational Semantics (IWCS 2013) -Long Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "252--263", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noortje J. Venhuizen, Johan Bos, and Harm Brouwer. 2013. Parsimonious semantic representations with projection pointers. In Proceedings of the 10th In- ternational Conference on Computational Seman- tics (IWCS 2013) -Long Papers, pages 252-263, Potsdam, Germany.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Building a semantic parser overnight", |
| "authors": [ |
| { |
| "first": "Yushi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Berant", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1332--1342", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yushi Wang, Jonathan Berant, and Percy Liang. 2015. Building a semantic parser overnight. In Proceed- ings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th Interna- tional Joint Conference on Natural Language Pro- cessing (Volume 1: Long Papers), pages 1332-1342, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Learning for semantic parsing with statistical machine translation", |
| "authors": [ |
| { |
| "first": "Yuk", |
| "middle": [ |
| "Wah" |
| ], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the main conference on Human Language Technology Conference of the North American Chapter of the Association of Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "439--446", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuk Wah Wong and Raymond J. Mooney. 2006. Learn- ing for semantic parsing with statistical machine translation. In Proceedings of the main conference on Human Language Technology Conference of the North American Chapter of the Association of Com- putational Linguistics, pages 439-446, New York City, USA.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Sequence-based structured prediction for semantic parsing", |
| "authors": [ |
| { |
| "first": "Chunyang", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Dymetman", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Gardent", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1341--1350", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chunyang Xiao, Marc Dymetman, and Claire Gardent. 2016. Sequence-based structured prediction for se- mantic parsing. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1341- 1350, Berlin, Germany.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A syntactic neural model for general-purpose code generation", |
| "authors": [ |
| { |
| "first": "Pengcheng", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "440--450", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengcheng Yin and Graham Neubig. 2017. A syntactic neural model for general-purpose code generation. In Proceedings of the 55th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 440-450, Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Learning to parse database queries using inductive logic programming", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "J" |
| ], |
| "last": "Zelle", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the 13th National Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "1050--1055", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John M. Zelle and Raymond J. Mooney. 1996. Learn- ing to parse database queries using inductive logic programming. In Proceedings of the 13th National Conference on Artificial Intelligence, pages 1050- 1055, Portland, Oregon.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Learning to map sentences to logical form: Structured classification with probabilistic categorial grammars", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
"venue": "Proceedings of the 21st Conference in Uncertainty in Artificial Intelligence",
| "volume": "", |
| "issue": "", |
| "pages": "658--666", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
"raw_text": "Luke S. Zettlemoyer and Michael Collins. 2005. Learning to map sentences to logical form: Struc- tured classification with probabilistic categorial grammars. In Proceedings of the 21st Conference in Uncertainty in Artificial Intelligence, pages 658- 666, Edinburgh, Scotland.",
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
"text": "Tree-based representation (top) of the DRS in Figure 1 and its linearization (bottom)."
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Figure 2, which can be subsequently linearized into a PTB-style bracketed sequence. It is important to note that the conversion does not diminish the complexity of DRSs. The average tree width in the training set is 10.39 and tree depth is 4.64." |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "(a) baseline model; (b) shallow structure model; (c) deep structure model (scoring components are not displayed): (c.1) predicts DRS structure, (c.2) predicts conditions, and (c.3) predicts referents. Blue boxes are encoder hidden units, red boxes are decoder LSTM hidden units, green and yellow boxes represent copy and insertion scores, respectively." |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "(a) is the gold DRS and (b) is the predicted DRS (condition names are not shown)." |
| }, |
| "FIGREF4": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "F 1 score as a function of sentence length." |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "text": "Statistics on the GMB (avg denotes the average number of tokens per sentence).", |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "num": null, |
| "html": null, |
| "text": "Notation used throughout this paper.", |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF7": { |
| "num": null, |
| "html": null, |
| "text": "GMB development set. .21 64.46 57.69 47.20 58.93 52.42 52.89 71.80 60.91 shallow 66.61 63.92 65.24 66.05 62.93 64.45 83.30 62.91 71.68 deep 79.27 75.88 77.54 82.87 79.40 81.10 93.91 88.51 91.13", |
| "content": "<table><tr><td>Model</td><td>P</td><td>DRG R</td><td>F 1</td><td>P</td><td>DRG w/o refs R</td><td>F 1</td><td>DRG w/o refs & conds P R F 1</td></tr><tr><td colspan=\"2\">baseline 52</td><td/><td/><td/><td/><td/><td/></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF8": { |
| "num": null, |
| "html": null, |
| "text": "GMB test set.", |
| "content": "<table><tr><td/><td>100</td><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td>deep</td></tr><tr><td/><td/><td/><td/><td/><td>shallow</td></tr><tr><td>F 1 (%)</td><td>80</td><td/><td/><td/><td>baseline</td></tr><tr><td/><td>60</td><td/><td/><td/></tr><tr><td/><td>10</td><td>15</td><td>20</td><td>25</td><td>30</td></tr><tr><td/><td/><td/><td colspan=\"2\">sentence length</td></tr></table>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |