| { |
| "paper_id": "P18-1038", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:41:00.437163Z" |
| }, |
| "title": "Accurate SHRG-Based Semantic Parsing", |
| "authors": [ |
| { |
| "first": "Yufei", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "The MOE Key Laboratory of Computational Linguistics", |
| "institution": "Peking University", |
| "location": {} |
| }, |
| "email": "yufei.chen@pku.edu.cn" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "The MOE Key Laboratory of Computational Linguistics", |
| "institution": "Peking University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "The MOE Key Laboratory of Computational Linguistics", |
| "institution": "Peking University", |
| "location": {} |
| }, |
| "email": "wanxiaojun@pku.edu.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We demonstrate that an SHRG-based parser can produce semantic graphs much more accurately than previously shown, by relating synchronous production rules to the syntacto-semantic composition process. Our parser achieves an accuracy of 90.35 for EDS (89.51 for DMRS) in terms of ELEMENTARY DEPENDENCY MATCH, which is a 4.87 (5.45) point improvement over the best existing data-driven model, indicating, in our view, the importance of linguistically-informed derivation for data-driven semantic parsing. This accuracy is equivalent to that of English Resource Grammar guided models, suggesting that (recurrent) neural network models are able to effectively learn deep linguistic knowledge from annotations.", |
| "pdf_parse": { |
| "paper_id": "P18-1038", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We demonstrate that an SHRG-based parser can produce semantic graphs much more accurately than previously shown, by relating synchronous production rules to the syntacto-semantic composition process. Our parser achieves an accuracy of 90.35 for EDS (89.51 for DMRS) in terms of ELEMENTARY DEPENDENCY MATCH, which is a 4.87 (5.45) point improvement over the best existing data-driven model, indicating, in our view, the importance of linguistically-informed derivation for data-driven semantic parsing. This accuracy is equivalent to that of English Resource Grammar guided models, suggesting that (recurrent) neural network models are able to effectively learn deep linguistic knowledge from annotations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Graph-structured semantic representations, e.g. Semantic Dependency Graphs (SDG; Clark et al., 2002; Ivanova et al., 2012) , Elementary Dependency Structure (EDS; Oepen and L\u00f8nning, 2006) , Abstract Meaning Representation (AMR; Banarescu et al., 2013) , Dependency-based Minimal Recursion Semantics (DMRS; Copestake, 2009) , and Universal Conceptual Cognitive Annotation (UCCA; Abend and Rappoport, 2013) , provide a lightweight yet effective way to encode rich semantic information of natural language sentences (Kuhlmann and Oepen, 2016) . Parsing to semantic graphs has been extensively studied recently.", |
| "cite_spans": [ |
| { |
| "start": 81, |
| "end": 100, |
| "text": "Clark et al., 2002;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 101, |
| "end": 122, |
| "text": "Ivanova et al., 2012)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 163, |
| "end": 187, |
| "text": "Oepen and L\u00f8nning, 2006)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 228, |
| "end": 251, |
| "text": "Banarescu et al., 2013)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 306, |
| "end": 322, |
| "text": "Copestake, 2009)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 378, |
| "end": 404, |
| "text": "Abend and Rappoport, 2013)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 513, |
| "end": 539, |
| "text": "(Kuhlmann and Oepen, 2016)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "At the risk of oversimplifying, work in this area can be divided into three types, according to how much structural information of a target graph is explicitly modeled. Parsers of the first type throw an input sentence into a sequence-to-sequence model and leverage the power of deep learning technologies to obtain auxiliary symbols to transform the output sequence into a graph (Peng et al., 2017b; Konstas et al., 2017) . The strategy of the second type is to gradually generate a graph in a greedy search fashion (Zhang et al., 2016; Buys and Blunsom, 2017) . Usually, a transition system is defined to handle graph construction. The last solution explicitly associates each basic part with a target graph score, and casts parsing as the search for the graphs with highest sum of partial scores (Flanigan et al., 2014; Cao et al., 2017) . Although many parsers achieve encouraging results, they are very hard for linguists to interpret and understand, partially because they do not explicitly model the syntacto-semantic composition process which is a significant characteristic of natural languages.", |
| "cite_spans": [ |
| { |
| "start": 380, |
| "end": 400, |
| "text": "(Peng et al., 2017b;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 401, |
| "end": 422, |
| "text": "Konstas et al., 2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 517, |
| "end": 537, |
| "text": "(Zhang et al., 2016;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 538, |
| "end": 561, |
| "text": "Buys and Blunsom, 2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 799, |
| "end": 822, |
| "text": "(Flanigan et al., 2014;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 823, |
| "end": 840, |
| "text": "Cao et al., 2017)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In theory, Synchronous Hyperedge Replacement Grammar (SHRG; Drewes et al., 1997) provides a mathematically sound framework to construct semantic graphs. In practice, however, initial results on the utility of SHRG for semantic parsing were somewhat disappointing (Peng et al., 2015; Peng and Gildea, 2016) . In this paper, we show that the performance that can be achieved by an SHRG-based parser is far higher than what has previously been demonstrated. We focus here on relating SHRG rules to the syntactosemantic composition process because we feel that information about syntax-semantics interface has been underexploited in the data-driven parsing architecture. We demonstrate the feasibility of inducing a high-quality, linguistically-informed SHRG from compositional semantic annotations licensed by English Resource Grammar (ERG; Flickinger, 2000) , dubbed English Resource Semantics 1 (ERS). Coupled with RNN-based pars- Table 1 : Parsing accuracy of the best existing grammar-free and -based models as well as our SHRG-based model. Results are copied from Peng et al., 2017a; Buys and Blunsom, 2017) .", |
| "cite_spans": [ |
| { |
| "start": 60, |
| "end": 80, |
| "text": "Drewes et al., 1997)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 263, |
| "end": 282, |
| "text": "(Peng et al., 2015;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 283, |
| "end": 305, |
| "text": "Peng and Gildea, 2016)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 838, |
| "end": 855, |
| "text": "Flickinger, 2000)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1066, |
| "end": 1085, |
| "text": "Peng et al., 2017a;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1086, |
| "end": 1109, |
| "text": "Buys and Blunsom, 2017)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 930, |
| "end": 937, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "ing techniques, we build a robust SHRG parser that is able to produce semantic analysis for all sentences. Our parser achieves an accuracy of 90.35 for EDS and 89.51 for DMRS in terms of EL-EMENTARY DEPENDENCY MATCH (EDM) which outperforms the best existing grammar-free model (Buys and Blunsom, 2017 ) by a significant margin (see Table 1 ). This marked result affirms the value of modeling the syntacto-semantic composition process for semantic parsing. On sentences that can be parsed by ERG-guided parsers, e.g. PET 2 or ACE 3 , significant accuracy gaps between ERG-guided parsers and data-driven parsers are repeatedly reported (see Table 1 ). The main challenge for ERG-guided parsing is limited coverage. Even for treebanking on WSJ sentences from PTB, such a parser lacks analyses for c.a. 11% of sentences . Our parser yields equivalent accuracy to ERG-guided parsers and equivalent coverage, full-coverage in fact, to data-driven parsers. We see this investigation as striking a balance between data-driven and grammar-driven parsing. It is not our goal to argue against the use of unification grammar in high-performance deep linguistic processing. Nevertheless, we do take it as a reflection of two points: (1) (recurrent) neural network models are able to effectively learn deep linguistic knowledge from annotations; (2) practical parsing may benefit from transforming a model-theoretic grammar into a generative-enumerative grammar.", |
| "cite_spans": [ |
| { |
| "start": 277, |
| "end": 300, |
| "text": "(Buys and Blunsom, 2017", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 332, |
| "end": 339, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 639, |
| "end": 646, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The architecture of our parser has potential uses beyond establishing a strong string-to-graph parser. Our grammar extraction algorithm has some freedom to induce different SHRGs following different linguistic hypothesis, and allows some issues in theoretical linguistics to be empirically investigated. In this paper, we examine the 2 http://pet.opendfki.de/ 3 http://sweaglesw.org/linguistics/ace/ Figure 1 : A partial rewriting process of HRG on the semantic graph associated with \"Some boys want to go.\" Lowercase symbols indicate terminal edges, while bold, uppercase symbols indicate nonterminal edges. Red edges are the hyperedges that will be replaced in the next step, while the blue edges in the next step constitute their corresponding RHS graphs. lexicalist/constructivist hypothesis, a divide across a variety of theoretical frameworks, in an empirical setup. The lexicalist tradition traces its origins to Chomsky (1970) and is widely accepted by various computational grammar formalisms, including CCG, LFG, HPSG and LTAG. A lexicalist approach argues that the lexical properties of words determine their syntactic and semantic behaviors. The constructivist perspective, e.g. Borer's Exo-Skeletal approach (2005b; 2005a; 2013) , emphasizes the role of syntax in constructing meanings. In this paper, we focus on lexicalist and constructivist hypotheses for syntacto-semantic composition. We present our computation-oriented analysis in \u00a76. Under the architecture of our neural parser, a construction grammar works much better than a lexicalized grammar.", |
| "cite_spans": [ |
| { |
| "start": 920, |
| "end": 934, |
| "text": "Chomsky (1970)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1191, |
| "end": 1228, |
| "text": "Borer's Exo-Skeletal approach (2005b;", |
| "ref_id": null |
| }, |
| { |
| "start": 1229, |
| "end": 1235, |
| "text": "2005a;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1236, |
| "end": 1241, |
| "text": "2013)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 400, |
| "end": 408, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our parser is available at https://github. com/draplater/hrg-parser/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Hyperedge replacement grammar (HRG) is a context-free rewriting formalism for graph generation (Drewes et al., 1997 ). An edge-labeled, directed hypergraph is a tuple H = V, E, l, X , where V is a finite set of nodes, and E \u2286 V + is a finite set of hyperedges. A hyperedge is an extension of a normal edge which can connect to more than two nodes or only one node. l : E \u2192 L RULES \u2190 RULES \u222a {(A, ALL-EDGES, INTERNAL-NODES, EXTERNAL-NODES)} 18: end for assigns a label from a finite set L to each edge. X \u2208 V * defines an ordered list of nodes, i.e., external nodes, which specify the connecting parts when replacing a hyperedge.", |
| "cite_spans": [ |
| { |
| "start": 95, |
| "end": 115, |
| "text": "(Drewes et al., 1997", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyperedge Replacement Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "An HRG G = N, T, P, S is a graph rewriting system, where N and T are two disjoint finite sets of nonterminal and terminal symbols respectively. S \u2208 N is the start symbol. P is a finite set of productions of the form A \u2192 R, where the left hand side (LHS) A \u2208 N , and the right hand side (RHS) R is a hypergraph with edge labels over N \u222a T . The rewriting process replaces a nonterminal hyperedge with the graph fragment specified by a production's RHS, attaching each external node to the matched node of the corresponding LHS. An example is shown in Figure 1 . Following Chiang et al. 2013, we make the nodes only describe connections between edges and store no other information.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 550, |
| "end": 558, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hyperedge Replacement Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A synchronous grammar defines mappings between different grammars. Here we focus on relating a string grammar, CFG in our case, to a graph grammar, i.e., HRG. SHRG can be represented as tuple G = N, T, T , P, S . N is a finite set of nonterminal symbols in both CFG and HRG. T and T are finite sets of terminal symbols in CFG and HRG, respectively. S \u2208 N is the start symbol. P is a finite set of productions of the form A \u2192 R, R , \u223c , where A \u2208 N , R is a hypergraph fragment with edge labels over N \u222a T , and R is a symbol sequence over N \u222a T . \u223c is a mapping between the nonterminals in R and R . When a coherent CFG derivation is ready, we can interpret it using the corresponding HRG and get a semantic graph.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyperedge Replacement Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "ERS are richly detailed semantic representations produced by the ERG, a hand-crafted, linguistically-motivated HPSG grammar for English. Beyond basic predicate-argument structures, ERS also includes other information about various complex phenomena such as the distinction between scopal and non-scopal arguments, conditionals, comparatives, and many others. ERS are in the formalism of Minimal Recursion Semantics (MRS; Copestake et al., 2005) , but can be expressed in different ways. Semantic graphs, including EDS and DMRS, can be reduced from the standard feature structure encoded representations, with or without a loss of information. In this paper, we conduct experiments on ERS data, but our grammar extraction algorithm and the parser are not limited to ERS.", |
| "cite_spans": [ |
| { |
| "start": 421, |
| "end": 444, |
| "text": "Copestake et al., 2005)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph Representations for ERS", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "One distinguished characteristic of ERS is that the construction of ERS strictly follows the prin- ciple of compositionality (Bender et al., 2015) . A precise syntax-semantics interface is introduced to guarantee compositionality and therefore all meaning units can be traced back to linguistic signals, including both lexical and constructional ones. Take Figure 2 for example. Every concept, e.g. the existence quantifier some q, is associated with a surface string. We favor such correspondence not because it eases extraction of SHRGs, but because we emphasize sentence meanings that are from forms. The connection between syntax (sentence form) and semantics (word and sentence meaning) is fundamental to the study of language.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 146, |
| "text": "(Bender et al., 2015)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 357, |
| "end": 365, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Graph Representations for ERS", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "N(1,2) bv D(0,1) arg1 V(4,5) arg1 arg2 V(2,3) _boy_n_1(1,2) bv _some_q(0,1) arg1 _go_v_1(4,5) arg1 arg2 _want_v_1(2,3) SP-HD(0,2) arg1 V(4,5) arg1 arg2 V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Graph Representations for ERS", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We introduce a novel SHRG extraction algorithm, which requires and only requires alignments between conceptual edges and surface strings. A tree is also required, but this tree does not have to be a gold-standard syntactic tree. All trees that are compatible with an alignment can be used. The syntactic part of DeepBank is a phrase structure which describes HPSG derivation. The vast majority of syntactic rules in DeepBank are binary, and the rest are unary. In \u00a75, we report evaluation results based on DeepBank trees.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "A conceptual graph is composed of two kinds of edges: 1) conceptual edges that carry semantic concept information and are connected with only one node, and 2) structural edges that build relationships among concepts by connecting nodes. The grammar extraction process repeatedly replaces a subgraph with a nonterminal hyperedge, defining the nonterminal symbol as LHS and the subgraph as RHS. The key problem is to identify an appropriate subgraph in each step. To this end, we take advantage of DeepBank's accurate and fine-grained alignments between the surface string in syntactic tree and concepts in semantic graphs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To extract the HRG rule synchronized with the syntactic rewriting rule A \u2192 B + C, we assume that conceptual edges sharing common spans with A, B or C are in the same subgraph. This subgraph acts as the RHS of the HRG rule. We make the extraction process go in the direction of postorder traversal of the syntactic tree, to ensure that all sub-spans of A, B or C are already replaced with hyperedges. We then add the structural edges that connect the above conceptual edges to RHS. After the subgraph is identified, it is easy to distinguish between internal nodes and external nodes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "If all edges connected to a node are in the subgraph, this node is an internal node. Otherwise, it is external node. Finally, the subgraph is replaced with a nonterminal edge. Algorithm 1 presents a precise demonstration and Figure 2 illustrates an example.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 225, |
| "end": 233, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Under the SHRG formalism, semantic parsing can be divided into two steps: syntactic parsing and semantic interpretation. Syntactic parsing utilizes the CFG part to get a derivation that is shared by the HRG part. At one derivation step, there may be more than one HRG rule applicable. In this case, we need a semantic disambiguation model to choose a good one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Neural SHRG Parser", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Following the LSTM-Minus approach proposed by Cross and Huang (2016) , we build a constituent parser with a CKY decoder. We denote the output vectors of forward and backward LSTM as f i and b i . The feature s i,j of a span (i, j) can be calculated from the differences of LSTM encodings:", |
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 68, |
| "text": "Cross and Huang (2016)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic Parsing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "s i,j = (f j \u2212 f i ) \u2295 (b i \u2212 b j )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic Parsing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The operator \u2295 indicates the concatenation of two vectors. Constituency parsing can be regarded as predicting scores for spans and labels, and getting the best syntactic tree with dynamic programming. Following Stern et al. (2017)'s approach, We calculate the span scores SCORE span (i, j) and labels scores SCORE label (i, j, l) from s i,j with multilayer perceptrons (MLPs):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic Parsing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "SCORE span (i, j) = MLP span (s i,j ) SCORE label (i, j, l) = MLP label (s i,j )[l]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic Parsing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "x[i] indicates the ith element of a vector x. We condense the unary chains into one label to ensure that only one rule is corresponds with a specific span. Because the construction rules from Deep-Bank are either unary or binary, we do not deal with binarization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Syntactic Parsing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Because the SHRG synchronizes at rule level, we need to restrict the parser to ensure that the output agrees with the known rules. The restriction can be directly added into the CKY decoder. To simplify the semantic interpretation process, we add extra label information to enrich the nonterminals in CFG rules. In particular, we consider the count of external nodes of a corresponding HRG rule. For example, the LHS of rule { in Figure 2 will be labeled as \"HD-CMP#2\", since the RHS of its HRG counterpart has two external nodes.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 430, |
| "end": 438, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Syntactic Parsing", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "When a phrase structure tree, i.e., a derivation tree, T is available, semantic interpretation can be regarded as translating T to the derivation of graph construction by assigning a corresponding HRG rule to each syntactic counterpart. Our approach to finding the optimal HRG rule combina-tionR = {r 1 , r 2 , ...} from the search space R(T ):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Interpretation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "R = argmax R\u2208R(T ) SCORE(R|T )", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Semantic Interpretation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To solve this optimization problem, we implement a greedy search decoder and a bottom-up beam search decoder. The final semantic graph G is read off fromR.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Interpretation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In this model, we assume that each HRG rule is selected independently of the others. The score of G is defined as the sum of all rule scores:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Greedy Search Model", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "SCORE(R = {r 1 , r 2 , ...}|T ) = r\u2208R SCORE(r|T )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Greedy Search Model", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "The maximization of the graph score can be decomposed into the maximization of each rule score. SCORE(r|T ) can be calculated in many ways. Count-based approach is the simplest one, where the rule score is estimated by its frequency in the training data. We also evaluate a sophisticated scoring method, i.e., training a classifier based on rule embedding:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Greedy Search Model", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "SCORE(r|T ) = MLP(s i,j \u2295 r)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Greedy Search Model", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "Inspired by the bag-of-words model, we represent the rule as bag of edge labels. The i-th position in r indicates the number of times the i-th label appears in the rule.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Greedy Search Model", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "We can also leverage structured prediction to approximate SCORE(R|T ) and employ principled decoding algorithms to solve the optimization problem (1). We propose a factorization model to assign scores to the graph and subgraphs in the intermediate state. The score of a certain graph can be seen as the sum of each factor score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bottom-Up Beam Search Model", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "i\u2208PART(R,T )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SCORE(R|T ) =", |
| "sec_num": null |
| }, |
| { |
| "text": "We use predicates and arguments as factors for scoring. There are two kinds of factors: 1) A conceptual edge aligned with span (i, j) taking predicate name p. We use the span embedding s i,j as features, and scoring with non-linear transformation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SCOREPART(i)", |
| "sec_num": null |
| }, |
| { |
| "text": "SCOREPART pred (i, j, p) = MLP pred (s i,j )[p]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SCOREPART(i)", |
| "sec_num": null |
| }, |
| { |
| "text": "2) A structural edge with label L connects with predicates p a and p b , which are aligned with spans (i 1 , j 1 ) and (i 2 , j 2 ) respectively. We use the span embedding s i 1 ,j 1 , s i 2 ,j 2 and random initialized predicate embedding p a , p b as features, and scoring with non-linear transformation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SCOREPART(i)", |
| "sec_num": null |
| }, |
| { |
| "text": "SCOREPART arg (i 1 , j 1 , i 2 , j 2 , p a , p b , L) = MLP arg (s i 1 ,j 1 \u2295 s i 2 ,j 2 \u2295 p a \u2295 p b )[L]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SCOREPART(i)", |
| "sec_num": null |
| }, |
| { |
| "text": "We assign a beam to each node in the syntactic tree. To ensure that we always get a subgraph which does not contain any nonterminal edges during the search process, we perform the beam search in the bottom-up direction. We only reserve top k subgraphs in each beam. Figure 3 illustrates the process.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 266, |
| "end": 274, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "SCOREPART(i)", |
| "sec_num": null |
| }, |
| { |
| "text": "The objective of training is to make the score of the correct graph higher than incorrect graphs. We use the score difference between the correct graph R g and the highest scoring incorrect graph as the loss:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "loss = maxR =Rg SCORE(R|T )\u2212SCORE(R g |T )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Following (Kiperwasser and Goldberg, 2016) 's experience of loss augmented inference, in order to update graphs which have high model scores but are very wrong, we augment each factor belonging to the gold graph by adding a penalty term c to its score. Finally the loss term is: ", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 42, |
| "text": "(Kiperwasser and Goldberg, 2016)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "loss = SCORE(R g |T ) \u2212 i\u2208PART(Rg,T )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "DeepBank is an annotation of the Penn TreeBank Wall Street Journal which is annotated under the formalism of HPSG. We use DeepBank version 1.1, corresponding to ERG 1214, and use the standard data split. Therefore the numeric performance can be directly compared to results reported in Buys and Blunsom (2017) . We use the pyDelphin library to extract DMRS and EDS graphs and use the tool provided by jigsaw 4 to separate punctuation marks from the words they attach to. We use DyNet 5 to implement our neural models, and automatic batch technique (Neubig et al., 2017) in DyNet to perform mini-batch gradient descent training. The detailed network hyper-parameters can be seen in Table 2 . The same pre-trained word embedding as (Kiperwasser and Goldberg, 2016) is employed.", |
| "cite_spans": [ |
| { |
| "start": 286, |
| "end": 309, |
| "text": "Buys and Blunsom (2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 548, |
| "end": 569, |
| "text": "(Neubig et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 730, |
| "end": 762, |
| "text": "(Kiperwasser and Goldberg, 2016)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 681, |
| "end": 688, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Set-up", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "DeepBank provides fine-grained syntactic trees with rich information. For example, the label SP-HD HC C denotes that this is a \"head+specifier\" construction, where the semantic head is also the syntactic head. But there is also the potential for data sparseness. In our experiments, we extract SHRG with three kinds of labels: fine-grained labels, coarse-grained labels and single Xs (meaning unlabeled parsing). The fine-grained labels are the original labels, namely fine-grained construction types. We use the part before the first underscore of each label, e.g. SP-HD, as a coarse-grained label. The coarse-grained labels are more like the highly generalized rule schemata proposed by Pollard and Sag (1994) . Some statistics are shown in Table 3 . Instead of using gold-standard trees to extract a synchronous grammar, we also tried randomlygenerated alignment-compatible trees. The result is shown in Table 4 . Gold standard trees exhibit a low entropy, indicating a high regularity.", |
| "cite_spans": [ |
| { |
| "start": 689, |
| "end": 711, |
| "text": "Pollard and Sag (1994)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 743, |
| "end": 750, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 907, |
| "end": 914, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results of Grammar Extraction", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In addition to the standard evaluation method for phrase-structure parsing, we find a more suitable measurement, i.e. condensed score, for our task. Because we condense unary rule chains into one label and extract synchronous grammar under this condensed syntactic tree, it is better to calculate the correctness of the condensed label rather than Tree Type 1 2 3 4 5+ Gold 1476 488 280 248 251 Fuzzy 1 12710 7591 7963 6578 8998 Fuzzy 2 13606 7355 7228 6090 9112 Fuzzy 3 12278 8228 8462 7039 9946 Table 4 : Comparison of grammars extracted from unlabeled gold trees and randomly-generated alignment-compatible trees (\"fuzzy\" trees). Table 5 : Accuracy of syntactic parsing under different labels on development data. We add the count of external nodes of corresponding HRG rule. \"POS\" concerns the prediction of preterminals, while \"BCKT\" denotes bracketing.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 348, |
| "end": 523, |
| "text": "Tree Type 1 2 3 4 5+ Gold 1476 488 280 248 251 Fuzzy 1 12710 7591 7963 6578 8998 Fuzzy 2 13606 7355 7228 6090 9112 Fuzzy 3 12278 8228 8462 7039 9946 Table 4", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 652, |
| "end": 659, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results of Syntactic Parsing", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "a single label. The additional label \"#N\" that indicates the number of external points is also considered in our condensed score evaluation method. The result is shown in Table 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 171, |
| "end": 178, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results of Syntactic Parsing", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Dridan and Oepen (2011) proposed the EDM metric to evaluate the performance the ERS-based graphs. EDM uses the alignment between the nodes in a graph and the spans in a string to detect the common parts between two graphs. It converts the predicate and predicate-argument relationship to comparable triples and calculates the correctness in these triples. A predicate of label L and span S is denoted as triple (S, NAME, L) and a relationship R between the predicate labelled P and argument labelled A is denoted as triple (P, R, A). We calculate the F 1 value of the total triples as EDM score. Similarity, we compute the F 1 score of only predicate triples and only the relation triples as EDM P and EDM A . We reuse the word embeddings and bidirectional LSTM in the trained syntactic parsing model to extract span embedding s i,j . The results of the count-based model, rule embedding model and structured model with beam decoder are summarized in Table 6 . We report the standard EDM metrics. The count-based model can achieve considerably good results, showing the correctness of our grammar extraction method. We also try different labels for the syntactic trees. The results are shown in Table 7 . Models based on coarsegrained labels achieve optimal performance. The results on test set of EDS data are shown in Table 8 . We achieve state-of-the-art performance with a remarkable improvement over Buys and Blunsom (2017) 's neural parser.", |
| "cite_spans": [ |
| { |
| "start": 1406, |
| "end": 1429, |
| "text": "Buys and Blunsom (2017)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 951, |
| "end": 958, |
| "text": "Table 6", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 1195, |
| "end": 1202, |
| "text": "Table 7", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 1320, |
| "end": 1328, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results of Semantic Interpretation", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "In this paper, we empirically study the lexicalist/constructivist hypothesis, a divide across a variety of theoretical frameworks, taking semantic parsing as a case study. Although the original grammar that guides the annotation of ERS data, namely ERG, is highly lexicalized in that the majority of information is encoded in lexical entries (or lexical rules) as opposed to being represented in constructions (i.e., rules operating on phrases), our grammar extraction algorithm has some freedom to induce different SHRGs that choose between the lexicalist and constructivist approaches. We modify algorithm 1 to follow the key insights of the lexicalist approach. This is done by considering all outgoing edges when finding the subgraph of the lexical rules. The differences between two kinds of grammars is shown in Table 9 . Different grammars allow the lexicalist/constructivist issue in theoretical linguistics to be empirically examined. The comparison of the counts of rules in each grammar is summarized in Table 11 , from which we can see that the sizes of the grammars are comparable. However, the parsing results are quite different, as shown Table 8 : Accuracy on the test set. We use syntactic trees of coarse-grained labels and beam search.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 818, |
| "end": 825, |
| "text": "Table 9", |
| "ref_id": "TABREF10" |
| }, |
| { |
| "start": 1015, |
| "end": 1023, |
| "text": "Table 11", |
| "ref_id": "TABREF12" |
| }, |
| { |
| "start": 1154, |
| "end": 1161, |
| "text": "Table 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "On Syntax-Semantics Interface", |
| "sec_num": "6" |
| }, |
| { |
| "text": "in Table 10 . A construction grammar works much better than a lexicalized grammar under the architecture of our neural parser. We take this comparison as informative since lexicalist approaches are more widely accepted by various computational grammar formalisms, including CCG, LFG, HPSG and LTAG. We think that the success of applying SHRG to resolve semantic parsing highly relies on the compositionality nature of ERS' sentence-level semantic annotation. This is the property that makes sure the extracted rules are consistent and regular. Previous investigation by Peng et al. (2015) on SHRG-based semantic parsing utilizes AMR-Bank which lacks this property to some extent (see Bender et al.'s argument) . We think this may be one reason for the disappointing parsing performance. Think about the AMR graph associated \"John wants Bob to believe that he saw him.\" The AMR's annotation for co-reference is a kind of non-compositional, speaker meaning, and results in grammar sparseness.", |
| "cite_spans": [ |
| { |
| "start": 570, |
| "end": 588, |
| "text": "Peng et al. (2015)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 684, |
| "end": 709, |
| "text": "Bender et al.'s argument)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 11, |
| "text": "Table 10", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "On Syntax-Semantics Interface", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Semantic annotations have a tremendous impact on semantic parsing. In parallel with developing new semantic annotations, e.g. AMRBank, there is a resurgence of interest in exploring existing annotations grounded under deep grammar formalisms, such as semantic analysis provided by ERS (Flickinger, 2000) . In stark contrast, it seems that only the annotation results gain interests, but not the core annotation engine-knowledgeextensive grammar.", |
| "cite_spans": [ |
| { |
| "start": 285, |
| "end": 303, |
| "text": "(Flickinger, 2000)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "On Deep Linguistic Knowledge", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The tendency to continually ignore the positive impact of precision grammar on semantic parsing is somewhat strange. For sentences that can be parsed by an ERG-guided parser, there is a significant accuracy gap which is repeatedly reported. See Table 1 for recent results. The main challenges for precision grammar-guided parsing are unsat- isfactory coverage and efficiency that limit their uses in NLP applications. Even for treebanking on newswire data, i.e., Wall Street Journal data from Penn TreeBank (Marcus et al., 1993) , ERG lacks analyses for c.a. 11% of sentences . For text data from the web, e.g. tweets, this problem is even more serious. Moreover, checking all possible linguistic constraints makes a grammar-guided parser too slow for many realistic NLP applications. Robustness and efficiency, thus, are two major problems for practical NLP applications. Recent encouraging progress achieved with purely data-driven models helps resolve the above two problems. Nevertheless, it seems too radical to remove all explicit linguistic knowledge about the syntacto-semantic composition process, the key characteristics of natural languages. In this paper, we introduce a neural SHRG-based semantic parser that strikes a balance between datadriven and grammar-guided parsing. We encode deep linguistic knowledge partially in a symbolic way and partially in a statistical way. It is worth noting that the symbolic system is a derivational, generative-enumerative grammar, while the origin of the data source is grounded under a representational, model-theoretic grammar. While grammar writers may favor the convenience provided by a unification grammar formalism, a practical parser may re-use algorithms by another formalism by translating grammars through data. Experiments also suggest that (recurrent) neural network models are able to effectively gain some deep linguistic knowledge from annotations.", |
| "cite_spans": [ |
| { |
| "start": 507, |
| "end": 528, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 245, |
| "end": 252, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "On Deep Linguistic Knowledge", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The advantages of using graph grammars to resolve semantic parsing is clear in concept but underexploited in practice. Here, we have shown ways to improve SHRG-based string-to-semanticgraph parsing. Especially, we emphasize the importance of modeling syntax-semantic interface and the compositional property of semantic annotations. Just like recent explorations on many other NLP tasks, we also show that neural network models are very powerful to advance deep language understanding.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "http://moin.delph-in.net/ErgSemantics", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "www.coli.uni-saarland.de/\u02dcyzhang/ files/jigsaw.jar 5 https://github.com/clab/dynet", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by the National Natural Science Foundation of China (61772036, 61331011) and the Key Laboratory of Science, Technology and Standard in Press Industry (Key Laboratory of Intelligent Press Media Technology). We thank the anonymous reviewers for their helpful comments. Weiwei Sun is the corresponding author.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Universal conceptual cognitive annotation (UCCA)", |
| "authors": [ |
| { |
| "first": "Omri", |
| "middle": [], |
| "last": "Abend", |
| "suffix": "" |
| }, |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Rappoport", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omri Abend and Ari Rappoport. 2013. Universal con- ceptual cognitive annotation (UCCA). In Proceed- ings of the 51st Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics,", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Abstract Meaning Representation for Sembanking", |
| "authors": [ |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Banarescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Bonial", |
| "suffix": "" |
| }, |
| { |
| "first": "Shu", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Madalina", |
| "middle": [], |
| "last": "Georgescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kira", |
| "middle": [], |
| "last": "Griffitt", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulf", |
| "middle": [], |
| "last": "Hermjakob", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Schneider", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Discourse. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "178--186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laura Banarescu, Claire Bonial, Shu Cai, Madalina Georgescu, Kira Griffitt, Ulf Hermjakob, Kevin Knight, Philipp Koehn, Martha Palmer, and Nathan Schneider. 2013. Abstract Meaning Representation for Sembanking. In Proceedings of the 7th Linguis- tic Annotation Workshop and Interoperability with Discourse. Association for Computational Linguis- tics, Sofia, Bulgaria, pages 178-186. http:// www.aclweb.org/anthology/W13-2322.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Layers of interpretation: On grammar and compositionality", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bender", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Flickinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Oepen", |
| "suffix": "" |
| }, |
| { |
| "first": "Woodley", |
| "middle": [], |
| "last": "Packard", |
| "suffix": "" |
| }, |
| { |
| "first": "Ann", |
| "middle": [ |
| "A" |
| ], |
| "last": "Copestake", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 11th International Conference on Computational Semantics, IWCS", |
| "volume": "", |
| "issue": "", |
| "pages": "239--249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily M. Bender, Dan Flickinger, Stephan Oepen, Woodley Packard, and Ann A. Copestake. 2015. Layers of interpretation: On grammar and com- positionality. In Proceedings of the 11th In- ternational Conference on Computational Seman- tics, IWCS 2015, 15-17 April, 2015, Queen Mary University of London, London, UK. pages 239- 249. http://aclweb.org/anthology/W/ W15/W15-0128.pdf.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Name Only. Hagit Borer", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Borer", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Borer. 2005a. In Name Only. Hagit Borer. Oxford University Press. https://books.google. com/books?id=cAEmAQAAIAAJ.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The Normal Course of Events. Hagit Borer", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Borer", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Borer. 2005b. The Normal Course of Events. Hagit Borer. Oxford University Press. https://books.google.com/books?id= M48UPLst_MQC.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Structuring Sense: Volume III: Taking Form", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Borer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Borer. 2013. Structuring Sense: Volume III: Taking Form.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Robust incremental neural semantic graph parsing", |
| "authors": [ |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Buys", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1215--1226", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jan Buys and Phil Blunsom. 2017. Robust incremen- tal neural semantic graph parsing. In Proceedings of the 55th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers). As- sociation for Computational Linguistics, Vancouver, Canada, pages 1215-1226. http://aclweb. org/anthology/P17-1112.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Parsing to 1-endpoint-crossing, pagenumber-2 graphs", |
| "authors": [ |
| { |
| "first": "Junjie", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Sheng", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2110--2120", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junjie Cao, Sheng Huang, Weiwei Sun, and Xiao- jun Wan. 2017. Parsing to 1-endpoint-crossing, pagenumber-2 graphs. In Proceedings of the 55th Annual Meeting of the Association for Computa- tional Linguistics (Volume 1: Long Papers). Asso- ciation for Computational Linguistics, Vancouver, Canada, pages 2110-2120. http://aclweb. org/anthology/P17-1193.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Parsing graphs with Hyperedge Replacement Grammars", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Andreas", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Bevan", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "13--1091", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Chiang, Jacob Andreas, Daniel Bauer, Karl Moritz Hermann, Bevan Jones, and Kevin Knight. 2013. Parsing graphs with Hyperedge Replacement Grammars. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers). Association for Computational Linguistics, Sofia, Bulgaria, pages 924-932. http://www. aclweb.org/anthology/P13-1091.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Remarks on nominalization", |
| "authors": [ |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Chomsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1970, |
| "venue": "Readings in English Transformational Grammar", |
| "volume": "", |
| "issue": "", |
| "pages": "170--221", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noam Chomsky. 1970. Remarks on nominalization. In R. A. Jacobs and P. S. Rosenbaum, editors, Readings in English Transformational Grammar, Waltham, MA, pages 170-221.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Building deep dependency structures using a wide-coverage CCG parser", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hockenmaier", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "A" |
| ], |
| "last": "Philadelphia", |
| "suffix": "" |
| }, |
| { |
| "first": "Usa", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "327--334", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Clark, Julia Hockenmaier, and Mark Steed- man. 2002. Building deep dependency structures us- ing a wide-coverage CCG parser. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, July 6-12, 2002, Philadel- phia, PA, USA.. pages 327-334. http://www. aclweb.org/anthology/P02-1042.pdf.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Invited Talk: slacker semantics: Why superficiality, dependency and avoidance of commitment can be the right way to go", |
| "authors": [ |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Copestake", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 12th Conference of the European Chapter of the ACL (EACL 2009). Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ann Copestake. 2009. Invited Talk: slacker semantics: Why superficiality, dependency and avoidance of commitment can be the right way to go. In Proceed- ings of the 12th Conference of the European Chap- ter of the ACL (EACL 2009). Association for Com- putational Linguistics, Athens, Greece, pages 1- 9. http://www.aclweb.org/anthology/ E09-1001.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Minimal Recursion Semantics: An introduction", |
| "authors": [ |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Copestake", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Flickinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Carl", |
| "middle": [], |
| "last": "Pollard", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [ |
| "A" |
| ], |
| "last": "Sag", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "281--332", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ann Copestake, Dan Flickinger, Carl Pollard, and Ivan A. Sag. 2005. Minimal Recursion Semantics: An introduction. Research on Language and Com- putation pages 281-332.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Span-based constituency parsing with a structure-label system and provably optimal dynamic oracles", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Cross", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1--11", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Cross and Liang Huang. 2016. Span-based constituency parsing with a structure-label system and provably optimal dynamic oracles. In Proceed- ings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing. Association for Computational Linguistics, Austin, Texas, pages 1-11. https://aclweb.org/anthology/ D16-1001.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Handbook of Graph Grammars and Computing by Graph Transformation", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Drewes", |
| "suffix": "" |
| }, |
| { |
| "first": "H.-J", |
| "middle": [], |
| "last": "Kreowski", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Habel", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "95--162", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F. Drewes, H.-J. Kreowski, and A. Habel. 1997. Hyper- edge Replacement Graph Grammars. In Grzegorz Rozenberg, editor, Handbook of Graph Grammars and Computing by Graph Transformation, World Scientific Publishing Co., Inc., River Edge, NJ, USA, pages 95-162. http://dl.acm.org/ citation.cfm?id=278918.278927.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Parser evaluation using elementary dependency matching", |
| "authors": [ |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Dridan", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Oepen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 12th International Conference on Parsing Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "225--230", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rebecca Dridan and Stephan Oepen. 2011. Parser eval- uation using elementary dependency matching. In Proceedings of the 12th International Conference on Parsing Technologies. Dublin, Ireland, pages 225- 230.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A discriminative graph-based parser for the Abstract Meaning Representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Flanigan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1426--1436", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Flanigan, Sam Thomson, Jaime Carbonell, Chris Dyer, and Noah A. Smith. 2014. A discrim- inative graph-based parser for the Abstract Mean- ing Representation. In Proceedings of the 52nd An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, Baltimore, Mary- land, pages 1426-1436. http://www.aclweb. org/anthology/P14-1134.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "On building a more efficient grammar by exploiting types", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Flickinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Nat. Lang. Eng", |
| "volume": "6", |
| "issue": "1", |
| "pages": "15--28", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Flickinger. 2000. On building a more efficient grammar by exploiting types. Nat. Lang. Eng. 6(1):15-28.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Who did what to whom? A contrastive study of syntacto-semantic dependencies", |
| "authors": [ |
| { |
| "first": "Angelina", |
| "middle": [], |
| "last": "Ivanova", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Oepen", |
| "suffix": "" |
| }, |
| { |
| "first": "Lilja", |
| "middle": [], |
| "last": "\u00d8vrelid", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Flickinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the Sixth Linguistic Annotation Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "2--11", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angelina Ivanova, Stephan Oepen, Lilja \u00d8vrelid, and Dan Flickinger. 2012. Who did what to whom? A contrastive study of syntacto-semantic dependen- cies. In Proceedings of the Sixth Linguistic Annota- tion Workshop. Jeju, Republic of Korea, pages 2-11.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Simple and accurate dependency parsing using bidirectional LSTM feature representations", |
| "authors": [ |
| { |
| "first": "Eliyahu", |
| "middle": [], |
| "last": "Kiperwasser", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "4", |
| "issue": "", |
| "pages": "313--327", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eliyahu Kiperwasser and Yoav Goldberg. 2016. Sim- ple and accurate dependency parsing using bidirec- tional LSTM feature representations. Transactions of the Association for Computational Linguistics 4:313-327. https://transacl.org/ojs/ index.php/tacl/article/view/885.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Neural amr: Sequence-to-sequence models for parsing and generation", |
| "authors": [ |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Konstas", |
| "suffix": "" |
| }, |
| { |
| "first": "Srinivasan", |
| "middle": [], |
| "last": "Iyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Yatskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "17--1014", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ioannis Konstas, Srinivasan Iyer, Mark Yatskar, Yejin Choi, and Luke Zettlemoyer. 2017. Neu- ral amr: Sequence-to-sequence models for pars- ing and generation. In Proceedings of the 55th Annual Meeting of the Association for Computa- tional Linguistics (Volume 1: Long Papers). As- sociation for Computational Linguistics, Vancou- ver, Canada, pages 146-157. http://aclweb. org/anthology/P17-1014.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Towards a catalogue of linguistic graph banks", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Kuhlmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Oepen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Computational Linguistics", |
| "volume": "42", |
| "issue": "4", |
| "pages": "819--827", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Kuhlmann and Stephan Oepen. 2016. Towards a catalogue of linguistic graph banks. Computa- tional Linguistics 42(4):819-827.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Building a large annotated corpus of English: the penn treebank", |
| "authors": [ |
| { |
| "first": "Mitchell", |
| "middle": [ |
| "P" |
| ], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ann" |
| ], |
| "last": "Marcinkiewicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Beatrice", |
| "middle": [], |
| "last": "Santorini", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "2", |
| "pages": "313--330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beatrice Santorini. 1993. Building a large annotated corpus of English: the penn tree- bank. Computational Linguistics 19(2):313- 330. http://dl.acm.org/citation.cfm? id=972470.972475.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "On-the-fly operation batching in dynamic computation graphs", |
| "authors": [ |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Graham Neubig, Yoav Goldberg, and Chris Dyer. 2017. On-the-fly operation batching in dynamic computa- tion graphs. In Advances in Neural Information Pro- cessing Systems.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Semeval 2015 task 18: Broad-coverage semantic dependency parsing", |
| "authors": [ |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Oepen", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Kuhlmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Zeman", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvie", |
| "middle": [], |
| "last": "Cinkov\u00e1", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Flickinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephan Oepen, Marco Kuhlmann, Yusuke Miyao, Daniel Zeman, Silvie Cinkov\u00e1, Dan Flickinger, Jan Hajic, and Zdenka Uresov\u00e1. 2015. Semeval 2015 task 18: Broad-coverage semantic dependency pars- ing. In Proceedings of the 9th International Work- shop on Semantic Evaluation (SemEval 2015).", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Discriminant-based mrs banking", |
| "authors": [ |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Oepen", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the Fifth International Conference on Language Resources and Evaluation (LREC-2006). European Language Resources Association (ELRA)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephan Oepen and Jan Tore L\u00f8nning. 2006. Discriminant-based mrs banking. In Proceedings of the Fifth International Conference on Language Resources and Evaluation (LREC-2006). European Language Resources Association (ELRA), Genoa, Italy. ACL Anthology Identifier: L06-1214.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Deep multitask learning for semantic dependency parsing", |
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2037--2048", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Peng, Sam Thomson, and Noah A. Smith. 2017a. Deep multitask learning for semantic dependency parsing. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computa- tional Linguistics, Vancouver, Canada, pages 2037- 2048. http://aclweb.org/anthology/ P17-1186.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Uofr at semeval-2016 task 8: Learning Synchronous Hyperedge Replacement Grammar for AMR parsing", |
| "authors": [ |
| { |
| "first": "Xiaochang", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)", |
| "volume": "", |
| "issue": "", |
| "pages": "1185--1189", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaochang Peng and Daniel Gildea. 2016. Uofr at semeval-2016 task 8: Learning Synchronous Hyper- edge Replacement Grammar for AMR parsing. In Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016). Association for Computational Linguistics, San Diego, Califor- nia, pages 1185-1189. http://www.aclweb. org/anthology/S16-1183.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "A Synchronous Hyperedge Replacement Grammar based approach for AMR parsing", |
| "authors": [ |
| { |
| "first": "Xiaochang", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Linfeng", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "32--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaochang Peng, Linfeng Song, and Daniel Gildea. 2015. A Synchronous Hyperedge Replacement Grammar based approach for AMR parsing. In Proceedings of the Nineteenth Conference on Com- putational Natural Language Learning. Associa- tion for Computational Linguistics, Beijing, China, pages 32-41.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Addressing the data sparsity issue in neural amr parsing", |
| "authors": [ |
| { |
| "first": "Xiaochang", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Chuan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "366--375", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaochang Peng, Chuan Wang, Daniel Gildea, and Nianwen Xue. 2017b. Addressing the data spar- sity issue in neural amr parsing. In Proceed- ings of the 15th Conference of the European Chapter of the Association for Computational Lin- guistics: Volume 1, Long Papers. Association for Computational Linguistics, Valencia, Spain, pages 366-375. http://www.aclweb.org/ anthology/E17-1035.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Head-Driven Phrase Structure Grammar", |
| "authors": [ |
| { |
| "first": "Carl", |
| "middle": [], |
| "last": "Pollard", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [ |
| "A" |
| ], |
| "last": "Sag", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carl Pollard and Ivan A. Sag. 1994. Head-Driven Phrase Structure Grammar. The University of Chicago Press, Chicago.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "A minimal span-based neural constituency parser", |
| "authors": [ |
| { |
| "first": "Mitchell", |
| "middle": [], |
| "last": "Stern", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Andreas", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "818--827", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitchell Stern, Jacob Andreas, and Dan Klein. 2017. A minimal span-based neural constituency parser. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers). Association for Computa- tional Linguistics, Vancouver, Canada, pages 818- 827. http://aclweb.org/anthology/ P17-1076.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Transition-based parsing for deep dependency structures", |
| "authors": [ |
| { |
| "first": "Xun", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yantao", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Computational Linguistics", |
| "volume": "42", |
| "issue": "3", |
| "pages": "353--389", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xun Zhang, Yantao Du, Weiwei Sun, and Xiao- jun Wan. 2016. Transition-based parsing for deep dependency structures. Computational Lin- guistics 42(3):353-389. http://aclweb.org/ anthology/J16-3001.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "V + PUNCT CM + HD\u2193V V + HD-CMP SP-HD + HD-CMP RHS (semantics)" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "The grammar extraction process of the running example. Conceptual edges which are directly aligned with the syntactic rules are painted in green. The span-based alignment is shown in the parentheses. Structural edges that connect conceptual edges are painted in brown. Green edges and brown edges together form the subgraph, which acts as RHS in the HRG rule. External nodes are represented as solid dots." |
| }, |
| "FIGREF3": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "(R|T ) \u2212 i\u2208PART(R,T )\u2229PART(Rg,T )The semantic interpretation process. The interpretation performs bottom-up beam search to get a bunch of high-scored subgraphs for each node in the derivation tree." |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "num": null, |
| "text": "Algorithm 1 Hyperedge Replacement Grammar Extraction Algorithm Require: Input syntactic tree T , hypergraph g 1: RULES \u2190 {} 2: for tree node n in postorder traversal of T do Ensure: Rewriting rule of node n is A \u2192 B + C, spans of node A, B, C are SPAN-A, SPAN-B, SPAN-C", |
| "html": null, |
| "content": "<table><tr><td>3:</td><td>SPANS \u2190 {SPAN-A, SPAN-B, SPAN-C}</td></tr><tr><td>4:</td><td>C-EDGES \u2190 {e|e \u2208 EDGES(g) \u2227 SPAN(e) \u2208 SPANS}</td></tr><tr><td>5:</td><td>ALL-NODES \u2190 {s|s \u2208 NODES(g) \u2227 \u2203e \u2208 C-EDGES s.t. s \u2208 NODES(e)}</td></tr><tr><td>6:</td><td>S-EDGES \u2190 {e|e \u2208 EDGES(g) \u2227 e is structual edge \u2227 \u2200s \u2208 NODES(e) =\u21d2 s \u2208 C-EDGES}</td></tr><tr><td>7:</td><td>ALL-EDGES = C-EDGES \u222a S-EDGES</td></tr><tr><td>8:</td><td>INTERNAL-NODES \u2190 {}</td></tr><tr><td>9:</td><td>EXTERNAL-NODES \u2190 {}</td></tr><tr><td>10:</td><td>for node s in ALL-NODES do</td></tr><tr><td>11:</td><td>if \u2200e \u2208 EDGES(g), s \u2208 NODES(e) =\u21d2 e \u2208 ALL-EDGES then</td></tr><tr><td>12:</td><td>INTERNAL-NODES \u2190 INTERNAL-NODES \u222a {s}</td></tr><tr><td>13:</td><td>else</td></tr><tr><td>14:</td><td>EXTERNAL-NODES \u2190 EXTERNAL-NODES \u222a {s}</td></tr><tr><td>15:</td><td>end if</td></tr><tr><td>16:</td><td>end for</td></tr><tr><td>17:</td><td/></tr></table>" |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "num": null, |
| "text": "Hyperparamters used in the experiments.", |
| "html": null, |
| "content": "<table><tr><td/><td>#EP</td><td/><td>#Rule</td><td/><td>#Instance</td></tr><tr><td/><td/><td colspan=\"3\">Fine Coarse Unlabeled</td><td/></tr><tr><td/><td>1</td><td colspan=\"2\">49689 14234</td><td>1476</td><td>676817</td></tr><tr><td/><td>2</td><td>9616</td><td>3424</td><td>488</td><td>64708</td></tr><tr><td>EDS</td><td>3</td><td>2739</td><td>1486</td><td>280</td><td>11195</td></tr><tr><td/><td>4</td><td>1059</td><td>732</td><td>248</td><td>2071</td></tr><tr><td/><td>5+</td><td>508</td><td>418</td><td>251</td><td>655</td></tr><tr><td/><td>1</td><td colspan=\"2\">50668 15745</td><td>2688</td><td>657999</td></tr><tr><td/><td>2</td><td>11428</td><td>4418</td><td>896</td><td>79888</td></tr><tr><td>DMRS</td><td>3</td><td>3576</td><td>1929</td><td>465</td><td>14237</td></tr><tr><td/><td>4</td><td>1237</td><td>873</td><td>299</td><td>2561</td></tr><tr><td/><td>5+</td><td>669</td><td>557</td><td>297</td><td>901</td></tr></table>" |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "num": null, |
| "text": "Statistics of SHRG rules with different label type by the count of external points in EDS and DMRS representations.", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF7": { |
| "type_str": "table", |
| "num": null, |
| "text": "", |
| "html": null, |
| "content": "<table><tr><td colspan=\"4\">: The EDM score on EDS development</td></tr><tr><td colspan=\"4\">data with different model: count based greedy</td></tr><tr><td colspan=\"4\">search, rule embedding greedy search and beam</td></tr><tr><td colspan=\"4\">search. We use syntactic trees with coarse-grained</td></tr><tr><td>labels.</td><td/><td/><td/></tr><tr><td>Data</td><td colspan=\"3\">Label EDM P EDM A EDM</td></tr><tr><td>EDS</td><td>Fine Coarse</td><td>92.70 93.48</td><td>87.77 90.23 87.88 90.67</td></tr><tr><td>DMRS</td><td>Fine Coarse</td><td>92.52 93.60</td><td>86.47 89.46 86.62 90.07</td></tr></table>" |
| }, |
| "TABREF8": { |
| "type_str": "table", |
| "num": null, |
| "text": "Accuracy on the development data under different labels of syntactic tree and beam search.", |
| "html": null, |
| "content": "<table/>" |
| }, |
| "TABREF9": { |
| "type_str": "table", |
| "num": null, |
| "text": "ModelEDM P EDM A EDM EDS", |
| "html": null, |
| "content": "<table><tr><td/><td>Buys and Blunsom</td><td>88.14</td><td>82.20 85.48</td></tr><tr><td/><td>ACE</td><td>91.82</td><td>86.92 89.58</td></tr><tr><td/><td>Ours</td><td>93.15</td><td>87.59 90.35</td></tr><tr><td/><td>Buys and Blunsom</td><td>87.54</td><td>80.10 84.16</td></tr><tr><td>DMRS</td><td>ACE</td><td>92.08</td><td>86.77 89.64</td></tr><tr><td/><td>Ours</td><td>93.11</td><td>86.01 89.51</td></tr></table>" |
| }, |
| "TABREF10": { |
| "type_str": "table", |
| "num": null, |
| "text": "Rules of lexicalized and construction grammars that are extracted from the running example.", |
| "html": null, |
| "content": "<table><tr><td>Grammar</td><td colspan=\"2\">EDM P EDM A EDM</td></tr><tr><td colspan=\"2\">Construction 93.48</td><td>87.88 90.67</td></tr><tr><td>Lexicalized</td><td>92.14</td><td>81.05 86.63</td></tr></table>" |
| }, |
| "TABREF11": { |
| "type_str": "table", |
| "num": null, |
| "text": "The EDM score on EDS development data with construction grammar and lexicalized grammar using syntax trees of coarse-grained labels and beam search.", |
| "html": null, |
| "content": "<table><tr><td>Grammar</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5+</td></tr><tr><td colspan=\"6\">Construction 14234 3424 1486 732 418</td></tr><tr><td>Lexicalized</td><td colspan=\"5\">11653 5938 2358 396 11</td></tr></table>" |
| }, |
| "TABREF12": { |
| "type_str": "table", |
| "num": null, |
| "text": "Comparison of the construction grammar and the lexicalized grammar extracted from EDS data. We use syntax trees of coarse-grained labels.", |
| "html": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |