| { |
| "paper_id": "E14-1039", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:39:53.135513Z" |
| }, |
| "title": "Fast Statistical Parsing with Parallel Multiple Context-Free Grammars", |
| "authors": [ |
| { |
| "first": "Krasimir", |
| "middle": [], |
| "last": "Angelov", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chalmers University of Technology", |
| "location": { |
| "settlement": "G\u00f6teborg", |
| "country": "Sweden" |
| } |
| }, |
| "email": "krasimir@chalmers.se" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Ljungl\u00f6f", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Chalmers University of Technology", |
| "location": { |
| "settlement": "G\u00f6teborg", |
| "country": "Sweden" |
| } |
| }, |
| "email": "peter.ljunglof@cse.gu.se" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We present an algorithm for incremental statistical parsing with Parallel Multiple Context-Free Grammars (PMCFG). This is an extension of the algorithm by Angelov (2009) to which we added statistical ranking. We show that the new algorithm is several times faster than other statistical PMCFG parsing algorithms on real-sized grammars. At the same time the algorithm is more general since it supports non-binarized and non-linear grammars. We also show that if we make the search heuristics non-admissible, the parsing speed improves even further, at the risk of returning sub-optimal solutions.", |
| "pdf_parse": { |
| "paper_id": "E14-1039", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We present an algorithm for incremental statistical parsing with Parallel Multiple Context-Free Grammars (PMCFG). This is an extension of the algorithm by Angelov (2009) to which we added statistical ranking. We show that the new algorithm is several times faster than other statistical PMCFG parsing algorithms on real-sized grammars. At the same time the algorithm is more general since it supports non-binarized and non-linear grammars. We also show that if we make the search heuristics non-admissible, the parsing speed improves even further, at the risk of returning sub-optimal solutions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In this paper we present an algorithm for incremental parsing using Parallel Multiple Context-Free Grammars (PMCFG) (Seki et al., 1991) . This is a non context-free formalism allowing discontinuity and crossing dependencies, while remaining with polynomial parsing complexity.", |
| "cite_spans": [ |
| { |
| "start": 116, |
| "end": 135, |
| "text": "(Seki et al., 1991)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The algorithm is an extension of the algorithm by Angelov (2009; which adds statistical ranking. This is a top-down algorithm, shown by Ljungl\u00f6f (2012) to be similar to other top-down algorithms (Burden and Ljungl\u00f6f, 2005; Kanazawa, 2008; Kallmeyer and Maier, 2009) . None of the other top-down algorithms are statistical.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 64, |
| "text": "Angelov (2009;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 136, |
| "end": 151, |
| "text": "Ljungl\u00f6f (2012)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 195, |
| "end": 222, |
| "text": "(Burden and Ljungl\u00f6f, 2005;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 223, |
| "end": 238, |
| "text": "Kanazawa, 2008;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 239, |
| "end": 265, |
| "text": "Kallmeyer and Maier, 2009)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The only statistical PMCFG parsing algorithms (Kato et al., 2006; Kallmeyer and Maier, 2013; Maier et al., 2012) all use bottom-up parsing strategies. Furthermore, they require the grammar to be binarized and linear, which means that they only support linear context-free rewriting systems (LCFRS). In contrast, our algorithm naturally supports the full power of PMCFG. By lifting these restrictions, we make it possible to ex-periment with novel grammar induction methods (Maier, 2013) and to use statistical disambiguation for hand-crafted grammars (Angelov, 2011) .", |
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 65, |
| "text": "(Kato et al., 2006;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 66, |
| "end": 92, |
| "text": "Kallmeyer and Maier, 2013;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 93, |
| "end": 112, |
| "text": "Maier et al., 2012)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 473, |
| "end": 486, |
| "text": "(Maier, 2013)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 551, |
| "end": 566, |
| "text": "(Angelov, 2011)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "By extending the algorithm with a statistical model, we allow the parser to explore only parts of the search space, when only the most probable parse tree is needed. Our cost estimation is similar to the estimation for the Viterbi probability as in Stolcke (1995) , except that we have to take into account that our grammar is not contextfree. The estimation is both admissible and monotonic (Klein and Manning, 2003) which guarantees that we always find a tree whose probability is the global maximum.", |
| "cite_spans": [ |
| { |
| "start": 249, |
| "end": 263, |
| "text": "Stolcke (1995)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 392, |
| "end": 417, |
| "text": "(Klein and Manning, 2003)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We also describe a variant with a nonadmissible estimation, which further improves the efficiency of the parser at the risk of returning a suboptimal parse tree.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We start with a formal definition of a weighted PMCFG in Section 2, and we continue with a presentation of our algorithm by means of a weighted deduction system in Section 3. In Section 4, we prove that our estimations are admissible and monotonic. In Section 5 we calculate an estimate for the minimal inside probability for every category, and in Section 6 we discuss the nonadmissible heuristics. Sections 7 and 8 describe the implementation and our evaluation, and the final Section 9 concludes the paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our definition of weighted PMCFG (Definition 1) is the same as the one used by Angelov (2009; , except that we extend it with weights for the productions. This definition is also similar to Kato et al (2006) , with the small difference that we allow non-linear functions.", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 93, |
| "text": "Angelov (2009;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 190, |
| "end": 207, |
| "text": "Kato et al (2006)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As an illustration for PMCFG parsing, we use a simple grammar ( Figure 1 ) which can generate phrases like \"both black and white\" and \"either red or white\" but rejects the incorrect combina-Definition 1 A parallel multiple context-free grammar is a tuple G = (N, T, F, P, S, d, d i , r, a) where:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 64, |
| "end": 72, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 N is a finite set of categories and a positive integer d(A) called dimension is given for each A \u2208 N .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 T is a finite set of terminal symbols which is disjoint with N .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 F is a finite set of functions where the arity a(f ) and the dimensions r(f ) and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "d i (f ) (1 \u2264 i \u2264 a(f ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "are given for every f \u2208 F . For every positive integer d, (T * ) d denotes the set of all d-tuples of strings over T . Each function f \u2208 F is a total mapping from (", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "T * ) d 1 (f ) \u00d7 (T * ) d 2 (f ) \u00d7 \u2022 \u2022 \u2022 \u00d7 (T * ) d a(f ) (f ) to (T * ) r(f )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": ", defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "f := (\u03b1 1 , \u03b1 2 , . . . , \u03b1 r(f ) )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Here \u03b1 i is a sequence of terminals and k; l pairs, where 1 \u2264 k \u2264 a(f ) is called argument index and 1 \u2264 l \u2264 d k (f ) is called constituent index.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 P is a finite set of productions of the form:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A w \u2212 \u2192 f [A 1 , A 2 , . . . , A a(f ) ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where A \u2208 N is called result category, A 1 , A 2 , . . . , A a(f ) \u2208 N are called argument categories, f \u2208 F is the function symbol and w > 0 is a weight. For the production to be well formed the conditions", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "d i (f ) = d(A i ) (1 \u2264 i \u2264 a(f )) and r(f ) = d(A) must hold.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 S is the start category and d(S) = 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "tions both-or and either-and. We avoid these combinations by coupling the right pairs of words in a single function, i.e. we have the abstract conjunctions both and and either or which are linearized as discontinuous phrases. The phrase insertion itself is done in the definition of conjA . It takes the conjunction as its first argument, and it uses 1; 1 and 1; 2 to insert the first and the second constituent of the argument at the right places in the complete phrase. A tree of function applications that yields a complete phrase is the parse tree for the phrase. For instance, the phrase \"both red and either black or white\" is represented by the tree:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "(conjA both and red (conjA either or black white))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A w 1 \u2212\u2192 conjA [Conj , A , A ] A w 2 \u2212\u2192 black [] A w 3 \u2212\u2192 white[] A w 4 \u2212\u2192 red [] Conj w 5 \u2212\u2192 both and [] Conj w 6 \u2212\u2192 either or[]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "conjA := ( 1; 1 2; 1 1; 2 3; 1 ) black := (\"black\") white := (\"white\") red := (\"red\") both and := (\"both\", \"and\") either or := (\"either\", \"or\") The weight of a tree is the sum of the weights for all functions that are used in it. In this case the weight for the example is w 1 +w 5 +w 4 +w 1 +w 6 + w 2 + w 3 . If there are ambiguities in the sentence, the algorithm described in Section 3 always finds a tree which minimizes the weight.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Usually the weights for the productions are logarithmic probabilities, i.e. the weight of the pro-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "duction A \u2192 f [ B] is: w = \u2212 log P (A \u2192 f [ B] | A) where P (A \u2192 f [ B] | A)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "is the probability to choose this production when the result category is fixed. In this case the probabilities for all productions with the same result category sum to one:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A w \u2212 \u2192f[ B] \u2208P e \u2212w = 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "However, the parsing algorithm does not depend on the probabilistic interpretation of the weights, so the same algorithm can be used with any other kind of weights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "PMCFG definition", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We define the algorithm as weighted deduction system (Nederhof, 2003) which generalizes Angelov's system.", |
| "cite_spans": [ |
| { |
| "start": 53, |
| "end": 69, |
| "text": "(Nederhof, 2003)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "A key feature in his algorithm is that the expressive PMCFG is reduced to a simple contextfree grammar which is extended dynamically at parsing time in order to account for context dependent features in the original grammar. This can be exemplified with the grammar in Figure 1 , where there are two productions for category Conj . Given the phrase \"both black and white\", after accepting the token both, only the production Conj w 5 \u2212\u2192 both and [] can be applied for parsing the second part of the conjunction. This is achieved by generating a new category Conj 2 which has just a single production:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 269, |
| "end": 277, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Conj 2 w 5 \u2212\u2192 both and []", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The parsing algorithm is basically an extension of Earley's (1970) algorithm, except that the parse items in the chart also keep track of the categories for the arguments. In the particular case, the corresponding chart item will be updated to point to Conj 2 instead of Conj . This guarantees that only and will be accepted as a second constituent after seeing that the first constituent is both. Now since the set of productions is dynamic, the parser must keep three kinds of items in the chart, instead of two as in the Earley algorithm:", |
| "cite_spans": [ |
| { |
| "start": 51, |
| "end": 66, |
| "text": "Earley's (1970)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Productions The parser maintains a dynamic set with all productions that are derived during the parsing. The initial state is populated with the productions from the set P in the grammar.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Active Items The active items play the same role as the active items in the Earley algorithm. They have the form:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "[ k j A w \u2212 \u2192 f [ B]; l : \u03b1 \u2022 \u03b2; w i ; w o ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "and represent the fact that a constituent l of a category A has been partially recognized from position j to k in the sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Here A w \u2212 \u2192 f [ B]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "is the production and the concatenation \u03b1\u03b2 is the sequence of terminals and k; r pairs which defines the l-th constituent of function f . The dot \u2022 between \u03b1 and \u03b2 separates the part of the constituent that is already recognized from the part which is still pending. Finally w i and w o are the inside and outside weights for the item.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Passive Items The passive items are of the form:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "[ k j A; l;\u00c2]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "and state that a constituent with index l from category A was recognized from position j to position k in the sentence. As a consequence the parser has created a new category\u00c2. The set of productions derived for\u00c2 compactly records all possible ways to parse the j \u2212 k fragment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction System", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The inside weight w i and the outside weight w o in the active items deserve more attention since this is the only difference compared to Angelov (2009; . When the item is complete, it will yield the forest of all trees that derive the sub-string covered by the item. For example, when the first constituent for category Conj is completely parsed, the forest will contain the single production in (1). The inside weight for the active item is the currently best known estimation for the lowest weight of a tree in the forest. The trees yielded by the item do not cover the whole sentence however. Instead, they will become part of larger trees that cover the whole sentence. The outside weight is the estimation for the lowest weight for an extension of a tree to a full tree. The sum w i + w o estimates the weight of the full tree.", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 152, |
| "text": "Angelov (2009;", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inside and outside weights", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Before turning to the deduction rules we also need a notation for the lowest possible weight for a tree of a given category. If A \u2208 N is a category then w A will denote the lowest weight that a tree of category A can have. For convenience, we also use w B as a notation for the sum i w B i of the weight of all categories in the vector B. If the category A is defined in the grammar then we assume that the weight is precomputed as described in Section 5. When the parser creates the category, it will compute the weight dynamically.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inside and outside weights", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The deduction rules are shown in Figure 2 . Here the assumption is that the active items are processed in the order of increasing w i + w o weight. In the actual implementation we put all active items in a priority queue and we always take first the item with the lowest weight. We never throw away items but the processing of items with very high weight might be delayed indefinitely or they may never be processed if the best tree is found before that. Furthermore, we think of the deduction system as a way to derive a set of items, but in our case we ignore the weights when we consider whether two active items are the same. In this way, every item is derived only once and the weights for the active items are computed from the weights of the first antecedents that led to its derivation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 33, |
| "end": 41, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Finally, we use two more notations in the rules: rhs(g, r) denotes constituent with index r in function g; and \u03c9 k denotes the k-th token in the sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "INITIAL PREDICT S w \u2212 \u2192 f [ B] [ 0 0 S w \u2212 \u2192 f [ B]; 1 : \u2022 \u03b3; w + w B ; 0] S = start category, \u03b3 = rhs(f, 1) PREDICT B d w 1 \u2212\u2192 g[ C] [ k j A w 2 \u2212\u2192 f [ B]; l : \u03b1 \u2022 d; r \u03b2; w i ; w o ] [ k k B d w 1 \u2212\u2192 g[ C]; r : \u2022 \u03b3; w 1 + w C ; w i \u2212 w B d + w o ] \u03b3 = rhs(g, r) SCAN [ k j A w \u2212 \u2192 f [ B]; l : \u03b1 \u2022 s \u03b2; w i ; w o ] [ k+1 j A w \u2212 \u2192 f [ B]; l : \u03b1 s \u2022 \u03b2; w i ; w o ] s = \u03c9 k+1 COMPLETE [ k j A w \u2212 \u2192 f [ B]; l : \u03b1 \u2022 ; w i ; w o ] A w \u2212 \u2192 f [ B] [ k j A; l;\u00c2]\u00c2 = (A, l, j, k), w\u00c2 = w i COMBINE [ u j A w \u2212 \u2192 f [ B]; l : \u03b1 \u2022 d; r \u03b2; w i ; w o ] [ k u B d ; r;B d ] [ k j A w \u2212 \u2192 f [ B{d :=B d }]; l : \u03b1 d; r \u2022 \u03b2; w i + wB d \u2212 w B d ; w o ] Figure 2: Deduction Rules", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The first rule on Figure 2 is INITIAL PREDICT and here we predict the initial active items from the productions for the start category S. Since this is the start category, we set the outside weight to zero. The inside weight is equal to the sum of the weight w for the production and the lowest possible weight w B for the vector of arguments B. The reason is that even though we do not know the weight for the final tree yet, it cannot be lower than w + w B since w B is the lowest possible weight for the arguments of function f . The interaction between inside and outside weights is more interesting in the PREDICT rule. Here we have an item where the dot is before d; r and from this we must predict one item for each production", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 18, |
| "end": 26, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "B d w 1 \u2212\u2192 g[ C] of category B d .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The inside weight for the new item is w 1 + w C for the same reasons as for the INITIAL PREDICT rule. The outside weight however is not zero because the new item is predicted from another item. The inside weight for the active item in the antecedents is now part of the outside weight of the new item. We just have to subtract w B d from w i because the new item is going to produce a new tree which will replace the d-th argument of f . For this reason the estimation for the outside weight is w", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "i \u2212w B d +w o ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where we also added the outside weight for the antecedent item.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the SCAN rule, we just move the dot past a token, if it matches the current token \u03c9 k+1 . Both the inside and the outside weights are passed untouched from the antecedent to the consequent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the COMPLETE rule, we have an item where the dot has reached the end of the constituent. Here we generate a new category\u00c2 which is unique for the combination (A, l, j, k), and we derive the produc-tion\u00c2 w \u2212 \u2192 f [ B] for it. We set the weight w\u00c2 for\u00c2 to be equal to w i and in Section 4, we will prove that this is indeed the lowest weight for a tree of category\u00c2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the last rule COMBINE, we combine an active item with a passive item. The outside weight w o for the new active item remains the same. However, we must update the inside weight since we have replaced the d-th argument in B with the newly generated categoryB d . The new weight is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "w i + wB d \u2212 w B d , i.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "e. we add the weight for the new category and we subtract the weight for the previous category B d .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Now for the correctness of the weights we must prove that the estimations are both admissible and monotonic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deduction rules", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We will first prove that the weights grow monotonically, i.e. if we derive one active item from another then the sum w i + w o for the new item is always greater or equal to the sum for the previous item. PREDICT and COMBINE are the only two rules with an active item both in the antecedents and in the consequents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Note that in PREDICT we choose one particular production for category B d . We know that the lowest possible weight of a tree of this category is w B d . If we restrict the set of trees to those that not only have the same category B d but also use the same production", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "B d w 1 \u2212\u2192 g[ C]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "on the top level, then the best weight for such a tree will be w 1 + w C . According to the definition of w B d , it must follow that:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "w 1 + w C \u2265 w B d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "From this we can trivially derive that:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "(w 1 + w C ) + (w i \u2212 w B d + w o ) \u2265 w i + w o", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "which is the monotonicity condition for rule PREDICT. Similarly in rule COMBINE, the condition: ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "wB d \u2265 w B d must", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "(w i + wB d \u2212 w B d ) + w o \u2265 w i + w o", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The last two inequalities are valid only if we can correctly compute wB d for a dynamically generated categoryB d . This happens in rule COMPLETE, where we have a complete active item with a correctly computed inside weight w i . Since we process the active items in the order of increasing w i + w o weight and since we create\u00c2 when we find the first complete item for category A, it is guaranteed that at this point we have an item with minimal w i + w o value. Furthermore, all items with the same result category A and the same start position j must have the same outside weight. It follows that when we create\u00c2 we actually do it from an active item with minimal inside weight w i . This means that it is safe to assign that w\u00c2 = w i .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "It is also easy to see that the estimation is admissible. The only places where we use estimations for the unseen parts of the sentence is in the rules INITIAL PREDICT and PREDICT where we use the weights w B and w C which may include components corresponding to function argument that are not seen yet. However by definition it is not possible to build a tree with weight lower than the weight for the category. This means that the estimation is always admissible.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Admissibility and Monotonicity", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The minimal weight for a dynamically created category is computed by the parser, but we must initialize the weights for the categories that are defined in the grammar. The easiest way is to just set all weights to zero, and this is safe since the weights for the predefined categories are used only as estimations for the yet unseen parts of the sentence. Essentially this gives us a statistical parser which performs Dijkstra search in the space of all parse trees. Any other reasonable weight assignment will give us an A * algorithm (Hart et al., 1968) .", |
| "cite_spans": [ |
| { |
| "start": 536, |
| "end": 555, |
| "text": "(Hart et al., 1968)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In general it is possible to devise different heuristics which will give us different improvements in the parsing time. In our current implementation of the parser we use a weight assignment which considers only the already known probabilities for the productions in the grammar.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The weight for a category A is computed as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "w A = min A w \u2212 \u2192f[ B] \u2208 P (w + w B )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Here the sum w + w B is the minimal weight for a tree constructed with the production A w \u2212 \u2192 f [ B] at the root. By taking the minimum over all productions for A, we get the corresponding weight w A . This is a recursive equation since its righthand side contains the value w B which depends on the weights for the categories in B. It might happen that there are mutually dependent categories which will lead to a recursion in the equation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The solution is found with iterative assignments until a fixed point is reached. In the beginning we assign w A = 0 for all categories. After that we recompute the new weights with the equation above until we reach a fixed point.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Estimation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The set of active items is kept in a priority queue and at each step we process the item with the lowest weight. However, when we experimented with the algorithm we noticed that most of the time the item that is selected would eventually contribute with an alternative reading of the sentence but not to the best parse. What happens is that despite that there are already items ending at position k in the sentence, the current best item might have a span i \u2212 j where j < k. The parser then picks the best item only to discover later that the item became much heavier until it reached the span i \u2212 k.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-admissible heuristics", |
| "sec_num": "6" |
| }, |
| { |
| "text": "This suggests that when we compare the weights of items with different end positions, then we must take into account the weight that will be accumulated by the item that ends earlier until the two items align at the same end position.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-admissible heuristics", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We use the following heuristic to estimate the difference. The first time when we extend an item from position i to position i + 1, we record the weight increment w \u2206 (i + 1) for that position. The increment w \u2206 is the difference between the weights for the best active item reaching position i + 1 and the best active item reaching position i. From now on when we compare the weights for two items x j and x k , with end positions j and k respectively (j < k), then we always add to the score w x j of the first item a fraction of the sum of the increments for the positions between j and k. In other words, instead of using w x j when comparing with w x k , we use", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-admissible heuristics", |
| "sec_num": "6" |
| }, |
| { |
| "text": "w x j + h \u2022 j<i\u2264k w \u2206 (i)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-admissible heuristics", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We call the constant h \u2208 [0, 1] the \"heuristics factor\". If h = 0, we obtain the basic algorithm that we described earlier which is admissible and always returns the best parse. However, the evaluation in Section 8.3 shows that a significant speedup can be obtained by using larger values of h. Unfortunately, if h > 0, we loose some accuracy and cannot guarantee that the best parse is always returned first.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-admissible heuristics", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Note that the heuristics does not change the completeness of the algorithm -it will succeed for all grammatical sentences and fail for all nongrammatical. But it does not guarantee that the first parse tree will be the optimal.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-admissible heuristics", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The parser is implemented in C and is distributed as a part of the runtime system for the open-source Grammatical Framework (GF) programming language (Ranta, 2011) . 1 Although the primary application of the runtime system is to run GF applications, it is not specific to one formalism, and it can serve as an execution platform for other frameworks where natural language parsing and generation is needed.", |
| "cite_spans": [ |
| { |
| "start": 150, |
| "end": 163, |
| "text": "(Ranta, 2011)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The GF system is distributed with a library of manually authored resource grammars (Ranta, 1 http://www.grammaticalframework.org/ 2009) for over 25 languages, which are used as a resource for deriving domain specific grammars. Adding a big lexicon to the resource grammar results in a highly ambiguous grammar, which can give rise to millions of trees even for moderately complex sentences. Previously, the GF system has not been able to parse with such ambiguous grammars, but with our statistical algorithm it is now feasible.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 90, |
| "text": "(Ranta,", |
| "ref_id": null |
| }, |
| { |
| "start": 91, |
| "end": 92, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation", |
| "sec_num": "7" |
| }, |
| { |
| "text": "We did an initial evaluation on the GF English resource grammar augmented with a large-coverage lexicon of 40 000 lemmas taken from the Oxford Advanced Learner's Dictionary (Mitton, 1986) . In total the grammar has 44 000 productions. The rule weights were trained from a version of the Penn Treebank (Marcus et al., 1993) which was converted to trees compatible with the grammar.", |
| "cite_spans": [ |
| { |
| "start": 173, |
| "end": 187, |
| "text": "(Mitton, 1986)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 301, |
| "end": 322, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "8" |
| }, |
| { |
| "text": "The trained grammar was tested on Penn Treebank sentences of length up to 35 tokens, and the parsing times were at most 7 seconds per sentence. This initial test was run on a computer with a 2.4 GHz Intel Core i5 processor with 8 GB RAM. This result was very encouraging, given the complexity of the grammar, so we decided to do a larger test and compare with an existing state-of-the-art statistical PMCFG parser.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Rparse (Kallmeyer and Maier, 2013 ) is a another state-of-the-art training and parsing system for PMCFG. 2 It is written in Java and developed at the Universities of T\u00fcbingen and D\u00fcsseldorf, Germany. Rparse can be used for training probabilistic PMCFGs from discontinuous treebanks. It can also be used for parsing new sentences with the trained grammars.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 33, |
| "text": "(Kallmeyer and Maier, 2013", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "8" |
| }, |
| { |
| "text": "In our evaluation we used Rparse to extract PM-CFG grammars from the discontinuous German Tiger Treebank (Brants et al., 2002) . The reason for using this treebank is that the extracted grammars are non-context-free, and our parser is specifically made for such grammars.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 126, |
| "text": "(Brants et al., 2002)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "8" |
| }, |
| { |
| "text": "In our evaluations we got the same general results regardless of the size of the grammar, so we only report the results from one of these runs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation data", |
| "sec_num": "8.1" |
| }, |
| { |
| "text": "In this particular example, we trained the grammar on 40 000 sentences from the Tiger Treebank with lengths up to 160 tokens. We evaluated on ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation data", |
| "sec_num": "8.1" |
| }, |
| { |
| "text": "We evaluated our parser by comparing it with Rparse's built-in parser. Note that we are only interested in the efficiency of our implementation, not the coverage and accuracy of the trained grammar. In the comparison we used only the admissible heuristics, and we did confirm that the parsers produce optimal trees with exactly the same weight for the same input.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison with Rparse", |
| "sec_num": "8.2" |
| }, |
| { |
| "text": "Rparse extracts grammars in two steps. First it converts the treebank into a PMCFG, and then it binarizes that grammar. The binarization process uses markovization to improve the precision and recall of the final grammar (Kallmeyer and Maier, 2013) . We tested both Rparse's standard (Kallmeyer and Maier, 2013) and its new improved parsing alogorithm (Maier et al., 2012) . The new algorithm unfortunately works only with LCFRS grammars with a fan-out \u2264 2 (Maier et al., 2012) .", |
| "cite_spans": [ |
| { |
| "start": 221, |
| "end": 248, |
| "text": "(Kallmeyer and Maier, 2013)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 284, |
| "end": 311, |
| "text": "(Kallmeyer and Maier, 2013)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 352, |
| "end": 372, |
| "text": "(Maier et al., 2012)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 457, |
| "end": 477, |
| "text": "(Maier et al., 2012)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison with Rparse", |
| "sec_num": "8.2" |
| }, |
| { |
| "text": "In this test we used the optimal binarization method described in Kallmeyer (2010, chapter 7.2) . This was the only binarization algorithm in Rparse that produced a grammar with fan-out \u2264 2.", |
| "cite_spans": [ |
| { |
| "start": 66, |
| "end": 95, |
| "text": "Kallmeyer (2010, chapter 7.2)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison with Rparse", |
| "sec_num": "8.2" |
| }, |
| { |
| "text": "As can be seen in Figure 3 , our parser outperforms Rparse for all sentence lengths. For sentences longer than 15 tokens, the standard Rparse parser needs on average 100 times longer time than our parser. This difference increases with sentence length, suggesting that our algorithm has a better parsing complexity than Rparse.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 18, |
| "end": 26, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison with Rparse", |
| "sec_num": "8.2" |
| }, |
| { |
| "text": "The PGF parser also outperforms the improved Rparse parser, but the relative difference seems to stabilize on a speedup of 10-15 times. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison with Rparse", |
| "sec_num": "8.2" |
| }, |
| { |
| "text": "In another test we compared the effect of the heuristic factor h described in Section 6. We used the same training and testing data as before, and we tried four different heuristic factors: h = 0, 0.50, 0.75 and 0.95. As mentioned in Section 6, a factor of 0 gives an admissible heuristics, which means that the parser is guaranteed to return the tree with the best weight. The parsing times are shown in Figure 4 . As can be seen, a higher heuristics factor h gives a considerable speed-up. For 40 token sentences, h = 0.50 gives an average speedup of 5 times, while h = 0.75 is 30 times faster, and h = 0.95 is almost 500 times faster than using the admissible heuristics h = 0. This is more clearly seen in Figure 5 , where the parsing times are shown relative to the admissible heuristics.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 405, |
| "end": 413, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 710, |
| "end": 718, |
| "text": "Figure 5", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparing different heuristics", |
| "sec_num": "8.3" |
| }, |
| { |
| "text": "Note that all charts have a logarithmic y-axis, which means that a straight line is equivalent to exponential growth. If we examine the graph lines more closely, we can see that they are not straight. The closest curves are in fact polynomial, with a degree of 4-6 depending on the parser and the value of h. 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparing different heuristics", |
| "sec_num": "8.3" |
| }, |
| { |
| "text": "What about the loss of parsing quality when we use a non-admissible heuristics? Firstly, as mentioned in Section 6, the parser still recognizes exactly the same language as defined by the grammar. The difference is that it is not guaranteed to return the tree with the best weight. In our evaluation we saw that for a factor h = 0.50, 80% of the trees are optimal, and only 3% of the trees have a weight more than 5% from the optimal weight. The performance gradually gets worse for higher h, and with h = 0.95 almost 10% of the trees have a weight more than 20% from the optimum.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-admissibility and parsing quality", |
| "sec_num": "8.4" |
| }, |
| { |
| "text": "These numbers only show how the parsing quality degrades relative to the grammar. But since the grammar is trained from a treebank it is more interesting to evaluate how the parsing quality on the treebank sentences is affected when we use a non-admissible heuristics. Table 2 shows how the labelled precision and recall are changed with different values for h. The evaluation was done using the EVALB measure which is implemented in Rparse (Maier, 2010) . As can be seen, a factor of h = 0.50 only results in a f-score loss of 3 points, which is arguably not very much. On the other extreme, for h = 0.95 the f-score drops 14 points. ", |
| "cite_spans": [ |
| { |
| "start": 441, |
| "end": 454, |
| "text": "(Maier, 2010)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 269, |
| "end": 276, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Non-admissibility and parsing quality", |
| "sec_num": "8.4" |
| }, |
| { |
| "text": "The presented algorithm is an important generalization of the classical algorithms of Earley (1970) and Stolcke (1995) for parsing with probabilistic context-free grammars to the more general formalism of parallel multiple context-free grammars.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 99, |
| "text": "Earley (1970)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 104, |
| "end": 118, |
| "text": "Stolcke (1995)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "9" |
| }, |
| { |
| "text": "The algorithm has been implemented as part of the runtime for the Grammatical Framework (Ranta, 2011) , but it is not limited to GF alone.", |
| "cite_spans": [ |
| { |
| "start": 88, |
| "end": 101, |
| "text": "(Ranta, 2011)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "9" |
| }, |
| { |
| "text": "To show the universality of the algorithm, we evaluated it on large LCFRS grammars trained from the Tiger Treebank. Our parser is around 10-15 times faster than the latest, optimized version of the Rparse state-ofthe-art parser. This improvement seems to be constant, which means that it can be a consequence of low-level optimizations. More important is that our algorithm does not impose any restrictions at all on the underlying PMCFG grammar. Rparse on the other hand requires that the grammar is both binarized and has a fan-out of at most 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance", |
| "sec_num": "9.1" |
| }, |
| { |
| "text": "By using a non-admissible heuristics, the speed improves by orders of magnitude, at the expense of parsing quality. This makes it possible to parse long sentences (more than 50 tokens) in just around a second on a standard desktop computer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance", |
| "sec_num": "9.1" |
| }, |
| { |
| "text": "We would like to extend the algorithm to be able to use lexicalized statistical models (Collins, 2003) . Furthermore, it would be interesting to develop better heuristics for A * search, and to investigate how to incorporate beam search pruning into the algorithm.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 102, |
| "text": "(Collins, 2003)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Future work", |
| "sec_num": "9.2" |
| }, |
| { |
| "text": "https://github.com/wmaier/rparse", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The exception is the standard Rparse parser, which has a polynomial degree of 8.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Incremental parsing with parallel multiple context-free grammars", |
| "authors": [ |
| { |
| "first": "Krasimir", |
| "middle": [], |
| "last": "Angelov", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of EACL 2009, the 12th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Krasimir Angelov. 2009. Incremental parsing with parallel multiple context-free grammars. In Pro- ceedings of EACL 2009, the 12th Conference of the European Chapter of the Association for Computa- tional Linguistics, Athens, Greece.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "The Mechanics of the Grammatical Framework", |
| "authors": [ |
| { |
| "first": "Krasimir", |
| "middle": [], |
| "last": "Angelov", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Krasimir Angelov. 2011. The Mechanics of the Gram- matical Framework. Ph.D. thesis, Chalmers Univer- sity of Technology, Gothenburg, Sweden.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The TIGER treebank", |
| "authors": [ |
| { |
| "first": "Sabine", |
| "middle": [], |
| "last": "Brants", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefanie", |
| "middle": [], |
| "last": "Dipper", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvia", |
| "middle": [], |
| "last": "Hansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Lezius", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of TLT 2002, the 1st Workshop on Treebanks and Linguistic Theories", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sabine Brants, Stefanie Dipper, Silvia Hansen, Wolf- gang Lezius, and George Smith. 2002. The TIGER treebank. In Proceedings of TLT 2002, the 1st Work- shop on Treebanks and Linguistic Theories, So- zopol, Bulgaria.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Parsing linear context-free rewriting systems", |
| "authors": [ |
| { |
| "first": "H\u00e5kan", |
| "middle": [], |
| "last": "Burden", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Ljungl\u00f6f", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of IWPT 2005, the 9th International Workshop on Parsing Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H\u00e5kan Burden and Peter Ljungl\u00f6f. 2005. Parsing lin- ear context-free rewriting systems. In Proceedings of IWPT 2005, the 9th International Workshop on Parsing Technologies, Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Head-driven statistical models for natural language parsing", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "29", |
| "issue": "4", |
| "pages": "589--637", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Collins. 2003. Head-driven statistical models for natural language parsing. Computational Lin- guistics, 29(4):589-637.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "An efficient context-free parsing algorithm", |
| "authors": [ |
| { |
| "first": "Jay", |
| "middle": [], |
| "last": "Earley", |
| "suffix": "" |
| } |
| ], |
| "year": 1970, |
| "venue": "Communications of the ACM", |
| "volume": "13", |
| "issue": "2", |
| "pages": "94--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jay Earley. 1970. An efficient context-free parsing al- gorithm. Communications of the ACM, 13(2):94- 102.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A formal basis for the heuristic determination of minimum cost paths", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Hart", |
| "suffix": "" |
| }, |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Nilsson", |
| "suffix": "" |
| }, |
| { |
| "first": "Bertram", |
| "middle": [], |
| "last": "Raphael", |
| "suffix": "" |
| } |
| ], |
| "year": 1968, |
| "venue": "IEEE Transactions of Systems Science and Cybernetics", |
| "volume": "4", |
| "issue": "", |
| "pages": "100--107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Hart, Nils Nilsson, and Bertram Raphael. 1968. A formal basis for the heuristic determination of minimum cost paths. IEEE Transactions of Systems Science and Cybernetics, 4(2):100-107.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "An incremental Earley parser for simple range concatenation grammar", |
| "authors": [ |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Kallmeyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Maier", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of IWPT 2009, the 11th International Conference on Parsing Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laura Kallmeyer and Wolfgang Maier. 2009. An in- cremental Earley parser for simple range concatena- tion grammar. In Proceedings of IWPT 2009, the 11th International Conference on Parsing Technolo- gies, Paris, France.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Datadriven parsing using probabilistic linear contextfree rewriting systems", |
| "authors": [ |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Kallmeyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Maier", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Computational Linguistics", |
| "volume": "39", |
| "issue": "1", |
| "pages": "87--119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laura Kallmeyer and Wolfgang Maier. 2013. Data- driven parsing using probabilistic linear context- free rewriting systems. Computational Linguistics, 39(1):87-119.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Parsing Beyond Context-Free Grammars", |
| "authors": [ |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Kallmeyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laura Kallmeyer. 2010. Parsing Beyond Context-Free Grammars. Springer.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A prefix-correct Earley recognizer for multiple context-free grammars", |
| "authors": [ |
| { |
| "first": "Makoto", |
| "middle": [], |
| "last": "Kanazawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of TAG+9, the 9th International Workshop on Tree Adjoining Grammar and Related Formalisms", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Makoto Kanazawa. 2008. A prefix-correct Earley recognizer for multiple context-free grammars. In Proceedings of TAG+9, the 9th International Work- shop on Tree Adjoining Grammar and Related For- malisms, T\u00fcbingen, Germany.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Stochastic multiple context-free grammar for RNA pseudoknot modeling", |
| "authors": [ |
| { |
| "first": "Yuki", |
| "middle": [], |
| "last": "Kato", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Seki", |
| "suffix": "" |
| }, |
| { |
| "first": "Tadao", |
| "middle": [], |
| "last": "Kasami", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of TAGRF 2006, the 8th International Workshop on Tree Adjoining Grammar and Related Formalisms", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuki Kato, Hiroyuki Seki, and Tadao Kasami. 2006. Stochastic multiple context-free grammar for RNA pseudoknot modeling. In Proceedings of TAGRF 2006, the 8th International Workshop on Tree Ad- joining Grammar and Related Formalisms, Sydney, Australia.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A * parsing: fast exact Viterbi parse selection", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of HLT-NAACL 2003, the Human Language Technology Conference of the North American Chapter", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Klein and Christopher D. Manning. 2003. A * parsing: fast exact Viterbi parse selection. In Pro- ceedings of HLT-NAACL 2003, the Human Lan- guage Technology Conference of the North Ameri- can Chapter of the Association for Computational Linguistics, Edmonton, Canada.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Practical parsing of parallel multiple context-free grammars", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Ljungl\u00f6f", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of TAG+11, the 11th International Workshop on Tree Adjoining Grammar and Related Formalisms", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Ljungl\u00f6f. 2012. Practical parsing of parallel multiple context-free grammars. In Proceedings of TAG+11, the 11th International Workshop on Tree Adjoining Grammar and Related Formalisms, Paris, France.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "PLCFRS parsing revisited: Restricting the fan-out to two", |
| "authors": [ |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Maier", |
| "suffix": "" |
| }, |
| { |
| "first": "Miriam", |
| "middle": [], |
| "last": "Kaeshammer", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Kallmeyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of TAG+11, the 11th International Workshop on Tree Adjoining Grammar and Related Formalisms", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wolfgang Maier, Miriam Kaeshammer, and Laura Kallmeyer. 2012. PLCFRS parsing revisited: Re- stricting the fan-out to two. In Proceedings of TAG+11, the 11th International Workshop on Tree Adjoining Grammar and Related Formalisms, Paris, France.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Direct parsing of discontinuous constituents in German", |
| "authors": [ |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Maier", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of SPRML 2010, the 1st Workshop on Statistical Parsing of Morphologically-Rich Languages, Los Angeles", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wolfgang Maier. 2010. Direct parsing of discontin- uous constituents in German. In Proceedings of SPRML 2010, the 1st Workshop on Statistical Pars- ing of Morphologically-Rich Languages, Los Ange- les, California.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "LCFRS binarization and debinarization for directional parsing", |
| "authors": [ |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Maier", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of IWPT 2013, the 13th International Conference on Parsing Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wolfgang Maier. 2013. LCFRS binarization and de- binarization for directional parsing. In Proceedings of IWPT 2013, the 13th International Conference on Parsing Technologies, Nara, Japan.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Building a large annotated corpus of English: the Penn Treebank", |
| "authors": [ |
| { |
| "first": "Mitchell", |
| "middle": [ |
| "P" |
| ], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ann" |
| ], |
| "last": "Marcinkiewicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Beatrice", |
| "middle": [], |
| "last": "Santorini", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "", |
| "pages": "313--330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beatrice Santorini. 1993. Building a large anno- tated corpus of English: the Penn Treebank. Com- putational Linguistics, 19:313-330.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A partial dictionary of English in computer-usable form", |
| "authors": [ |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Mitton", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Literary & Linguistic Computing", |
| "volume": "1", |
| "issue": "4", |
| "pages": "214--215", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roger Mitton. 1986. A partial dictionary of English in computer-usable form. Literary & Linguistic Com- puting, 1(4):214-215.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Weighted deductive parsing and Knuth's algorithm", |
| "authors": [ |
| { |
| "first": "Mark-Jan", |
| "middle": [], |
| "last": "Nederhof", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "29", |
| "issue": "1", |
| "pages": "135--143", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark-Jan Nederhof. 2003. Weighted deductive pars- ing and Knuth's algorithm. Computational Linguis- tics, 29(1):135-143.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The GF resource grammar library", |
| "authors": [ |
| { |
| "first": "Aarne", |
| "middle": [], |
| "last": "Ranta", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Linguistic Issues in Language Technology", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aarne Ranta. 2009. The GF resource grammar library. Linguistic Issues in Language Technology, 2(2).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Grammatical Framework: Programming with Multilingual Grammars", |
| "authors": [ |
| { |
| "first": "Aarne", |
| "middle": [], |
| "last": "Ranta", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "CSLI Publications", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aarne Ranta. 2011. Grammatical Framework: Pro- gramming with Multilingual Grammars. CSLI Pub- lications, Stanford.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "On multiple contextfree grammars", |
| "authors": [ |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Seki", |
| "suffix": "" |
| }, |
| { |
| "first": "Takashi", |
| "middle": [], |
| "last": "Matsumura", |
| "suffix": "" |
| }, |
| { |
| "first": "Mamoru", |
| "middle": [], |
| "last": "Fujii", |
| "suffix": "" |
| }, |
| { |
| "first": "Tadao", |
| "middle": [], |
| "last": "Kasami", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Theoretical Computer Science", |
| "volume": "88", |
| "issue": "2", |
| "pages": "191--229", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiroyuki Seki, Takashi Matsumura, Mamoru Fujii, and Tadao Kasami. 1991. On multiple context- free grammars. Theoretical Computer Science, 88(2):191-229.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "An efficient probabilistic context-free parsing algorithm that computes prefix probabilities", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Computational Linguistics", |
| "volume": "21", |
| "issue": "2", |
| "pages": "165--201", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Stolcke. 1995. An efficient probabilis- tic context-free parsing algorithm that computes prefix probabilities. Computational Linguistics, 21(2):165-201.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Figure 1: Example Grammar" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Parsing time (seconds) compared with Rparse." |
| }, |
| "FIGREF2": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Parsing time (seconds) with different heuristics factors." |
| }, |
| "FIGREF3": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Relative parsing time for different values of h, compared to admissible heuristic." |
| }, |
| "TABREF0": { |
| "num": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null, |
| "text": "hold because the forest of trees forB d is included in the forest for B d . From this we conclude the monotonicity condition:" |
| } |
| } |
| } |
| } |