| { |
| "paper_id": "P12-1046", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:29:14.130130Z" |
| }, |
| "title": "Bayesian Symbol-Refined Tree Substitution Grammars for Syntactic Parsing", |
| "authors": [ |
| { |
| "first": "Hiroyuki", |
| "middle": [], |
| "last": "Shindo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "NTT Communication Science Laboratories", |
| "institution": "NTT Corporation", |
| "location": { |
| "addrLine": "2-4 Hikaridai, Seika-cho, Soraku-gun", |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "shindo.hiroyuki@lab.ntt.co.jp" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "yusuke@nii.ac.jp" |
| }, |
| { |
| "first": "Akinori", |
| "middle": [], |
| "last": "Fujino", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "NTT Communication Science Laboratories", |
| "institution": "NTT Corporation", |
| "location": { |
| "addrLine": "2-4 Hikaridai, Seika-cho, Soraku-gun", |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "fujino.akinori@lab.ntt.co.jp" |
| }, |
| { |
| "first": "Masaaki", |
| "middle": [], |
| "last": "Nagata", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "NTT Communication Science Laboratories", |
| "institution": "NTT Corporation", |
| "location": { |
| "addrLine": "2-4 Hikaridai, Seika-cho, Soraku-gun", |
| "settlement": "Kyoto", |
| "country": "Japan" |
| } |
| }, |
| "email": "nagata.masaaki@lab.ntt.co.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We propose Symbol-Refined Tree Substitution Grammars (SR-TSGs) for syntactic parsing. An SR-TSG is an extension of the conventional TSG model where each nonterminal symbol can be refined (subcategorized) to fit the training data. We aim to provide a unified model where TSG rules and symbol refinement are learned from training data in a fully automatic and consistent fashion. We present a novel probabilistic SR-TSG model based on the hierarchical Pitman-Yor Process to encode backoff smoothing from a fine-grained SR-TSG to simpler CFG rules, and develop an efficient training method based on Markov Chain Monte Carlo (MCMC) sampling. Our SR-TSG parser achieves an F1 score of 92.4% in the Wall Street Journal (WSJ) English Penn Treebank parsing task, which is a 7.7 point improvement over a conventional Bayesian TSG parser, and better than state-of-the-art discriminative reranking parsers.", |
| "pdf_parse": { |
| "paper_id": "P12-1046", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We propose Symbol-Refined Tree Substitution Grammars (SR-TSGs) for syntactic parsing. An SR-TSG is an extension of the conventional TSG model where each nonterminal symbol can be refined (subcategorized) to fit the training data. We aim to provide a unified model where TSG rules and symbol refinement are learned from training data in a fully automatic and consistent fashion. We present a novel probabilistic SR-TSG model based on the hierarchical Pitman-Yor Process to encode backoff smoothing from a fine-grained SR-TSG to simpler CFG rules, and develop an efficient training method based on Markov Chain Monte Carlo (MCMC) sampling. Our SR-TSG parser achieves an F1 score of 92.4% in the Wall Street Journal (WSJ) English Penn Treebank parsing task, which is a 7.7 point improvement over a conventional Bayesian TSG parser, and better than state-of-the-art discriminative reranking parsers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Syntactic parsing has played a central role in natural language processing. The resulting syntactic analysis can be used for various applications such as machine translation (Galley et al., 2004; DeNeefe and Knight, 2009) , sentence compression (Cohn and Lapata, 2009; Yamangil and Shieber, 2010) , and question answering (Wang et al., 2007) . Probabilistic context-free grammar (PCFG) underlies many statistical parsers, however, it is well known that the PCFG rules extracted from treebank data via maximum likelihood estimation do not perform well due to unrealistic context freedom assumptions (Klein and Manning, 2003) .", |
| "cite_spans": [ |
| { |
| "start": 174, |
| "end": 195, |
| "text": "(Galley et al., 2004;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 196, |
| "end": 221, |
| "text": "DeNeefe and Knight, 2009)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 245, |
| "end": 268, |
| "text": "(Cohn and Lapata, 2009;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 269, |
| "end": 296, |
| "text": "Yamangil and Shieber, 2010)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 322, |
| "end": 341, |
| "text": "(Wang et al., 2007)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 598, |
| "end": 623, |
| "text": "(Klein and Manning, 2003)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In recent years, there has been an increasing interest in tree substitution grammar (TSG) as an alternative to CFG for modeling syntax trees (Post and Gildea, 2009; Tenenbaum et al., 2009; . TSG is a natural extension of CFG in which nonterminal symbols can be rewritten (substituted) with arbitrarily large tree fragments. These tree fragments have great advantages over tiny CFG rules since they can capture non-local contexts explicitly such as predicate-argument structures, idioms and grammatical agreements . Previous work on TSG parsing Post and Gildea, 2009; Bansal and Klein, 2010) has consistently shown that a probabilistic TSG (PTSG) parser is significantly more accurate than a PCFG parser, but is still inferior to state-of-the-art parsers (e.g., the Berkeley parser (Petrov et al., 2006) and the Charniak parser (Charniak and Johnson, 2005) ). One major drawback of TSG is that the context freedom assumptions still remain at substitution sites, that is, TSG tree fragments are generated that are conditionally independent of all others given root nonterminal symbols. Furthermore, when a sentence is unparsable with large tree fragments, the PTSG parser usually uses naive CFG rules derived from its backoff model, which diminishes the benefits obtained from large tree fragments.", |
| "cite_spans": [ |
| { |
| "start": 141, |
| "end": 164, |
| "text": "(Post and Gildea, 2009;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 165, |
| "end": 188, |
| "text": "Tenenbaum et al., 2009;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 544, |
| "end": 566, |
| "text": "Post and Gildea, 2009;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 567, |
| "end": 590, |
| "text": "Bansal and Klein, 2010)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 781, |
| "end": 802, |
| "text": "(Petrov et al., 2006)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 827, |
| "end": 855, |
| "text": "(Charniak and Johnson, 2005)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "On the other hand, current state-of-the-art parsers use symbol refinement techniques (Johnson, 1998; Collins, 2003; Matsuzaki et al., 2005 ). Symbol refinement is a successful approach for weakening context freedom assumptions by dividing coarse treebank symbols (e.g. NP and VP) into subcategories, rather than extracting large tree fragments. As shown in several studies on TSG parsing (Zuidema, 2007; Bansal and Klein, 2010) , large tree fragments and symbol refinement work complementarily for syntactic parsing. For example, Bansal and Klein (2010) have reported that deterministic symbol refinement with heuristics helps improve the accuracy of a TSG parser.", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 100, |
| "text": "(Johnson, 1998;", |
| "ref_id": null |
| }, |
| { |
| "start": 101, |
| "end": 115, |
| "text": "Collins, 2003;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 116, |
| "end": 138, |
| "text": "Matsuzaki et al., 2005", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 388, |
| "end": 403, |
| "text": "(Zuidema, 2007;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 404, |
| "end": 427, |
| "text": "Bansal and Klein, 2010)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 530, |
| "end": 553, |
| "text": "Bansal and Klein (2010)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose Symbol-Refined Tree Substitution Grammars (SR-TSGs) for syntactic parsing. SR-TSG is an extension of the conventional TSG model where each nonterminal symbol can be refined (subcategorized) to fit the training data. Our work differs from previous studies in that we focus on a unified model where TSG rules and symbol refinement are learned from training data in a fully automatic and consistent fashion. We also propose a novel probabilistic SR-TSG model with the hierarchical Pitman-Yor Process (Pitman and Yor, 1997) , namely a sort of nonparametric Bayesian model, to encode backoff smoothing from a fine-grained SR-TSG to simpler CFG rules, and develop an efficient training method based on blocked MCMC sampling.", |
| "cite_spans": [ |
| { |
| "start": 523, |
| "end": 545, |
| "text": "(Pitman and Yor, 1997)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our SR-TSG parser achieves an F1 score of 92.4% in the WSJ English Penn Treebank parsing task, which is a 7.7 point improvement over a conventional Bayesian TSG parser, and superior to state-of-the-art discriminative reranking parsers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our SR-TSG work is built upon recent work on Bayesian TSG induction from parse trees (Post and Gildea, 2009; . We firstly review the Bayesian TSG model used in that work, and then present related work on TSGs and symbol refinement.", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 108, |
| "text": "(Post and Gildea, 2009;", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A TSG consists of a 4-tuple, G = (T, N, S, R), where T is a set of terminal symbols, N is a set of nonterminal symbols, S \u2208 N is the distinguished start nonterminal symbol and R is a set of productions (a.k.a. rules). The productions take the form of elementary trees i.e., tree fragments of height \u2265 1. The root and internal nodes of the elementary trees are labeled with nonterminal symbols, and leaf nodes are labeled with either terminal or nonterminal symbols. Nonterminal leaves are referred to as frontier nonterminals, and form the substitution sites to be combined with other elementary trees.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A derivation is a process of forming a parse tree. It starts with a root symbol and rewrites (substi-tutes) nonterminal symbols with elementary trees until there are no remaining frontier nonterminals. Figure 1a shows an example parse tree and Figure 1b shows its example TSG derivation. Since different derivations may produce the same parse tree, recent work on TSG induction (Post and Gildea, 2009; ) employs a probabilistic model of a TSG and predicts derivations from observed parse trees in an unsupervised way.", |
| "cite_spans": [ |
| { |
| "start": 379, |
| "end": 402, |
| "text": "(Post and Gildea, 2009;", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 202, |
| "end": 211, |
| "text": "Figure 1a", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 244, |
| "end": 254, |
| "text": "Figure 1b", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A Probabilistic Tree Substitution Grammar (PTSG) assigns a probability to each rule in the grammar. The probability of a derivation is defined as the product of the probabilities of its component elementary trees as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "p (e) = x\u2192e\u2208e p (e |x ) ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where e = (e 1 , e 2 , . . .) is a sequence of elementary trees used for the derivation, x = root (e) is the root symbol of e, and p (e |x ) is the probability of generating e given its root symbol x. As in a PCFG, e is generated conditionally independent of all others given x.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The posterior distribution over elementary trees given a parse tree t can be computed by using the Bayes' rule:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "p (e |t ) \u221d p (t |e ) p (e) .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where p (t |e ) is either equal to 1 (when t and e are consistent) or 0 (otherwise). Therefore, the task of TSG induction from parse trees turns out to consist of modeling the prior distribution p (e). Recent work on TSG induction defines p (e) as a nonparametric Bayesian model such as the Dirichlet Process (Ferguson, 1973) or the Pitman-Yor Process to encourage sparse and compact grammars.", |
| "cite_spans": [ |
| { |
| "start": 309, |
| "end": 325, |
| "text": "(Ferguson, 1973)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Several studies have combined TSG induction and symbol refinement. An adaptor grammar (Johnson et al., 2007a ) is a sort of nonparametric Bayesian TSG model with symbol refinement, and is thus closely related to our SR-TSG model. However, an adaptor grammar differs from ours in that all its rules are complete: all leaf nodes must be terminal symbols, while our model permits nonterminal symbols as leaf nodes. Furthermore, adaptor grammars have largely been applied to the task of unsupervised structural induction from raw texts such as morphology analysis, word segmentation (Johnson and Goldwater, 2009) , and dependency grammar induction (Cohen et al., 2010), rather than constituent syntax parsing. An all-fragments grammar (Bansal and Klein, 2010) is another variant of TSG that aims to utilize all possible subtrees as rules. It maps a TSG to an implicit representation to make the grammar tractable and practical for large-scale parsing. The manual symbol refinement described in (Klein and Manning, 2003) was applied to an all-fragments grammar and this improved accuracy in the English WSJ parsing task. As mentioned in the introduction, our model focuses on the automatic learning of a TSG and symbol refinement without heuristics.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 108, |
| "text": "(Johnson et al., 2007a", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 579, |
| "end": 608, |
| "text": "(Johnson and Goldwater, 2009)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 731, |
| "end": 755, |
| "text": "(Bansal and Klein, 2010)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 990, |
| "end": 1015, |
| "text": "(Klein and Manning, 2003)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background and Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this section, we propose Symbol-Refined Tree Substitution Grammars (SR-TSGs) for syntactic parsing. Our SR-TSG model is an extension of the conventional TSG model where every symbol of the elementary trees can be refined to fit the training data. Figure 1c shows an example of SR-TSG derivation. As with previous work on TSG induction, our task is the induction of SR-TSG derivations from a corpus of parse trees in an unsupervised fashion. That is, we wish to infer the symbol subcategories of every node and substitution site (i.e., nodes where substitution occurs) from parse trees. Extracted rules and their probabilities can be used to parse new raw sentences.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 250, |
| "end": 259, |
| "text": "Figure 1c", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Symbol-Refined Tree Substitution Grammars", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We define a probabilistic model of an SR-TSG based on the Pitman-Yor Process (PYP) (Pitman and Yor, 1997) , namely a sort of nonparametric Bayesian model. The PYP produces power-law distributions, which have been shown to be well-suited for such uses as language modeling (Teh, 2006b) , and TSG induction . One major issue as regards modeling an SR-TSG is that the space of the grammar rules will be very sparse since SR-TSG allows for arbitrarily large tree fragments and also an arbitrarily large set of symbol subcategories. To address the sparseness problem, we employ a hierarchical PYP to encode a backoff scheme from the SR-TSG rules to simpler CFG rules, inspired by recent work on dependency parsing . Our model consists of a three-level hierarchy. Table 1 shows an example of the SR-TSG rule and its backoff tree fragments as an illustration of this threelevel hierarchy. The topmost level of our model is a distribution over the SR-TSG rules as follows.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 105, |
| "text": "(Pitman and Yor, 1997)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 272, |
| "end": 284, |
| "text": "(Teh, 2006b)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "e |x k \u223c G x k G x k \u223c PYP d x k , \u03b8 x k , P sr-tsg (\u2022 |x k ) ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where x k is a refined root symbol of an elementary tree e, while x is a raw nonterminal symbol in the corpus and k = 0, 1, . . . is an index of the symbol subcategory. Suppose x is NP and its symbol subcategory is 0, then x k is NP 0 . The PYP has three parameters: is a base distribution over infinite space of symbolrefined elementary trees rooted with x k , which provides the backoff probability of e. The remaining parameters d x k and \u03b8 x k control the strength of the base distribution. The backoff probability P sr-tsg (e |x k ) is given by the product of symbol-refined CFG (SR-CFG) rules that e contains as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "(d x k , \u03b8 x k , P sr-tsg ). P sr-tsg (\u2022 |x k ) SR-TSG SR-CFG RU-CFG", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "P sr-tsg (e |x k ) = f \u2208F (e) s c f \u00d7 i\u2208I(e) (1 \u2212 s ci ) \u00d7 H (cfg-rules (e |x k )) \u03b1 |x k \u223c H x k H x k \u223c PYP d x , \u03b8 x , P sr-cfg (\u2022 |x k ) ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where F (e) is a set of frontier nonterminal nodes and I (e) is a set of internal nodes in e. c f and c i are nonterminal symbols of nodes f and i, respectively. s c is the probability of stopping the expansion of a node labeled with c. SR-CFG rules are CFG rules where every symbol is refined, as shown in Table 1 . The function cfg-rules (e |x k ) returns the SR-CFG rules that e contains, which take the form of x k \u2192 \u03b1. Each SR-CFG rule \u03b1 rooted with x k is drawn from the backoff distribution H x k , and H x k is produced by the PYP with parameters: d x , \u03b8 x , P sr-cfg . This distribution over the SR-CFG rules forms the second level hierarchy of our model. The backoff probability of the SR-CFG rule, P sr-cfg (\u03b1 |x k ), is given by the root-unrefined CFG (RU-CFG) rule as follows,", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 307, |
| "end": 314, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "P sr-cfg (\u03b1 |x k ) = I (root-unrefine (\u03b1 |x k )) \u03b1 |x \u223c I x I x \u223c PYP d x , \u03b8 x , P ru-cfg (\u2022 |x ) ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where the function root-unrefine (\u03b1 |x k ) returns the RU-CFG rule of \u03b1, which takes the form of x \u2192 \u03b1. The RU-CFG rule is a CFG rule where the root symbol is unrefined and all leaf nonterminal symbols are refined, as shown in Table 1 . Each RU-CFG rule \u03b1 rooted with x is drawn from the backoff distribution I x , and I x is produced by a PYP. This distribution over the RU-CFG rules forms the third level hierarchy of our model. Finally, we set the backoff probability of the RU-CFG rule, P ru-cfg (\u03b1 |x ), so that it is uniform as follows.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 227, |
| "end": 234, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "P ru-cfg (\u03b1 |x ) = 1 |x \u2192 \u2022| .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where |x \u2192 \u2022| is the number of RU-CFG rules rooted with x. Overall, our hierarchical model encodes backoff smoothing consistently from the SR-TSG rules to the SR-CFG rules, and from the SR-CFG rules to the RU-CFG rules. As shown in (Blunsom and Cohen et al., 2010) , the parsing accuracy of the TSG model is strongly affected by its backoff model. The effects of our hierarchical backoff model on parsing performance are evaluated in Section 5.", |
| "cite_spans": [ |
| { |
| "start": 245, |
| "end": 264, |
| "text": "Cohen et al., 2010)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Model", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We use Markov Chain Monte Carlo (MCMC) sampling to infer the SR-TSG derivations from parse trees. MCMC sampling is a widely used approach for obtaining random samples from a probability distribution. In our case, we wish to obtain derivation samples of an SR-TSG from the posterior distribution, p (e |t, d, \u03b8, s ) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 296, |
| "end": 314, |
| "text": "p (e |t, d, \u03b8, s )", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The inference of the SR-TSG derivations corresponds to inferring two kinds of latent variables: latent symbol subcategories and latent substitution sites. We first infer latent symbol subcategories for every symbol in the parse trees, and then infer latent substitution sites stepwise. During the inference of symbol subcategories, every internal node is fixed as a substitution site. After that, we unfix that assumption and infer latent substitution sites given symbolrefined parse trees. This stepwise learning is simple and efficient in practice, but we believe that the joint learning of both latent variables is possible, and we will deal with this in future work. Here we describe each inference algorithm in detail.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For the inference of latent symbol subcategories, we adopt split and merge training (Petrov et al., 2006) as follows. In each split-merge step, each symbol is split into at most two subcategories. For example, every NP symbol in the training data is split into either NP 0 or NP 1 to maximize the posterior probability. After convergence, we measure the loss of each split symbol in terms of the likelihood incurred when removing it, then the smallest 50% of the newly split symbols as regards that loss are merged to avoid overfitting. The split-merge algorithm terminates when the total number of steps reaches the user-specified value.", |
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 105, |
| "text": "(Petrov et al., 2006)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Symbol Subcategories", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In each splitting step, we use two types of blocked MCMC algorithm: the sentence-level blocked Metroporil-Hastings (MH) sampler and the treelevel blocked Gibbs sampler, while (Petrov et al., 2006 ) use a different MLE-based model and the EM algorithm. Our sampler iterates sentence-level sampling and tree-level sampling alternately.", |
| "cite_spans": [ |
| { |
| "start": 175, |
| "end": 195, |
| "text": "(Petrov et al., 2006", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Symbol Subcategories", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The sentence-level MH sampler is a recently proposed algorithm for grammar induction (Johnson et al., 2007b; . In this work, we apply it to the training of symbol splitting. The MH sampler consists of the following three steps: for each sentence, 1) calculate the inside probability (Lari and Young, 1991) in a bottom-up manner, 2) sample a derivation tree in a top-down manner, and 3) accept or reject the derivation sample by using the MH test. See for details. This sampler simultaneously updates blocks of latent variables associated with a sentence, thus it can find MAP solutions efficiently.", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 108, |
| "text": "(Johnson et al., 2007b;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 283, |
| "end": 305, |
| "text": "(Lari and Young, 1991)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Symbol Subcategories", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The tree-level blocked Gibbs sampler focuses on the type of SR-TSG rules and simultaneously up-dates all root and child nodes that are annotated with the same SR-TSG rule. For example, the sampler collects all nodes that are annotated with S 0 \u2192 NP 1 VP 2 , then updates those nodes to another subcategory such as S 0 \u2192 NP 2 VP 0 according to the posterior distribution. This sampler is similar to table label resampling (Johnson and Goldwater, 2009) , but differs in that our sampler can update multiple table labels simultaneously when multiple tables are labeled with the same elementary tree. The tree-level sampler also simultaneously updates blocks of latent variables associated with the type of SR-TSG rules, thus it can find MAP solutions efficiently.", |
| "cite_spans": [ |
| { |
| "start": 421, |
| "end": 450, |
| "text": "(Johnson and Goldwater, 2009)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Symbol Subcategories", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "After the inference of symbol subcategories, we use Gibbs sampling to infer the substitution sites of parse trees as described in (Cohn and Lapata, 2009; Post and Gildea, 2009) . We assign a binary variable to each internal node in the training data, which indicates whether that node is a substitution site or not. For each iteration, the Gibbs sampler works by sampling the value of each binary variable in random order. See for details.", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 153, |
| "text": "(Cohn and Lapata, 2009;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 154, |
| "end": 176, |
| "text": "Post and Gildea, 2009)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Substitution Sites", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "During the inference, our sampler ignores the symbol subcategories of internal nodes of elementary trees since they do not affect the derivation of the SR-TSG. For example, the elementary trees \"(S 0 (NP 0 NNP 0 ) VP 0 )\" and \"(S 0 (NP 1 NNP 0 ) VP 0 )\" are regarded as being the same when we calculate the generation probabilities according to our model. This heuristics is helpful for finding large tree fragments and learning compact grammars.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Inference of Substitution Sites", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We treat hyperparameters {d, \u03b8} as random variables and update their values for every MCMC iteration. We place a prior on the hyperparameters as follows: (1, 1) . The values of d and \u03b8 are optimized with the auxiliary variable technique (Teh, 2006a) .", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 249, |
| "text": "(Teh, 2006a)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 154, |
| "end": 160, |
| "text": "(1, 1)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hyperparameter Estimation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "d \u223c Beta (1, 1), \u03b8 \u223c Gamma", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyperparameter Estimation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We ran experiments on the Wall Street Journal (WSJ) portion of the English Penn Treebank data set (Marcus et al., 1993) , using a standard data split (sections 2-21 for training, 22 for development and 23 for testing). We also used section 2 as a small training set for evaluating the performance of our model under low-resource conditions. Henceforth, we distinguish the small training set (section 2) from the full training set (sections 2-21). The treebank data is right-binarized (Matsuzaki et al., 2005) to construct grammars with only unary and binary productions. We replace lexical words with count \u2264 5 in the training data with one of 50 unknown words using lexical features, following (Petrov et al., 2006) . We also split off all the function tags and eliminated empty nodes from the data set, following (Johnson, 1998) .", |
| "cite_spans": [ |
| { |
| "start": 98, |
| "end": 119, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 484, |
| "end": 508, |
| "text": "(Matsuzaki et al., 2005)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 695, |
| "end": 716, |
| "text": "(Petrov et al., 2006)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 815, |
| "end": 830, |
| "text": "(Johnson, 1998)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Preparation", |
| "sec_num": "5.1.1" |
| }, |
| { |
| "text": "For the inference of symbol subcategories, we trained our model with the MCMC sampler by using 6 split-merge steps for the full training set and 3 split-merge steps for the small training set. Therefore, each symbol can be subdivided into a maximum of 2 6 = 64 and 2 3 = 8 subcategories, respectively. In each split-merge step, we initialized the sampler by randomly splitting every symbol in two subcategories and ran the MCMC sampler for 1000 iterations. After that, to infer the substitution sites, we initialized the model with the final sample from a run on the small training set, and used the Gibbs sampler for 2000 iterations. We estimated the optimal values of the stopping probabilities s by using the development set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training and Parsing", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "We obtained the parsing results with the MAX-RULE-PRODUCT algorithm (Petrov et al., 2006) by using the SR-TSG rules extracted from our model. We evaluated the accuracy of our parser by bracketing F1 score of predicted parse trees. We used EVALB 1 to compute the F1 score. In all our experiments, we conducted ten independent runs to train our model, and selected the one that performed best on the development set in terms of parsing accuracy. SR-TSG (P sr-tsg , P sr-cfg , P ru-cfg ) 81.7 91.1 Table 2 : Comparison of parsing accuracy with the small and full training sets. *Our reimplementation of . Figure 2 : Histogram of SR-TSG and TSG rule sizes on the small training set. The size is defined as the number of CFG rules that the elementary tree contains.", |
| "cite_spans": [ |
| { |
| "start": 68, |
| "end": 89, |
| "text": "(Petrov et al., 2006)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 495, |
| "end": 502, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 602, |
| "end": 610, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training and Parsing", |
| "sec_num": "5.1.2" |
| }, |
| { |
| "text": "We compared the SR-TSG model with the CFG and TSG models as regards parsing accuracy. We also tested our model with three backoff hierarchy settings to evaluate the effects of backoff smoothing on parsing accuracy. Table 2 shows the F1 scores of the CFG, TSG and SR-TSG parsers for small and full training sets. In Table 2 , SR-TSG (P sr-tsg ) denotes that we used only the topmost level of the hierarchy. Similarly, SR-TSG (P sr-tsg , P sr-cfg ) denotes that we used only the P sr-tsg and P sr-cfg backoff models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 215, |
| "end": 222, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 315, |
| "end": 322, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of SR-TSG with TSG", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "Our best model, SR-TSG (P sr-tsg , P sr-cfg , P ru-cfg ), outperformed both the CFG and TSG models on both the small and large training sets. This result suggests that the conventional TSG model trained from the vanilla treebank is insufficient to resolve Model F1 (\u2264 40) F1 (all) TSG (no symbol refinement) Post and Gildea (2009) 82. structural ambiguities caused by coarse symbol annotations in a training corpus. As we expected, symbol refinement can be helpful with the TSG model for further fitting the training set and improving the parsing accuracy. The performance of the SR-TSG parser was strongly affected by its backoff models. For example, the simplest model, P sr-tsg , performed poorly compared with our best model. This result suggests that the SR-TSG rules extracted from the training set are very sparse and cannot cover the space of unknown syntax patterns in the testing set. Therefore, sophisticated backoff modeling is essential for the SR-TSG parser. Our hierarchical PYP modeling technique is a successful way to achieve backoff smoothing from sparse SR-TSG rules to simpler CFG rules, and offers the advantage of automatically estimating the optimal backoff probabilities from the training set.", |
| "cite_spans": [ |
| { |
| "start": 308, |
| "end": 330, |
| "text": "Post and Gildea (2009)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of SR-TSG with TSG", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "We compared the rule sizes and frequencies of SR-TSG with those of TSG. The rule sizes of SR-TSG and TSG are defined as the number of CFG rules that the elementary tree contains. Figure 2 shows a histogram of the SR-TSG and TSG rule sizes (by unrefined token) on the small training set. For example, SR-TSG rules: S 1 \u2192 NP 0 VP 1 and S 0 \u2192 NP 1 VP 2 were considered to be the same token. In Figure 2 , we can see that there are almost the same number of SR-TSG rules and TSG rules with size = 1. However, there are more SR-TSG rules than TSG rules with size \u2265 2. This shows that an SR-TSG can use various large tree fragments depending on the context, which is specified by the symbol subcategories.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 179, |
| "end": 187, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 391, |
| "end": 399, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of SR-TSG with TSG", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "We compared the accuracy of the SR-TSG parser with that of conventional high-performance parsers. Table 3 shows the F1 scores of an SR-TSG and conventional parsers with the full training set. In Table 3, SR-TSG (single) is a standard SR-TSG parser, and SR-TSG (multiple) is a combination of sixteen independently trained SR-TSG models, following the work of (Petrov, 2010) .", |
| "cite_spans": [ |
| { |
| "start": 358, |
| "end": 372, |
| "text": "(Petrov, 2010)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 98, |
| "end": 105, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison of SR-TSG with Other Models", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "Our SR-TSG (single) parser achieved an F1 score of 91.1%, which is a 6.4 point improvement over the conventional Bayesian TSG parser reported by . Our model can be viewed as an extension of Cohn's work by the incorporation of symbol refinement. Therefore, this result confirms that a TSG and symbol refinement work complementarily in improving parsing accuracy. Compared with a symbol-refined CFG model such as the Berkeley parser (Petrov et al., 2006) , the SR-TSG model can use large tree fragments, which strengthens the probability of frequent syntax patterns in the training set. Indeed, the few very large rules of our model memorized full parse trees of sentences, which were repeated in the training set.", |
| "cite_spans": [ |
| { |
| "start": 431, |
| "end": 452, |
| "text": "(Petrov et al., 2006)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of SR-TSG with Other Models", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "The SR-TSG (single) is a pure generative model of syntax trees but it achieved results comparable to those of discriminative parsers. It should be noted that discriminative reranking parsers such as (Charniak and Johnson, 2005) and (Huang, 2008) are constructed on a generative parser. The reranking parser takes the k-best lists of candidate trees or a packed forest produced by a baseline parser (usually a generative model), and then reranks the candidates using arbitrary features. Hence, we can expect that combining our SR-TSG model with a discriminative reranking parser would provide better performance than SR-TSG alone.", |
| "cite_spans": [ |
| { |
| "start": 213, |
| "end": 227, |
| "text": "Johnson, 2005)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 232, |
| "end": 245, |
| "text": "(Huang, 2008)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of SR-TSG with Other Models", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "Recently, (Petrov, 2010) has reported that combining multiple grammars trained independently gives significantly improved performance over a single grammar alone. We applied his method (referred to as a TREE-LEVEL inference) to the SR-TSG model as follows. We first trained sixteen SR-TSG models independently and produced a 100-best list of the derivations for each model. Then, we erased the subcategory information of parse trees and selected the best tree that achieved the highest likelihood under the product of sixteen models. The combination model, SR-TSG (multiple), achieved an F1 score of 92.4%, which is a state-of-the-art result for the WSJ parsing task. Compared with discriminative reranking parsers, combining multiple grammars by using the product model provides the advantage that it does not require any additional training. Several studies (Fossum and Knight, 2009; Zhang et al., 2009) have proposed different approaches that involve combining k-best lists of candidate trees. We will deal with those methods in future work.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 24, |
| "text": "(Petrov, 2010)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 860, |
| "end": 885, |
| "text": "(Fossum and Knight, 2009;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 886, |
| "end": 905, |
| "text": "Zhang et al., 2009)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of SR-TSG with Other Models", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "Let us note the relation between SR-CFG, TSG and SR-TSG. TSG is weakly equivalent to CFG and generates the same set of strings. For example, the TSG rule \"S \u2192 (NP NNP) VP\" with probability p can be converted to the equivalent CFG rules as follows: \"S \u2192 NP NNP VP \" with probability p and \"NP NNP \u2192 NNP\" with probability 1. From this viewpoint, TSG utilizes surrounding symbols (NNP of NP NNP in the above example) as latent variables with which to capture context information. The search space of learning a TSG given a parse tree is O(2^n) where n is the number of internal nodes of the parse tree. On the other hand, an SR-CFG utilizes an arbitrary index such as 0, 1, . . . as latent variables and the search space is larger than that of a TSG when the symbol refinement model allows for more than two subcategories for each symbol. Our experimental results confirm that jointly modeling both latent variables using our SR-TSG assists accurate parsing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of SR-TSG with Other Models", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "We have presented an SR-TSG, which is an extension of the conventional TSG model where each symbol of tree fragments can be automatically subcategorized to address the problem of the conditional independence assumptions of a TSG. We proposed a novel backoff modeling of an SR-TSG based on the hierarchical Pitman-Yor Process and sentence-level and tree-level blocked MCMC sampling for training our model. Our best model significantly outperformed the conventional TSG and achieved state-of-the-art result in a WSJ parsing task. Future work will involve examining the SR-TSG model for different languages and for unsupervised grammar induction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank Liang Huang for helpful comments and the three anonymous reviewers for thoughtful suggestions. We would also like to thank Slav Petrov and Hui Zhang for answering our questions about their parsers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Simple, Accurate Parsing with an All-Fragments Grammar", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1098--1107", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Bansal and Dan Klein. 2010. Simple, Accurate Parsing with an All-Fragments Grammar. In In Proc. of ACL, pages 1098-1107.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Unsupervised Induction of Tree Substitution Grammars for Dependency Parsing", |
| "authors": [ |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1204--1213", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Phil Blunsom and Trevor Cohn. 2010. Unsupervised Induction of Tree Substitution Grammars for Depen- dency Parsing. In Proc. of EMNLP, pages 1204-1213.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Coarseto-Fine n-Best Parsing and MaxEnt Discriminative Reranking", |
| "authors": [ |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proc. of ACL", |
| "volume": "1", |
| "issue": "", |
| "pages": "173--180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eugene Charniak and Mark Johnson. 2005. Coarse- to-Fine n-Best Parsing and MaxEnt Discriminative Reranking. In Proc. of ACL, 1:173-180.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Variational Inference for Adaptor Grammars", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "M" |
| ], |
| "last": "Shay B Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "564--572", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shay B Cohen, David M Blei, and Noah A Smith. 2010. Variational Inference for Adaptor Grammars. In In Proc. of HLT-NAACL, pages 564-572.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Sentence Compression as Tree Transduction", |
| "authors": [ |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "34", |
| "issue": "", |
| "pages": "637--674", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Trevor Cohn and Mirella Lapata. 2009. Sentence Com- pression as Tree Transduction. Journal of Artificial Intelligence Research, 34:637-674.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Inducing Tree-Substitution Grammars", |
| "authors": [ |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "11", |
| "issue": "", |
| "pages": "3053--3096", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Trevor Cohn, Phil Blunsom, and Sharon Goldwater. 2010. Inducing Tree-Substitution Grammars. Journal of Machine Learning Research, 11:3053-3096.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Head-Driven Statistical Models for Natural Language Parsing", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "29", |
| "issue": "", |
| "pages": "589--637", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Collins. 2003. Head-Driven Statistical Mod- els for Natural Language Parsing. Computational Lin- guistics, 29:589-637.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Synchronous Tree Adjoining Machine Translation", |
| "authors": [ |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Deneefe", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steve DeNeefe and Kevin Knight. 2009. Synchronous Tree Adjoining Machine Translation. In Proc. of EMNLP, page 727.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A Bayesian Analysis of Some Nonparametric Problems", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ferguson", |
| "suffix": "" |
| } |
| ], |
| "year": 1973, |
| "venue": "Annals of Statistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "209--230", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas S Ferguson. 1973. A Bayesian Analysis of Some Nonparametric Problems. Annals of Statistics, 1:209-230.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Combining Constituent Parsers", |
| "authors": [ |
| { |
| "first": "Victoria", |
| "middle": [], |
| "last": "Fossum", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Knight", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "253--256", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victoria Fossum and Kevin Knight. 2009. Combining Constituent Parsers. In Proc. of HLT-NAACL, pages 253-256.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "What's in a Translation Rule", |
| "authors": [ |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Hopkins", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Information Sciences", |
| "volume": "", |
| "issue": "", |
| "pages": "273--280", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michel Galley, Mark Hopkins, Kevin Knight, Daniel Marcu, Los Angeles, and Marina Del Rey. 2004. What's in a Translation Rule? Information Sciences, pages 273-280.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Forest Reranking : Discriminative Parsing with Non-Local Features", |
| "authors": [ |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang Huang. 2008. Forest Reranking : Discriminative Parsing with Non-Local Features. In Proc. of ACL, 19104:0.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Improving nonparameteric Bayesian inference: experiments on unsupervised word segmentation with adaptor grammars", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "317--325", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Johnson and Sharon Goldwater. 2009. Improving nonparameteric Bayesian inference: experiments on unsupervised word segmentation with adaptor gram- mars. In In Proc. of HLT-NAACL, pages 317-325.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Adaptor Grammars : A Framework for Specifying Compositional Nonparametric Bayesian Models", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "19", |
| "issue": "", |
| "pages": "641--648", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Johnson, Thomas L Griffiths, and Sharon Gold- water. 2007a. Adaptor Grammars : A Frame- work for Specifying Compositional Nonparametric Bayesian Models. Advances in Neural Information Processing Systems 19, 19:641-648.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Bayesian Inference for PCFGs via Markov chain Monte Carlo", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "139--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Johnson, Thomas L Griffiths, and Sharon Goldwa- ter. 2007b. Bayesian Inference for PCFGs via Markov chain Monte Carlo. In In Proc. of HLT-NAACL, pages 139-146.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Accurate Unlexicalized Parsing", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proc. of ACL", |
| "volume": "1", |
| "issue": "", |
| "pages": "423--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Klein and Christopher D Manning. 2003. Accurate Unlexicalized Parsing. In Proc. of ACL, 1:423-430.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Applications of Stochastic Context-Free Grammars Using the Inside-Outside Algorithm", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Lari", |
| "suffix": "" |
| }, |
| { |
| "first": "S J", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Computer Speech and Language", |
| "volume": "5", |
| "issue": "", |
| "pages": "237--257", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K Lari and S J Young. 1991. Applications of Stochas- tic Context-Free Grammars Using the Inside-Outside Algorithm. Computer Speech and Language, 5:237- 257.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Building a Large Annotated Corpus of English: The Penn Treebank", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Beatrice", |
| "middle": [], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ann" |
| ], |
| "last": "Santorini", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Marcinkiewicz", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "", |
| "pages": "313--330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitchell P Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a Large Annotated Corpus of English: The Penn Treebank. Computa- tional Linguistics, 19:313-330.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Probabilistic CFG with latent annotations", |
| "authors": [ |
| { |
| "first": "Takuya", |
| "middle": [], |
| "last": "Matsuzaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun'ichi", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "75--82", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Takuya Matsuzaki, Yusuke Miyao, and Jun'ichi Tsujii. 2005. Probabilistic CFG with latent annotations. In Proc. of ACL, pages 75-82.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Learning Accurate, Compact, and Interpretable Tree Annotation", |
| "authors": [ |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Barrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Romain", |
| "middle": [], |
| "last": "Thibaux", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "433--440", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Slav Petrov, Leon Barrett, Romain Thibaux, and Dan Klein. 2006. Learning Accurate, Compact, and In- terpretable Tree Annotation. In Proc. of ACL, pages 433-440.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Products of Random Latent Variable Grammars", |
| "authors": [ |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "19--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Slav Petrov. 2010. Products of Random Latent Variable Grammars. In Proc. of HLT-NAACL, pages 19-27.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "The two-parameter Poisson-Dirichlet distribution derived from a stable subordinator", |
| "authors": [ |
| { |
| "first": "Jim", |
| "middle": [], |
| "last": "Pitman", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Yor", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "The Annals of Probability", |
| "volume": "25", |
| "issue": "", |
| "pages": "855--900", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jim Pitman and Marc Yor. 1997. The two-parameter Poisson-Dirichlet distribution derived from a stable subordinator. The Annals of Probability, 25:855-900.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Bayesian Learning of a Tree Substitution Grammar", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Post", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of ACL-IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "45--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Post and Daniel Gildea. 2009. Bayesian Learning of a Tree Substitution Grammar. In In Proc. of ACL- IJCNLP, pages 45-48.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A Bayesian Interpretation of Interpolated Kneser-Ney. NUS School of Computing", |
| "authors": [ |
| { |
| "first": "Yee Whye", |
| "middle": [], |
| "last": "Teh", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yee Whye Teh. 2006a. A Bayesian Interpretation of Interpolated Kneser-Ney. NUS School of Computing Technical Report TRA2/06.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A Hierarchical Bayesian Language Model based on Pitman-Yor Processes", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [ |
| "W" |
| ], |
| "last": "Teh", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of ACL", |
| "volume": "44", |
| "issue": "", |
| "pages": "985--992", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "YW Teh. 2006b. A Hierarchical Bayesian Language Model based on Pitman-Yor Processes. In Proc. of ACL, 44:985-992.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Fragment Grammars: Exploring Computation and Reuse in Language", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tenenbaum", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "D" |
| ], |
| "last": "Tj O'donnell", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J Tenenbaum, TJ O'Donnell, and ND Goodman. 2009. Fragment Grammars: Exploring Computation and Reuse in Language. MIT Computer Science and Arti- ficial Intelligence Laboratory Technical Report Series.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "What is the Jeopardy Model ? A Quasi-Synchronous Grammar for QA", |
| "authors": [ |
| { |
| "first": "Mengqiu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Noah", |
| "suffix": "" |
| }, |
| { |
| "first": "Teruko", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mitamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of EMNLP-CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "22--32", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mengqiu Wang, Noah A Smith, and Teruko Mitamura. 2007. What is the Jeopardy Model ? A Quasi- Synchronous Grammar for QA. In Proc. of EMNLP- CoNLL, pages 22-32.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Bayesian Synchronous Tree-Substitution Grammar Induction and Its Application to Sentence Compression", |
| "authors": [ |
| { |
| "first": "Elif", |
| "middle": [], |
| "last": "Yamangil", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stuart", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Shieber", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "937--947", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elif Yamangil and Stuart M Shieber. 2010. Bayesian Synchronous Tree-Substitution Grammar Induction and Its Application to Sentence Compression. In In Proc. of ACL, pages 937-947.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "K-Best Combination of Syntactic Parsers", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Chew Lim Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1552--1560", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Zhang, Min Zhang, Chew Lim Tan, and Haizhou Li. 2009. K-Best Combination of Syntactic Parsers. In Proc. of EMNLP, pages 1552-1560.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Parsimonious Data-Oriented Parsing", |
| "authors": [ |
| { |
| "first": "Willem", |
| "middle": [], |
| "last": "Zuidema", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of EMNLP-CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "551--560", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Willem Zuidema. 2007. Parsimonious Data-Oriented Parsing. In Proc. of EMNLP-CoNLL, pages 551-560.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "(a) Example parse tree. (b) Example TSG derivation of (a). (c) Example SR-TSG derivation of (a). The refinement annotation is hyphenated with a nonterminal symbol.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Example three-level backoff.", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "html": null, |
| "content": "<table/>", |
| "num": null, |
| "text": "Our parsing performance for the testing set compared with those of other parsers. *Results for the development set (\u2264 100).", |
| "type_str": "table" |
| } |
| } |
| } |
| } |