| { |
| "paper_id": "K15-1003", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:09:01.400499Z" |
| }, |
| "title": "A Supertag-Context Model for Weakly-Supervised CCG Parser Learning", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Washington", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "cdyer@cs.cmu.edu" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Baldridge", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Texas at Austin", |
| "location": {} |
| }, |
| "email": "jbaldrid@utexas.edu" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "nasmith@cs.cmu.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Combinatory Categorial Grammar (CCG) is a lexicalized grammar formalism in which words are associated with categories that specify the syntactic configurations in which they may occur. We present a novel parsing model with the capacity to capture the associative adjacent-category relationships intrinsic to CCG by parameterizing the relationships between each constituent label and the preterminal categories directly to its left and right, biasing the model toward constituent categories that can combine with their contexts. This builds on the intuitions of Klein and Manning's (2002) \"constituent-context\" model, which demonstrated the value of modeling context, but has the advantage of being able to exploit the properties of CCG. Our experiments show that our model outperforms a baseline in which this context information is not captured.", |
| "pdf_parse": { |
| "paper_id": "K15-1003", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Combinatory Categorial Grammar (CCG) is a lexicalized grammar formalism in which words are associated with categories that specify the syntactic configurations in which they may occur. We present a novel parsing model with the capacity to capture the associative adjacent-category relationships intrinsic to CCG by parameterizing the relationships between each constituent label and the preterminal categories directly to its left and right, biasing the model toward constituent categories that can combine with their contexts. This builds on the intuitions of Klein and Manning's (2002) \"constituent-context\" model, which demonstrated the value of modeling context, but has the advantage of being able to exploit the properties of CCG. Our experiments show that our model outperforms a baseline in which this context information is not captured.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Learning parsers from incomplete or indirect supervision is an important component of moving NLP research toward new domains and languages. But with less information, it becomes necessary to devise ways of making better use of the information that is available. In general, this means constructing inductive biases that take advantage of unannotated data to train probabilistic models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "One important example is the constituentcontext model (CCM) of Klein and Manning (2002) , which was specifically designed to capture the linguistic observation made by Radford (1988) that there are regularities to the contexts in which constituents appear. This phenomenon, known as substitutability, says that phrases of the same type appear in similar contexts. For example, the part-of-speech (POS) sequence ADJ NOUN frequently occurs between the tags DET and VERB. This DET-VERB context also frequently applies to the single-word sequence NOUN and to ADJ ADJ NOUN. From this, we might deduce that DET-VERB is a likely context for a noun phrase. CCM is able to learn which POS contexts are likely, and does so via a probabilistic generative model, providing a statistical, data-driven take on substitutability. However, since there is nothing intrinsic about the POS pair DET-VERB that indicates a priori that it is a likely constituent context, this fact must be inferred entirely from the data. Baldridge (2008) observed that unlike opaque, atomic POS labels, the rich structures of Combinatory Categorial Grammar (CCG) (Steedman, 2000; Steedman and Baldridge, 2011) categories reflect universal grammatical properties. CCG is a lexicalized grammar formalism in which every constituent in a sentence is associated with a structured category that specifies its syntactic relationship to other constituents. For example, a category might encode that \"this constituent can combine with a noun phrase to the right (an object) and then a noun phrase to the left (a subject) to produce a sentence\" instead of simply VERB. CCG has proven useful as a framework for grammar induction due to its ability to incorporate linguistic knowledge to guide parser learning by, for example, specifying rules in lexical-expansion algorithms (Bisk and Hockenmaier, 2012; 2013) or encoding that information as priors within a Bayesian framework (Garrette et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 87, |
| "text": "Klein and Manning (2002)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 168, |
| "end": 182, |
| "text": "Radford (1988)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1000, |
| "end": 1016, |
| "text": "Baldridge (2008)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1125, |
| "end": 1141, |
| "text": "(Steedman, 2000;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1142, |
| "end": 1171, |
| "text": "Steedman and Baldridge, 2011)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1826, |
| "end": 1854, |
| "text": "(Bisk and Hockenmaier, 2012;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1855, |
| "end": 1860, |
| "text": "2013)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1928, |
| "end": 1951, |
| "text": "(Garrette et al., 2015)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Baldridge observed is that, cross-linguistically, grammars prefer simpler syntactic structures when possible, and that due to the natural correspondence of categories and syntactic structure, biasing toward simpler categories encourages simpler structures. In previous work, we were able to incorporate this preference into a Bayesian parsing model, biasing PCFG productions toward sim-pler categories by encoding a notion of category simplicity into a prior (Garrette et al., 2015) . Baldridge further notes that due to the natural associativity of CCG, adjacent categories tend to be combinable. We previously showed that incorporating this intuition into a Bayesian prior can help train a CCG supertagger (Garrette et al., 2014) .", |
| "cite_spans": [ |
| { |
| "start": 459, |
| "end": 482, |
| "text": "(Garrette et al., 2015)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 708, |
| "end": 731, |
| "text": "(Garrette et al., 2014)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we present a novel parsing model that is designed specifically for the capacity to capture both of these universal, intrinsic properties of CCG. We do so by extending our previous, PCFG-based parsing model to include parameters that govern the relationship between constituent categories and the preterminal categories (also known as supertags) to the left and right. The advantage of modeling context within a CCG framework is that while CCM must learn which contexts are likely purely from the data, the CCG categories give us obvious a priori information about whether a context is likely for a given constituent based on whether the categories are combinable. Biasing our model towards both simple categories and connecting contexts encourages learning structures with simpler syntax and that have a better global \"fit\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The Bayesian framework is well-matched to our problem since our inductive biases -those derived from universal grammar principles, weak supervision, and estimations based on unannotated data -can be encoded as priors, and we can use Markov chain Monte Carlo (MCMC) inference procedures to automatically blend these biases with unannotated text that reflects the way language is actually used \"in the wild\". Thus, we learn context information based on statistics in the data like CCM, but have the advantage of additional, a priori biases. It is important to note that the Bayesian setup allows us to use these universal biases as soft constraints: they guide the learner toward more appropriate grammars, but may be overridden when there is compelling contradictory evidence in the data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Methodologically, this work serves as an example of how linguistic-theoretical commitments can be used to benefit data-driven methods, not only through the construction of a model family from a grammar, as done in our previous work, but also when exploiting statistical associations about which the theory is silent. While there has been much work in computational modeling of the interaction between universal grammar and observ-able data in the context of studying child language acquisition (e.g., Villavicencio, 2002; Goldwater, 2007) , we are interested in applying these principles to the design of models and learning procedures that result in better parsing tools. Given our desire to train NLP models in low-supervision scenarios, the possibility of constructing inductive biases out of universal properties of language is enticing: if we can do this well, then it only needs to be done once, and can be applied to any language or domain without adaptation.", |
| "cite_spans": [ |
| { |
| "start": 501, |
| "end": 521, |
| "text": "Villavicencio, 2002;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 522, |
| "end": 538, |
| "text": "Goldwater, 2007)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we seek to learn from only raw data and an incomplete dictionary mapping some words to sets of potential supertags. In order to estimate the parameters of our model, we develop a blocked sampler based on that of Johnson et al. (2007) to sample parse trees for sentences in the raw training corpus according to their posterior probabilities. However, due to the very large sets of potential supertags used in a parse, computing inside charts is intractable, so we design a Metropolis-Hastings step that allows us to sample efficiently from the correct posterior. Our experiments show that the incorporation of supertag context parameters into the model improves learning, and that placing combinability-preferring priors on those parameters yields further gains in many scenarios.", |
| "cite_spans": [ |
| { |
| "start": 227, |
| "end": 248, |
| "text": "Johnson et al. (2007)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the CCG formalism, every constituent, including those at the lexical level, is associated with a structured CCG category that defines that constituent's relationships to the other constituents in the sentence. Categories are defined by a recursive structure, where a category is either atomic (possibly with features), or a function from one category to another, as indicated by a slash operator:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combinatory Categorial Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "C \u2192 {s, s dcl , s adj , s b , np, n, n num , pp, ...} C \u2192 {(C/C), (C \\C)}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combinatory Categorial Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Categories of adjacent constituents can be combined using one of a set of combination rules to form categories of higher-level constituents, as seen in Figure 1 . The direction of the slash operator gives the behavior of the function. A category (s\\np)/pp might describe an intransitive verb with a prepositional phrase complement; it combines on the right (/) with a constituent with category pp, and then on the left (\\) with a noun phrase (np) that serves as its subject. We follow Lewis and Steedman (2014) in allowing a small set of generic, linguistically-plausible unary and binary grammar rules. We further add rules for combining with punctuation to the left and right and allow for the merge rule X \u2192 X X of Clark and Curran (2007) .", |
| "cite_spans": [ |
| { |
| "start": 485, |
| "end": 510, |
| "text": "Lewis and Steedman (2014)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 718, |
| "end": 741, |
| "text": "Clark and Curran (2007)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 152, |
| "end": 160, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Combinatory Categorial Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this section, we present our novel supertag-context model (SCM) that augments a standard PCFG with parameters governing the supertags to the left and right of each constituent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The CCG formalism is said to be naturally associative since a constituent label is often able to combine on either the left or the right. As a motivating example, consider the sentence \"The lazy dog sleeps\", as shown in Figure 2 . The word lazy, with category n/n, can either combine with dog (n) via the Forward Application rule (>), or with The (np/n) via the Forward Composition (>B) rule. Baldridge (2008) showed that this tendency for adjacent supertags to be combinable can be used to bias a sequence model in order to learn better CCG supertaggers. However, we can see that if the supertags of adjacent words lazy (n/n) and dog (n) combine, then they will produce the category n, which describes the entire constituent span \"lazy dog\". Since we have produced a new category that subsumes that entire span, a valid parse must next combine that n with one of the remaining supertags to the left or right, producing either (The\u2022(lazy\u2022dog))\u2022sleeps or The\u2022((lazy\u2022dog)\u2022sleeps). Because we know that one (or both) of these combinations must be valid, we will similarly want a strong prior on the connectivity between lazy\u2022dog and its supertag context: The\u2194(lazy\u2022dog)\u2194sleeps.", |
| "cite_spans": [ |
| { |
| "start": 393, |
| "end": 409, |
| "text": "Baldridge (2008)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 220, |
| "end": 228, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Assuming T is the full set of known categories, the generative process for our model is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "np/n n/n n s\\np", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The lazy dog sleeps n Figure 2 : Higher-level category n subsumes the categories of its constituents. Thus, n should have a strong prior on combinability with its adjacent supertags np/n and s\\np.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 22, |
| "end": 30, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Parameters:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "\u03b8 ROOT \u223c Dir(\u03b1 ROOT , \u03b8 ROOT-0 ) \u03b8 BIN t \u223c Dir(\u03b1 BIN , \u03b8 BIN-0 ) \u2200t \u2208 T \u03b8 UN t \u223c Dir(\u03b1 UN , \u03b8 UN-0 ) \u2200t \u2208 T \u03b8 TERM t \u223c Dir(\u03b1 TERM , \u03b8 TERM-0 t ) \u2200t \u2208 T \u03bb t \u223c Dir(\u03b1 \u03bb , \u03bb 0 ) \u2200t \u2208 T \u03b8 LCTX t \u223c Dir(\u03b1 LCTX , \u03b8 LCTX-0 t ) \u2200t \u2208 T \u03b8 RCTX t \u223c Dir(\u03b1 RCTX , \u03b8 RCTX-0 t ) \u2200t \u2208 T Sentence: do s \u223c Cat(\u03b8 ROOT ) y | s \u223c SCM(s) until the tree y is valid where , y, r | t \u223c SCM(t) is defined as: z \u223c Cat(\u03bb t ) if z = B : u, v | t \u223c Cat(\u03b8 BIN t ) y L | u \u223c SCM(u), y R | v \u223c SCM(v) y = y L , y R if z = U : u | t \u223c Cat(\u03b8 UN t ) y | u \u223c SCM(u) if z = T : w | t \u223c Cat(\u03b8 TERM t ) y = w | t \u223c Cat(\u03b8 LCTX t ), r | t \u223c Cat(\u03b8 RCTX t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The process begins by sampling the parameters from Dirichlet distributions: a distribution \u03b8 ROOT over root categories, a conditional distribution \u03b8 BIN t over binary branching productions given category t, \u03b8 UN t for unary rewrite productions, \u03b8 TERM t for terminal (word) productions, and \u03b8 LCTX t and \u03b8 RCTX t for left and right contexts. We also sample parameters \u03bb t for the probability of t producing a binary branch, unary rewrite, or terminal word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Next we sample a sentence. This begins by sampling first a root category s and then recursively sampling subtrees. For each subtree rooted by a category t, we generate a left context supertag and a right context supertag r. Then, we sam- Figure 3 : The generative process starting with non-terminal A ij , where t x is the supertag for w x , the word at position x, and \"A \u2192 B C\" is a valid production in the grammar. We can see that nonterminal A ij generates nonterminals B ik and C kj (solid arrows) as well as generating left context t i-1 and right context t j (dashed arrows); likewise for B ik and C kj . The triangle under a non-terminal indicates the complete subtree rooted by the node. ple a production type z corresponding to either a (B) binary, (U) unary, or (T) terminal production. Depending on z, we then sample either a binary production u, v and recurse, a unary production u and recurse, or a terminal word w and end that branch. A tree is complete when all branches end in terminal words. See Figure 3 for a graphical depiction of the generative behavior of the process. Finally, since it is possible to generate a supertag context category that does not match the actual category generated by the neighboring constituent, we must allow our process to reject such invalid trees and re-attempt to sample.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 238, |
| "end": 246, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 1014, |
| "end": 1022, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "A ij B ik C kj t i-1 t j t i t j-1 t k-1 t k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Like CCM, this model is deficient since the same supertags are generated multiple times, and parses with conflicting supertags are not valid. Since we are not generating from the model, this does not introduce difficulties (Klein and Manning, 2002) .", |
| "cite_spans": [ |
| { |
| "start": 223, |
| "end": 248, |
| "text": "(Klein and Manning, 2002)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "One additional complication that must be addressed is that left-frontier non-terminal categories -those whose subtree span includes the first word of the sentence -do not have a left-side supertag to use as context. For these cases, we use the special sentence-start symbol S to serve as context. Similarly, we use the end symbol E for the right-side context of the right-frontier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We next discuss how the prior distributions are constructed to encode desirable biases, using universal CCG properties.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Model", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For the root, binary, and unary parameters, we want to choose prior means that encode our bias toward cross-linguistically-plausible categories. To formalize the notion of what it means for a category to be more \"plausible\", we extend the category generator of our previous work, which we will call P CAT . We can define P CAT using a probabilistic grammar (Garrette et al., 2014) . The grammar may first generate a start or end category ( S , E ) with probability p se or a special tokendeletion category ( D ; explained in \u00a75) with probability p del , or a standard CCG category C:", |
| "cite_spans": [ |
| { |
| "start": 357, |
| "end": 380, |
| "text": "(Garrette et al., 2014)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-terminal production prior means", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "X\u2192 S | E p se X\u2192 D p del X\u2192C (1 \u2212 (2p se + p del )) \u2022 P C (C)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-terminal production prior means", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For each sentence s, there will be one S and one E , so we set p se = 1/(25 + 2), since the average sentence length in the corpora is roughly 25. To discourage the model from deleting tokens (only applies during testing), we set p del = 10 \u2212100 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-terminal production prior means", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For P C , the distribution over standard categories, we use a recursive definition based on the structure of a CCG category. If p = 1 \u2212 p, then: 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-terminal production prior means", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p term \u2022 p atom (a) C\u2192A/A p term \u2022 p fwd \u2022 ( p mod \u2022 P C (A) + p mod \u2022 P C (A) 2 ) C\u2192A/B p term \u2022 p fwd \u2022 p mod \u2022 P C (A) \u2022 P C (B) C\u2192A\\A p term \u2022 p fwd \u2022 ( p mod \u2022 P C (A) + p mod \u2022 P C (A) 2 ) C\u2192A\\B p term \u2022 p fwd \u2022 p mod \u2022 P C (A) \u2022 P C (B)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C\u2192a", |
| "sec_num": null |
| }, |
| { |
| "text": "The category grammar captures important aspects of what makes a category more or less likely: (1) simplicity is preferred, with a higher p term meaning a stronger emphasis on simplicity; 2 (2) atomic types may occur at different rates, as given by p atom ; (3) modifier categories (A/A or A\\A) are more likely than similar-complexity non-modifiers (such as an adverb that modifies a verb); and (4) operators may occur at different rates, as given by p fwd .", |
| "cite_spans": [ |
| { |
| "start": 187, |
| "end": 188, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C\u2192a", |
| "sec_num": null |
| }, |
| { |
| "text": "We can use P CAT to define priors on our production parameters that bias our model toward rules that result in a priori more likely categories: 3 \u03b8 ROOT-0 (t) = P CAT (t)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C\u2192a", |
| "sec_num": null |
| }, |
| { |
| "text": "\u03b8 BIN-0 ( u, v ) = P CAT (u) \u2022 P CAT (v) \u03b8 UN-0 ( u ) = P CAT (u)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C\u2192a", |
| "sec_num": null |
| }, |
| { |
| "text": "For simplicity, we assume the production-type mixture prior to be uniform:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C\u2192a", |
| "sec_num": null |
| }, |
| { |
| "text": "\u03bb 0 = 1 3 , 1 3 , 1 3 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C\u2192a", |
| "sec_num": null |
| }, |
| { |
| "text": "We employ the same procedure as our previous work for setting the terminal production prior distributions \u03b8 TERM-0 t (w) by estimating word-givencategory relationships from the weak supervision: the tag dictionary and raw corpus (Garrette and Baldridge, 2012; Garrette et al., 2015) . 4 This procedure attempts to automatically estimate the frequency of each word/tag combination by dividing the number of raw-corpus occurrences of each word in the dictionary evenly across all of its associated tags. These counts are then combined with estimates of the \"openness\" of each tag in order to assess its likelihood of appearing with new words.", |
| "cite_spans": [ |
| { |
| "start": 229, |
| "end": 259, |
| "text": "(Garrette and Baldridge, 2012;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 260, |
| "end": 282, |
| "text": "Garrette et al., 2015)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 285, |
| "end": 286, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Terminal production prior means", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In order to encourage our model to choose trees in which the constituent labels \"fit\" into their supertag contexts, we want to bias our context parameters toward context categories that are combinable with the constituent label.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context parameter prior means", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The right-side context of a non-terminal category -the probability of generating a category to the right of the current constituent's category -corresponds directly to the category transitions used for the HMM supertagger of Garrette et al. (2014) . Thus, the right-side context prior mean \u03b8 RCTX-0 t can be biased in exactly the same way as the HMM supertagger's transitions: toward context supertags that connect to the constituent label.", |
| "cite_spans": [ |
| { |
| "start": 225, |
| "end": 247, |
| "text": "Garrette et al. (2014)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context parameter prior means", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "To encode a notion of combinability, we follow Baldridge's (2008) definition. Briefly, let \u03ba(t, u) \u2208 {0, 1} be an indicator of whether t combines with u (in that order). For any binary rule that can combine t to u, \u03ba(t, u)=1. To ensure that our prior captures the natural associativity of CCG, we define combinability in this context to include composition rules as well as application rules. If 3 For our experiments, we normalize PCAT by dividing by c\u2208T PCAT(c). This allows for experiments contrasting with a uniform prior (1/|T |) without adjusting \u03b1 values. 4 We refer the reader to the previous work (Garrette et al., 2015) for a fuller discussion and implementation details.", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 65, |
| "text": "Baldridge's (2008)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 396, |
| "end": 397, |
| "text": "3", |
| "ref_id": null |
| }, |
| { |
| "start": 563, |
| "end": 564, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 606, |
| "end": 629, |
| "text": "(Garrette et al., 2015)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context parameter prior means", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "atoms have features associated, then the atoms are allowed to unify if the features match, or if at least one of them does not have a feature. In defining \u03ba, it is also important to ignore possible arguments on the wrong side of the combination since they can be consumed without affecting the connection between the two. To achieve this for \u03ba(t, u), it is assumed that it is possible to consume all preceding arguments of t and all following arguments of u. So \u03ba(np, (s\\np)/np) = 1. This helps to ensure the associativity discussed earlier. For \"combining\" with the start or end of a sentence, we define \u03ba( S , u)=1 when u seeks no left-side arguments (since there are no tags to the left with which to combine) and \u03ba(t, E )=1 when t seeks no right-side arguments. So \u03ba( S , np/n)=1, but \u03ba( S , s\\np)=0. Finally, due to the frequent use of the unary rule that allows n to be rewritten as np, the atom np is allowed to unify with n if n is the argument. So \u03ba(n, s\\np) = 1, but \u03ba(np/n, np) = 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context parameter prior means", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The prior mean of producing a right-context supertag r from a constituent category t, P right (r | t), is defined so that combinable pairs are given higher probability than non-combinable pairs. We further experimented with a prior that biases toward both combinability and category likelihood, replacing the uniform treatment of categories with our prior over categories, yielding P right CAT (r | t). If T is the full set of known CCG categories:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context parameter prior means", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "P right (r | t) = \u03c3 \u2022 1/|T | if \u03ba(t, r) \u03c3 > 1 1/|T | otherwise P right CAT (r | t) = \u03c3 \u2022 P CAT (r) if \u03ba(t, r) \u03c3 > 1 P CAT (r) otherwise", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context parameter prior means", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Distributions P left ( | t) and P left CAT ( | t) are defined in the same way, but with the combinability direction flipped: \u03ba( , t), since the left context supertag precedes the constituent category.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Context parameter prior means", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We wish to infer the distribution over CCG parses, given the model we just described and a corpus of sentences. Since there is no way to analytically compute these modes, we resort to Gibbs sampling to find an approximate solution. Our strategy is based on the approach presented by Johnson et al. (2007) . At a high level, we alternate between resampling model parameters (\u03b8 ROOT , \u03b8 BIN , \u03b8 UN , \u03b8 TERM , \u03bb, \u03b8 LCTX , \u03b8 RCTX ) given the current set of parse trees and resampling those trees given the current model parameters and observed word sequences. To efficiently sample new model parameters, we exploit Dirichlet-multinomial conjugacy. By repeating these alternating steps and accumulating the productions, we obtain an approximation of the required posterior quantities.", |
| "cite_spans": [ |
| { |
| "start": 283, |
| "end": 304, |
| "text": "Johnson et al. (2007)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our inference procedure takes as input the distribution prior means, along with the raw corpus and tag dictionary. During sampling, we restrict the tag choices for a word w to categories allowed by the tag dictionary. Since real-world learning scenarios will always lack complete knowledge of the lexicon, we, too, want to allow for unknown words; for these, we assume the word may take any known supertag. We refer to the sequence of word tokens as w and a non-terminal category covering the span i through j \u2212 1 as y ij .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "While it is technically possible to sample directly from our context-sensitive model, the high number of potential supertags available for each context means that computing the inside chart for this model is intractable for most sentences. In order to overcome this limitation, we employ an accept/reject Metropolis-Hastings (MH) step. The basic idea is that we sample trees according to a simpler proposal distribution Q that approximates the full distribution and for which direct sampling is tractable, and then choose to accept or reject those trees based on the true distribution P .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For our model, there is a straightforward and intuitive choice for the proposal distribution: the PCFG model without our context parameters: (\u03b8 ROOT , \u03b8 BIN , \u03b8 UN , \u03b8 TERM , \u03bb), which is known to have an efficient sampling method. Our acceptance step is therefore based on the remaining parameters: the context (\u03b8 LCTX , \u03b8 RCTX ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To sample from our proposal distribution, we use a blocked Gibbs sampler based on the one proposed by Goodman (1998) and used by Johnson et al. (2007) that samples entire parse trees. For a sentence w, the strategy is to use the Inside algorithm (Lari and Young, 1990) to inductively compute, for each potential non-terminal position spanning words w i through w j\u22121 and category t, going \"up\" the tree, the probability of generating w i , . . . , w j\u22121 via any arrangement of productions that is rooted by y ij = t.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 116, |
| "text": "Goodman (1998)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 129, |
| "end": 150, |
| "text": "Johnson et al. (2007)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 246, |
| "end": 268, |
| "text": "(Lari and Young, 1990)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "p(w i | y i,i+1 = t) = \u03bb t (T) \u2022 \u03b8 TERM t (w i ) + t\u2192u \u03bb t (U) \u2022 \u03b8 UN t ( u ) \u2022 p(w i:j\u22121 | y ij = u) p(w i:j\u22121 | y ij = t) = t\u2192u \u03bb t (U) \u2022 \u03b8 UN t ( u ) \u2022 p(w i:j\u22121 | y ij = u) + t\u2192u v i<k<j \u03bb t (B) \u2022 \u03b8 BIN t ( u, v ) \u2022 p(w i:k\u22121 | y ik = u) \u2022 p(w k:j\u22121 | y kj = v)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We then pass \"downward\" through the chart, sampling productions until we reach a terminal word on all branches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "y 0n \u223c \u03b8 ROOT t \u2022 p(w 0:n\u22121 | y 0n = t) x | y ij \u223c \u03b8 BIN y ij ( u, v ) \u2022 p(w i:k\u22121 | y ik = u) \u2022 p(w k:j\u22121 | y kj = v) \u2200 y ik , y kj when j > i + 1, \u03b8 UN y ij ( u ) \u2022 p(w i:j\u22121 | y ij = u) \u2200 y ij , \u03b8 TERM y ij (w i ) when j = i + 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where x is either a split point k and pair of categories y ik , y kj resulting from a binary rewrite rule, a single category y ij resulting from a unary rule, or a word w resulting from a terminal rule. The MH procedure requires an acceptance distribution A that is used to accept or reject a tree sampled from the proposal Q. The probability of accepting new tree y given the previous tree y is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A(y | y) = min 1, P (y ) P (y) Q(y) Q(y )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Since Q is defined as a subset of P 's parameters, it is the case that:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "P (y) = Q(y) \u2022 p(y | \u03b8 LCTX , \u03b8 RCTX )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "After substituting this for each P in A, all of the Q factors cancel, yielding the acceptance distribution defined purely in terms of context parameters:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "A(y | y) = min 1, p(y | \u03b8 LCTX , \u03b8 RCTX ) p(y | \u03b8 LCTX , \u03b8 RCTX )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For completeness, we note that the probability of a tree y given only the context parameters is: 5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "p(y | \u03b8 LCTX , \u03b8 RCTX ) = 0\u2264i<j\u2264n \u03b8 LCTX (y i\u22121,i | y ij ) \u2022 \u03b8 RCTX (y j,j+1 | y ij )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Before we begin sampling, we initialize each distribution to its prior mean (\u03b8 ROOT =\u03b8 ROOT-0 , \u03b8 BIN t =\u03b8 etc) . Since MH requires an initial set of trees to begin sampling, we parse the raw corpus with probabilistic CKY using these initial parameters (excluding the context parameters) to guess an initial tree for each raw sentence.", |
| "cite_spans": [ |
| { |
| "start": 107, |
| "end": 111, |
| "text": "etc)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The sampler alternates sampling parse trees for the entire corpus of sentences using the above procedure with resampling the model parameters. Resampling the parameters requires empirical counts of each production. These counts are taken from the trees resulting from the previous round of sampling: new trees that have been \"accepted\" by the MH step, as well as existing trees for sentences in which the newly-sampled tree was rejected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "\u03b8 ROOT \u223c Dir( \u03b1 ROOT \u2022 \u03b8 ROOT-0 (t) + Croot(t) t\u2208T ) \u03b8 BIN t \u223c Dir( \u03b1 BIN \u2022 \u03b8 BIN-0 ( u, v ) + C(t\u2192 u, v ) u,v\u2208T ) \u03b8 UN t \u223c Dir( \u03b1 UN \u2022 \u03b8 UN-0 ( u ) + C(t\u2192 u ) u\u2208T ) \u03b8 TERM t \u223c Dir( \u03b1 TERM \u2022 \u03b8 TERM-0 t (w) + C(t \u2192 w) w\u2208V ) \u03bbt \u223c Dir( \u03b1 \u03bb \u2022 \u03bb 0 (B) + u,v\u2208T C(t\u2192 u, v ), \u03b1 \u03bb \u2022 \u03bb 0 (U) + u\u2208T C(t\u2192 u ), \u03b1 \u03bb \u2022 \u03bb 0 (T) + w\u2208V C(t\u2192w) ) \u03b8 LCTX t \u223c Dir( \u03b1 LCTX \u2022 \u03b8 LCTX-0 t ( ) + C left (t, ) \u2208T ) \u03b8 RCTX t \u223c Dir( \u03b1 RCTX \u2022 \u03b8 RCTX-0 t (r) + C right (t, r) r\u2208T )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "It is important to note that this method of resampling allows the draws to incorporate both the data, in the form of counts, and the prior mean, which includes all of our carefully-constructed biases derived from both the intrinsic, universal CCG properties as well as the information we induced from the raw corpus and tag dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "After all sampling iterations have completed, the final model is estimated by pooling the trees resulting from each sampling iteration, including trees accepted by the MH steps as well as the duplicated trees retained due to rejections. We use this pool of trees to compute model parameters using the same procedure as we used directly above to sample parameters, except that instead of drawing a Dirichlet sample based on the vector of counts, we simply normalize those counts. However, since we require a final model that can parse sentences efficiently, we drop the context parameters, making the model a standard PCFG, which allows us to use the probabilistic CKY algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Posterior Inference", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In our evaluation we compared our supertagcontext approach to (our reimplementation of) the best-performing model of our previous work (Garrette et al., 2015) , which SCM extends. We evaluated on the English CCGBank (Hockenmaier and Steedman, 2007) , which is a transformation of the Penn Treebank (Marcus et al., 1993) ; the CTB-CCG (Tse and Curran, 2010) transformation of the Penn Chinese Treebank (Xue et al., 2005) ; and the CCG-TUT corpus (Bos et al., 2009) , built from the TUT corpus of Italian text (Bosco et al., 2000) .", |
| "cite_spans": [ |
| { |
| "start": 135, |
| "end": 158, |
| "text": "(Garrette et al., 2015)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 216, |
| "end": 248, |
| "text": "(Hockenmaier and Steedman, 2007)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 298, |
| "end": 319, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 334, |
| "end": 356, |
| "text": "(Tse and Curran, 2010)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 401, |
| "end": 419, |
| "text": "(Xue et al., 2005)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 445, |
| "end": 463, |
| "text": "(Bos et al., 2009)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 508, |
| "end": 528, |
| "text": "(Bosco et al., 2000)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Each corpus was divided into four distinct data sets: a set from which we extract the tag dictionaries, a set of raw (unannotated) sentences, a development set, and a test set. We use the same splits as Garrette et al. (2014) . Since these treebanks use special representations for conjunctions, we chose to rewrite the trees to use conjunction categories of the form (X\\X)/X rather than introducing special conjunction rules. In order to increase the amount of raw data available to the sampler, we supplemented the English data with raw, unannotated newswire sentences from the NYT Gigaword 5 corpus (Parker et al., 2011) and supplemented Italian with the out-of-domain WaCky corpus (Baroni et al., 1999) . For English and Italian, this allowed us to use 100k raw tokens for training (Chinese uses 62k). For Chinese and Italian, for training efficiency, we used only raw sentences that were 50 words or fewer (note that we did not drop tag dictionary set or test set sentences).", |
| "cite_spans": [ |
| { |
| "start": 203, |
| "end": 225, |
| "text": "Garrette et al. (2014)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 602, |
| "end": 623, |
| "text": "(Parker et al., 2011)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 685, |
| "end": 706, |
| "text": "(Baroni et al., 1999)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The English development set was used to tune hyperparameters using grid search, and the same hyperparameters were then used for all three languages. For the category grammar, we used p punc =0.1, p term =0.7, p mod =0.2, p fwd =0.5. For the priors, we use 6 For the context prior, we used \u03c3=10 5 . We ran our sampler for 50 burn-in and 50 sampling iterations.", |
| "cite_spans": [ |
| { |
| "start": 256, |
| "end": 257, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u03b1 ROOT =1, \u03b1 BIN =100, \u03b1 UN =100, \u03b1 TERM =10 4 , \u03b1 \u03bb =3, \u03b1 LCTX =\u03b1 RCTX =10 3 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "CCG parsers are typically evaluated on the dependencies they produce instead of their CCG derivations directly since there can be many different CCG parse trees that all represent the same dependency relationships (spurious ambiguity), and CCG-to-dependency conversion can collapse those differences. To convert a CCG tree into a dependency tree, we follow Lewis (1) a no-context model baseline, Garrette et al. (2015) directly;", |
| "cite_spans": [ |
| { |
| "start": 357, |
| "end": 362, |
| "text": "Lewis", |
| "ref_id": null |
| }, |
| { |
| "start": 396, |
| "end": 418, |
| "text": "Garrette et al. (2015)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "(2) our supertag-context model, with uniform priors on contexts;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "(3) supertag-context model with priors that prefer combinability; (4) supertagcontext model with priors that prefer combinability and simpler categories. Results are shown for six different levels of supervision, as determined by the size of the corpus used to extract a tag dictionary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "(2014). We traverse the parse tree, dictating at every branching node which words will be the dependents of which. For binary branching nodes of forward rules, the right side-the argument sideis the dependent, unless the left side is a modifier (X/X) of the right, in which case the left is the dependent. The opposite is true for backward rules. For punctuation rules, the punctuation is always the dependent. For merge rules, the right side is always made the parent. The results presented in this paper are dependency accuracy scores: the proportion of words that were assigned the correct parent (or \"root\" for the root of a tree).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "When evaluating on test set sentences, if the model is unable to find a parse given the constraints of the tag dictionary, then we would have to take a score of zero for that sentence: every dependency would be \"wrong\". Thus, it is important that we make a best effort to find a parse. To accomplish this, we implemented a parsing backoff strategy. The parser first tries to find a valid parse that has either s dcl or np at its root. If that fails, then it searches for a parse with any root. If no parse is found yet, then the parser attempts to strategically allow tokens to subsume a neighbor by making it a dependent (first with a restricted root set, then without). This is similar to the \"deletion\" strategy employed by Zettlemoyer and Collins (2007) , but we do it directly in the grammar. We add unary rules of the form D \u2192u for every potential supertag u in the tree. Then, at each node spanning exactly two tokens (but no higher in the tree), we allow rules t\u2192 D , v and t\u2192 v, D . Recall that in \u00a73.1, we stated that D is given extremely low probability, meaning that the parser will avoid its use unless it is absolutely necessary. Additionally, since u will still remain as the preterminal, it will be the category examined as the context by adjacent constituents.", |
| "cite_spans": [ |
| { |
| "start": 727, |
| "end": 757, |
| "text": "Zettlemoyer and Collins (2007)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For each language and level of supervision, we executed four experiments. The no-context baseline used (a reimplementation of) the best model from our previous work (Garrette et al., 2015) : using only the non-context parameters (\u03b8 ROOT , \u03b8 BIN , \u03b8 UN , \u03b8 TERM , \u03bb) along with the category prior P CAT to bias toward likely categories throughout the tree, and \u03b8 TERM-0 t estimated from the tag dictionary and raw corpus. We then added the supertagcontext parameters (\u03b8 LCTX , \u03b8 RCTX ), but used uniform priors for those (still using P CAT for the rest). Then, we evaluated the supertag-context model using context parameter priors that bias toward categories that combine with their contexts: P left and P right (see \u00a73.3). Finally, we evaluated the supertag-context model using context parameter priors that bias toward combinability and toward a priori more likely categories, based on the category grammar (P left CAT and P right CAT ). Because we are interested in understanding how our models perform under varying amounts of su-pervision, we executed sequences of experiments in which we reduced the size of the corpus from which the tag dictionary is drawn, thus reducing the amount of information provided to the model. As this information is reduced, so is the size of the full inventory of known CCG categories that can be used as supertags. Additionally, a smaller tag dictionary means that there will be vastly more unknown words; since our model must assume that these words may take any supertag from the full set of known labels, the model must contend with a greatly increased level of ambiguity.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 188, |
| "text": "(Garrette et al., 2015)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The results of our experiments are given in Table 1. We find that the incorporation of supertagcontext parameters into a CCG model improves performance in every scenario we tested; we see gains of 2-5% across the board. Adding context parameters never hurts, and in most cases, using priors based on intrinsic, cross-lingual aspects of the CCG formalism to bias those parameters toward connectivity provides further gains. In particular, biasing the model toward trees in which constituent labels are combinable with their adjacent supertags frequently helps the model. However, for English, we found that additionally biasing context priors toward simpler categories using P left CAT /P right CAT degraded performance. This is likely due to the fact that the priors on production parameters (\u03b8 BIN , \u03b8 UN ) are already biasing the model toward likely categories, and that having the context parameters do the same ends up over-emphasizing the need for simple categories, preventing the model from choosing more complex categories when they are needed. On the other hand, this bias helps in Chinese and Italian.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "6 Related Work Klein and Manning (2002) 's CCM is an unlabeled bracketing model that generates the span of part-of-speech tags that make up each constituent and the pair of tags surrounding each constituent span (as well as the spans and contexts of each non-constituent). They found that modeling constituent context aids in parser learning because it is able to capture the observation that the same contexts tend to appear repeatedly in a corpus, even with different constituents. While CCM is designed to learn which tag pairs make for likely contexts, without regard for the constituents themselves, our model attempts to learn the relationships between context categories and the types of the constituents, allowing us to take advantage of the natural a priori knowledge about which contexts fit with which constituent labels.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 39, |
| "text": "Klein and Manning (2002)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Other researchers have shown positive results for grammar induction by introducing relatively small amounts of linguistic knowledge. Naseem et al. (2010) induced dependency parsers by handconstructing a small set of linguistically-universal dependency rules and using them as soft constraints during learning. These rules were useful for disambiguating between various structures in cases where the data alone suggests multiple valid analyses. Boonkwan and Steedman (2011) made use of language-specific linguistic knowledge collected from non-native linguists via a questionnaire that covered a variety of syntactic parameters. They were able to use this information to induce CCG parsers for multiple languages. Bisk and Hockenmaier (2012; 2013) induced CCG parsers by using a smaller number of linguistically-universal principles to propose syntactic categories for each word in a sentence, allowing EM to estimate the model parameters. This allowed them to induce the inventory of languagespecific types from the training data, without prior language-specific knowledge.", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 153, |
| "text": "Naseem et al. (2010)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 444, |
| "end": 472, |
| "text": "Boonkwan and Steedman (2011)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 713, |
| "end": 740, |
| "text": "Bisk and Hockenmaier (2012;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 741, |
| "end": 746, |
| "text": "2013)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Because of the structured nature of CCG categories and the logical framework in which they must assemble to form valid parse trees, the CCG formalism offers multiple opportunities to bias model learning based on universal, intrinsic properties of the grammar. In this paper we presented a novel parsing model with the capacity to capture the associative adjacent-category relationships intrinsic to CCG by parameterizing supertag contexts, the supertags appearing on either side of each constituent. In our Bayesian formulation, we place priors on those context parameters to bias the model toward trees in which constituent labels are combinable with their contexts, thus preferring trees that \"fit\" together better. Our experiments demonstrate that, across languages, this additional context helps in weak-supervision scenarios.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Note that this version has also updated the probability definitions for modifiers to be sums, incorporating the fact that any A/A is also a A/B (likewise for A\\A). This ensures that our grammar defines a valid probability distribution.2 The probability distribution over categories is guaranteed to be proper so long as pterm > 1 2 since the probability of the depth of a tree will decrease geometrically(Chi, 1999).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that there may actually be multiple yij due to unary rules that \"loop back\" to the same position (i, j); all of these much be included in the product.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In order to ensure that these concentration parameters, while high, were not dominating the posterior distributions, we ran experiments in which they were set much higher (including using the prior alone), and found that accuracies plummeted in those cases, demonstrating that there is a good balance with the prior.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank Yonatan Bisk for his valuable feedback. This work was supported by the U.S. Department of Defense through the U.S. Army Research Office grant W911NF-10-1-0533.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Weakly supervised supertagging with grammar-informed initialization", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Baldridge", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Baldridge. 2008. Weakly supervised supertag- ging with grammar-informed initialization. In Proc. of COLING.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "The WaCky wide web: a collection of very large linguistically processed web-crawled corpora", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvia", |
| "middle": [], |
| "last": "Bernardini", |
| "suffix": "" |
| }, |
| { |
| "first": "Adriano", |
| "middle": [], |
| "last": "Ferraresi", |
| "suffix": "" |
| }, |
| { |
| "first": "Eros", |
| "middle": [], |
| "last": "Zanchetta", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Language Resources and Evaluation", |
| "volume": "", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Baroni, Silvia Bernardini, Adriano Ferraresi, and Eros Zanchetta. 1999. The WaCky wide web: a collection of very large linguistically pro- cessed web-crawled corpora. Language Resources and Evaluation, 43(3).", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Simple robust grammar induction with combinatory categorial grammar", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hockenmaier", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Bisk and Julia Hockenmaier. 2012. Simple robust grammar induction with combinatory catego- rial grammar. In Proc. of AAAI.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "An HDP model for inducing combinatory categorial grammars", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hockenmaier", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Bisk and Julia Hockenmaier. 2013. An HDP model for inducing combinatory categorial gram- mars. Transactions of the Association for Compu- tational Linguistics, 1.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Grammar induction from text using small syntactic prototypes", |
| "authors": [ |
| { |
| "first": "Prachya", |
| "middle": [], |
| "last": "Boonkwan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. of IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Prachya Boonkwan and Mark Steedman. 2011. Gram- mar induction from text using small syntactic proto- types. In Proc. of IJCNLP.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Converting a dependency treebank to a categorial grammar treebank for Italian", |
| "authors": [ |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Bos", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Mazzei", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of the Eighth International Workshop on Treebanks and Linguistic Theories (TLT8)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johan Bos, Cristina Bosco, and Alessandro Mazzei. 2009. Converting a dependency treebank to a cat- egorial grammar treebank for Italian. In M. Pas- sarotti, Adam Przepi\u00f3rkowski, S. Raynaud, and Frank Van Eynde, editors, Proc. of the Eighth In- ternational Workshop on Treebanks and Linguistic Theories (TLT8).", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Building a treebank for Italian: a data-driven annotation schema", |
| "authors": [ |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincenzo", |
| "middle": [], |
| "last": "Lombardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniela", |
| "middle": [], |
| "last": "Vassallo", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonardo", |
| "middle": [], |
| "last": "Lesmo", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proc. of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cristina Bosco, Vincenzo Lombardo, Daniela Vassallo, and Leonardo Lesmo. 2000. Building a treebank for Italian: a data-driven annotation schema. In Proc. of LREC.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Statistical properties of probabilistic context-free grammars", |
| "authors": [ |
| { |
| "first": "Zhiyi", |
| "middle": [], |
| "last": "Chi", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiyi Chi. 1999. Statistical properties of probabilistic context-free grammars. Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Widecoverage efficient statistical parsing with CCG and log-linear models", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "R" |
| ], |
| "last": "Curran", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computational Linguistics", |
"volume": "33",
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Clark and James R. Curran. 2007. Wide- coverage efficient statistical parsing with CCG and log-linear models. Computational Linguistics, 33.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Typesupervised hidden Markov models for part-ofspeech tagging with incomplete tag dictionaries", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Baldridge", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Garrette and Jason Baldridge. 2012. Type- supervised hidden Markov models for part-of- speech tagging with incomplete tag dictionaries. In Proc. of EMNLP.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Weakly-supervised Bayesian learning of a CCG supertagger", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Baldridge", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Garrette, Chris Dyer, Jason Baldridge, and Noah A. Smith. 2014. Weakly-supervised Bayesian learning of a CCG supertagger. In Proc. of CoNLL.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Weakly-supervised grammar-informed Bayesian CCG parser learning", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Garrette", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Baldridge", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Garrette, Chris Dyer, Jason Baldridge, and Noah A. Smith. 2015. Weakly-supervised grammar-informed Bayesian CCG parser learning. In Proc. of AAAI.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Nonparametric Bayesian Models of Lexical Acquisition", |
| "authors": [ |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sharon Goldwater. 2007. Nonparametric Bayesian Models of Lexical Acquisition. Ph.D. thesis, Brown University.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Parsing inside-out", |
| "authors": [ |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joshua Goodman. 1998. Parsing inside-out. Ph.D. thesis, Harvard University.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "CCGbank: A corpus of CCG derivations and dependency structures extracted from the Penn Treebank", |
| "authors": [ |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hockenmaier", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computational Linguistics", |
"volume": "33",
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julia Hockenmaier and Mark Steedman. 2007. CCG- bank: A corpus of CCG derivations and dependency structures extracted from the Penn Treebank. Com- putational Linguistics, 33(3).", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Bayesian inference for PCFGs via Markov chain Monte Carlo", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Johnson, Thomas Griffiths, and Sharon Gold- water. 2007. Bayesian inference for PCFGs via Markov chain Monte Carlo. In Proc. of NAACL.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A generative constituent-context model for improved grammar induction", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Klein and Christopher D. Manning. 2002. A generative constituent-context model for improved grammar induction. In Proc. of ACL.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The estimation of stochastic context-free grammars using the inside-outside algorithm", |
| "authors": [ |
| { |
| "first": "Karim", |
| "middle": [], |
| "last": "Lari", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [ |
| "J" |
| ], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Computer Speech and Language", |
| "volume": "4", |
| "issue": "", |
| "pages": "35--56", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karim Lari and Steve J. Young. 1990. The esti- mation of stochastic context-free grammars using the inside-outside algorithm. Computer Speech and Language, 4:35-56.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A* CCG parsing with a supertag-factored model", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis and Mark Steedman. 2014. A* CCG parsing with a supertag-factored model. In Proc. of EMNLP.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Building a large annotated corpus of English: The Penn Treebank", |
| "authors": [ |
| { |
| "first": "Mitchell", |
| "middle": [ |
| "P" |
| ], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Beatrice", |
| "middle": [], |
| "last": "Santorini", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ann" |
| ], |
| "last": "Marcinkiewicz", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitchell P. Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a large annotated corpus of English: The Penn Treebank. Computa- tional Linguistics, 19(2).", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Using universal linguistic knowledge to guide grammar induction", |
| "authors": [ |
| { |
| "first": "Tahira", |
| "middle": [], |
| "last": "Naseem", |
| "suffix": "" |
| }, |
| { |
| "first": "Harr", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tahira Naseem, Harr Chen, Regina Barzilay, and Mark Johnson. 2010. Using universal linguistic knowl- edge to guide grammar induction. In Proc. of EMNLP.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "English Gigaword Fifth Edition LDC2011T07. Linguistic Data Consortium", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Parker", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Graff", |
| "suffix": "" |
| }, |
| { |
| "first": "Junbo", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Kazuaki", |
| "middle": [], |
| "last": "Maeda", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Parker, David Graff, Junbo Kong, Ke Chen, and Kazuaki Maeda. 2011. English Gigaword Fifth Edi- tion LDC2011T07. Linguistic Data Consortium.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Transformational Grammar", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| } |
| ], |
| "year": 1988, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Radford. 1988. Transformational Grammar. Cambridge University Press.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Combinatory categorial grammar", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Baldridge", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Non-Transformational Syntax: Formal and Explicit Models of Grammar", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Steedman and Jason Baldridge. 2011. Combina- tory categorial grammar. In Robert Borsley and Ker- sti Borjars, editors, Non-Transformational Syntax: Formal and Explicit Models of Grammar. Wiley- Blackwell.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "The Syntactic Process", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Steedman. 2000. The Syntactic Process. MIT Press.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Chinese CCGbank: Extracting CCG derivations from the Penn Chinese Treebank", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Tse", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "James", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Curran", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Tse and James R. Curran. 2010. Chinese CCG- bank: Extracting CCG derivations from the Penn Chinese Treebank. In Proc. of COLING.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The acquisition of a unification-based generalised categorial grammar", |
| "authors": [ |
| { |
| "first": "Aline", |
| "middle": [], |
| "last": "Villavicencio", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aline Villavicencio. 2002. The acquisition of a unification-based generalised categorial grammar. Ph.D. thesis, University of Cambridge.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "The Penn Chinese TreeBank: Phrase structure annotation of a large corpus", |
| "authors": [ |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Fu-Dong", |
| "middle": [], |
| "last": "Chiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Natural Language Engineering", |
| "volume": "11", |
| "issue": "2", |
| "pages": "207--238", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nianwen Xue, Fei Xia, Fu-Dong Chiou, and Martha Palmer. 2005. The Penn Chinese TreeBank: Phrase structure annotation of a large corpus. Natural Lan- guage Engineering, 11(2):207-238.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Online learning of relaxed CCG grammars for parsing to logical form", |
| "authors": [ |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luke S. Zettlemoyer and Michael Collins. 2007. On- line learning of relaxed CCG grammars for parsing to logical form. In Proc. of EMNLP.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "CCG parse for \"The man walks to work.\"" |
| } |
| } |
| } |
| } |