| { |
| "paper_id": "N13-1015", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:40:59.066330Z" |
| }, |
| "title": "Experiments with Spectral Learning of Latent-Variable PCFGs", |
| "authors": [ |
| { |
| "first": "Shay", |
| "middle": [ |
| "B" |
| ], |
| "last": "Cohen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Columbia University", |
| "location": {} |
| }, |
| "email": "scohen@cs.columbia.edu" |
| }, |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Stratos", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Columbia University", |
| "location": {} |
| }, |
| "email": "stratos@cs.columbia.edu" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Columbia University", |
| "location": {} |
| }, |
| "email": "mcollins@cs.columbia.edu" |
| }, |
| { |
| "first": "Dean", |
| "middle": [ |
| "P" |
| ], |
| "last": "Foster", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Pennsylvania", |
| "location": {} |
| }, |
| "email": "foster@wharton.upenn.edu" |
| }, |
| { |
| "first": "Lyle", |
| "middle": [], |
| "last": "Ungar", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "ungar@cis.upenn.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Latent-variable PCFGs (L-PCFGs) are a highly successful model for natural language parsing. Recent work (Cohen et al., 2012) has introduced a spectral algorithm for parameter estimation of L-PCFGs, which-unlike the EM algorithm-is guaranteed to give consistent parameter estimates (it has PAC-style guarantees of sample complexity). This paper describes experiments using the spectral algorithm. We show that the algorithm provides models with the same accuracy as EM, but is an order of magnitude more efficient. We describe a number of key steps used to obtain this level of performance; these should be relevant to other work on the application of spectral learning algorithms. We view our results as strong empirical evidence for the viability of spectral methods as an alternative to EM.", |
| "pdf_parse": { |
| "paper_id": "N13-1015", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Latent-variable PCFGs (L-PCFGs) are a highly successful model for natural language parsing. Recent work (Cohen et al., 2012) has introduced a spectral algorithm for parameter estimation of L-PCFGs, which-unlike the EM algorithm-is guaranteed to give consistent parameter estimates (it has PAC-style guarantees of sample complexity). This paper describes experiments using the spectral algorithm. We show that the algorithm provides models with the same accuracy as EM, but is an order of magnitude more efficient. We describe a number of key steps used to obtain this level of performance; these should be relevant to other work on the application of spectral learning algorithms. We view our results as strong empirical evidence for the viability of spectral methods as an alternative to EM.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Latent-variable PCFGS (L-PCFGs) are a highly successful model for natural language parsing (Matsuzaki et al., 2005; Petrov et al., 2006) . Recent work (Cohen et al., 2012) has introduced a spectral learning algorithm for L-PCFGs. A crucial property of the algorithm is that it is guaranteed to provide consistent parameter estimates-in fact it has PAC-style guarantees of sample complexity. 1 This is in contrast to the EM algorithm, the usual method for parameter estimation in L-PCFGs, which has the weaker guarantee of reaching a local maximum of the likelihood function. The spectral algorithm is relatively simple and efficient, relying on a singular value decomposition of the training examples, followed by a single pass over the data where parameter values are calculated. Cohen et al. (2012) describe the algorithm, and the theory behind it, but as yet no experimental results have been reported for the method. This paper describes experiments on natural language parsing using the spectral algorithm for parameter estimation. The algorithm provides models with slightly higher accuracy than EM (88.05% F-measure on test data for the spectral algorithm, vs 87.76% for EM), but is an order of magnitude more efficient (9h52m for training, compared to 187h12m, a speed-up of 19 times).", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 115, |
| "text": "(Matsuzaki et al., 2005;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 116, |
| "end": 136, |
| "text": "Petrov et al., 2006)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 151, |
| "end": 171, |
| "text": "(Cohen et al., 2012)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 781, |
| "end": 800, |
| "text": "Cohen et al. (2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We describe a number of key steps in obtaining this level of performance. A simple backed-off smoothing method is used to estimate the large number of parameters in the model. The spectral algorithm requires functions mapping inside and outside trees to feature vectors-we make use of features corresponding to single level rules, and larger tree fragments composed of two or three levels of rules. We show that it is important to scale features by their inverse variance, in a manner that is closely related to methods used in canonical correlation analysis. Negative values can cause issues in spectral algorithms, but we describe a solution to these problems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In recent work there has been a series of results in spectral learning algorithms for latent-variable models (Vempala and Wang, 2004; Hsu et al., 2009; Bailly et al., 2010; Siddiqi et al., 2010; Parikh et al., 2011; Balle et al., 2011; Arora et al., 2012; Dhillon et al., 2012; Anandkumar et al., 2012) . Most of these results are theoretical (although see Luque et al. (2012) for empirical results of spectral learning for dependency parsing). While the focus of our experiments is on parsing, our findings should be relevant to the application of spectral methods to other latent-variable models. We view our results as strong empirical evidence for the viability of spectral methods as an alternative to EM.", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 133, |
| "text": "(Vempala and Wang, 2004;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 134, |
| "end": 151, |
| "text": "Hsu et al., 2009;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 152, |
| "end": 172, |
| "text": "Bailly et al., 2010;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 173, |
| "end": 194, |
| "text": "Siddiqi et al., 2010;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 195, |
| "end": 215, |
| "text": "Parikh et al., 2011;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 216, |
| "end": 235, |
| "text": "Balle et al., 2011;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 236, |
| "end": 255, |
| "text": "Arora et al., 2012;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 256, |
| "end": 277, |
| "text": "Dhillon et al., 2012;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 278, |
| "end": 302, |
| "text": "Anandkumar et al., 2012)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 357, |
| "end": 376, |
| "text": "Luque et al. (2012)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section we first give basic definitions for L-PCFGs, and then describe the spectral learning algorithm of Cohen et al. (2012) .", |
| "cite_spans": [ |
| { |
| "start": 114, |
| "end": 133, |
| "text": "Cohen et al. (2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We follow the definition in Cohen et al. (2012) of L-PCFGs.", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 47, |
| "text": "Cohen et al. (2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "An L-PCFG is an 8-tuple (N , I, P, m, n, \u03c0, t, q) where:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 N is the set of non-terminal symbols in the grammar. I \u2282 N is a finite set of in-terminals. P \u2282 N is a finite set of pre-terminals. We assume that N = I\u222aP, and I\u2229P = \u2205. Hence we have partitioned the set of non-terminals into two subsets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 [m] is the set of possible hidden states. 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 [n] is the set of possible words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 For all", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "a \u2208 I, b, c \u2208 N , h 1 , h 2 , h 3 \u2208 [m], we have a context-free rule a(h 1 ) \u2192 b(h 2 ) c(h 3 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The rule has an associated parameter t(a \u2192 b c, h 2 , h 3 |a, h 1 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 For all a \u2208 P, h \u2208 [m], x \u2208 [n], we have a context-free rule a(h) \u2192 x. The rule has an associated parameter q(a \u2192 x|a, h).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "\u2022 For all a \u2208 I, h \u2208 [m], \u03c0(a, h) is a parameter specifying the probability of a(h) being at the root of a tree.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "A skeletal tree (s-tree) is a sequence of rules r 1 . . . r N where each r i is either of the form a \u2192 b c or a \u2192 x. The rule sequence forms a top-down, leftmost derivation under a CFG with skeletal rules.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "A full tree consists of an s-tree r 1 . . . r N , together with values h 1 . . . h N . Each h i is the value for the hidden variable for the left-hand-side of rule r i . Each h i can take any value in [m] .", |
| "cite_spans": [ |
| { |
| "start": 201, |
| "end": 204, |
| "text": "[m]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "For a given skeletal tree r 1 . . . r N , define a i to be the non-terminal on the left-hand-side of rule r i . For any i \u2208 [N ] such that r i is of the form a \u2192 b c, define h", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "(2) i and h", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "(3) i as the hidden state value of the left and right child respectively. The model then defines a probability mass function (PMF) as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "p(r 1 . . . r N , h 1 . . . h N ) = \u03c0(a 1 , h 1 ) i:ai\u2208I t(r i , h (2) i , h (3) i |a i , h i ) i:ai\u2208P q(r i |a i , h i ) The PMF over skeletal trees is p(r 1 . . . r N ) = h 1 ...h N p(r 1 . . . r N , h 1 . . . h N ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "2 For any integer n, we use [n] to denote the set {1, 2, . . . n}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The parsing problem is to take a sentence as input, and produce a skeletal tree as output. A standard method for parsing with L-PCFGs is as follows. First, for a given input sentence x 1 . . . x n , for any triple (a, i, j) such that a \u2208 N and 1 \u2264 i \u2264 j \u2264 n, the marginal \u00b5(a, i, j) is defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u00b5(a, i, j) = t:(a,i,j)\u2208t p(t)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where the sum is over all skeletal trees t for x 1 . . . x n that include non-terminal a spanning words x i . . . x j . A variant of the inside-outside algorithm can be used to calculate marginals. Once marginals have been computed, Goodman's algorithm (Goodman, 1996) is used to find arg max t (a,i,j)\u2208t \u00b5(a, i, j). 3", |
| "cite_spans": [ |
| { |
| "start": 253, |
| "end": 268, |
| "text": "(Goodman, 1996)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "L-PCFGs: Basic Definitions", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We now give a sketch of the spectral learning algorithm. The training data for the algorithm is a set of skeletal trees. The output from the algorithm is a set of parameter estimates for t, q and \u03c0 (more precisely, the estimates are estimates of linearly transformed parameters; see Cohen et al. (2012) and section 2.3.1 for more details). The algorithm takes two inputs in addition to the set of skeletal trees. The first is an integer m, specifying the number of latent state values in the model. Typically m is a relatively small number; in our experiments we test values such as m = 8, 16 or 32. The second is a pair of functions \u03c6 and \u03c8, that respectively map inside and outside trees to feature vectors in R d and R d , where d and d are integers. Each non-terminal in a skeletal tree has an associated inside and outside tree. The inside tree for a node contains the entire subtree below that node; the outside tree contains everything in the tree excluding the inside tree. We will refer to the node above the inside tree that has been removed as the \"foot\" of the outside tree. See figure 1 for an example. Figure 1 : The inside tree (shown left) and outside tree (shown right) for the non-terminal VP in the parse tree [S [NP [D the ", |
| "cite_spans": [ |
| { |
| "start": 283, |
| "end": 302, |
| "text": "Cohen et al. (2012)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1229, |
| "end": 1242, |
| "text": "[S [NP [D the", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1116, |
| "end": 1124, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Spectral Learning Algorithm", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "] [N cat]] [VP [V saw] [NP [D the] [N dog]]]]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Spectral Learning Algorithm", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u03c8(o) are typically high-dimensional, sparse feature vectors, similar to those in log-linear models. For example \u03c6 might track the rule immediately below the root of the inside tree, or larger tree fragments; \u03c8 might include similar features tracking rules or larger rule fragments above the relevant node.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Spectral Learning Algorithm", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The spectral learning algorithm proceeds in two steps. In step 1, we learn an m-dimensional representation of inside and outside trees, using the functions \u03c6 and \u03c8 in combination with a projection step defined through singular value decomposition (SVD). In step 2, we derive parameter estimates directly from training examples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Spectral Learning Algorithm", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "For a given non-terminal a \u2208 N , each instance of a in the training data has an associated outside tree, and an associated inside tree. We define O a to be the set of pairs of inside/outside trees seen with a in the training data: each member of O a is a pair (o, t) where o is an outside tree, and t is an inside tree.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 1: An SVD-Based Projection", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Step 1 of the algorithm is then as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 1: An SVD-Based Projection", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "1. For each a \u2208 N calculate\u03a9 a \u2208 R d\u00d7d as [\u03a9 a ] i,j = 1 |O a | (o,t)\u2208O a \u03c6 i (t)\u03c8 j (o) 2. Perform an SVD on\u03a9 a . Define U a \u2208 R d\u00d7m (V a \u2208 R d \u00d7m )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 1: An SVD-Based Projection", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "to be a matrix containing the m left (right) singular vectors corresponding to the m largest singular values; define \u03a3 a \u2208 R m\u00d7m to be the diagonal matrix with the m largest singular values on its diagonal.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 1: An SVD-Based Projection", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "3. For each inside tree in the corpus with root label a, define", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 1: An SVD-Based Projection", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Y (t) = (U a ) \u03c6(t)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 1: An SVD-Based Projection", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "For each outside tree with a foot node labeled a, define", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 1: An SVD-Based Projection", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Z(o) = (\u03a3 a ) \u22121 (V a ) \u03c8(o)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 1: An SVD-Based Projection", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "Note that Y (t) and Z(o) are both m-dimensional vectors; thus we have used SVD to project inside and outside trees to m-dimensional vectors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 1: An SVD-Based Projection", |
| "sec_num": "2.2.1" |
| }, |
| { |
| "text": "We now describe how the functions Y (t) and Z(o) are used in estimating parameters of the model. First, consider the t(a \u2192 b c, h 2 , h 3 |a, h 1 ) parameters. Each instance of a given rule a \u2192 b c in the training corpus has an outside tree o associated with the parent labeled a, and inside trees t 2 and t 3 associated with the children labeled b and c. For any rule a \u2192 b c we define Q a\u2192b c to be the set of triples (o, t (2) , t (3) ) occurring with that rule in the corpus. The parameter estimate is then", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 2: Parameter Estimation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "c(a \u2192 b c, j, k|a, i) = count(a \u2192 b c) count(a) \u00d7 E a\u2192b c i,j,k", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Step 2: Parameter Estimation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 2: Parameter Estimation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "E a\u2192b c i,j,k = (o,t (2) ,t (3) ) \u2208Q a\u2192b c Z i (o) \u00d7 Y j (t (2) ) \u00d7 Y k (t (3) ) |Q a\u2192b c |", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 2: Parameter Estimation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Here we use count(a \u2192 b c) and count(a) to refer to the count of the rule a \u2192 b c and the non-terminal a in the corpus. Note that once the SVD step has been used to compute representations Y (t) and Z(o) for each inside and outside tree in the corpus, calculating the parameter value\u0109(a \u2192 b c, j, k|a, i) is a very simple operation. Similarly, for any rule a \u2192 x, define Q a\u2192x to be the set of outside trees seen with that rule in the training corpus. The parameter estimate is then", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 2: Parameter Estimation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "c(a \u2192 x|a, i) = count(a \u2192 x) count(a) \u00d7 E a\u2192x i (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 2: Parameter Estimation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 2: Parameter Estimation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "E a\u2192x i = o\u2208Q a\u2192x Z i (o)/|Q a\u2192x |.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 2: Parameter Estimation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "A similar method is used for estimating parameters\u0109(a, i) that play the role of the \u03c0 parameters (details omitted for brevity; see Cohen et al. (2012) ).", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 150, |
| "text": "Cohen et al. (2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Step 2: Parameter Estimation", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Once the\u0109(a \u2192 b c, j, k|a, i),\u0109(a \u2192 x|a, i) and c(a, i) parameters have been estimated from the training corpus, they can be used in place of the t, q and \u03c0 parameters in the inside-outside algorithm for computing marginals (see Eq. 1). Call the resulting marginals\u03bc(a, i, j). The guarantees for the parameter estimation method are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "\u2022 Define \u2126 a = E[\u03c6(T )(\u03c8(O)) |A = a]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "where A, O, T are random variables corresponding to the non-terminal label at a node, the outside tree, and the inside tree (see Cohen et al. (2012) for a precise definition). Note that\u03a9 a , as defined above, is an estimate of \u2126 a . Then if \u2126 a has rank m, the marginals\u03bc will converge to the true values \u00b5 as the number of training examples goes to infinity, assuming that the training samples are i.i.d. samples from an L-PCFG.", |
| "cite_spans": [ |
| { |
| "start": 129, |
| "end": 148, |
| "text": "Cohen et al. (2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "\u2022 Define \u03c3 to be the m'th largest singular value of \u2126 a . Then the number of samples required for\u03bc to be -close to \u00b5 with probability at least 1 \u2212 \u03b4 is polynomial in 1/ , 1/\u03b4, and 1/\u03c3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "Under the first assumption, (Cohen et al., 2012) show that the\u0109 parameters converge to values that are linear transforms of the original parameters in the L-PCFG. For example, define c(a \u2192 b c, j, k|a, i) to be the value that c(a \u2192 b c, j, k|a, i) converges to in the limit of infinite data. Then there exist invertible matrices G a \u2208 R m\u00d7m for all a \u2208 N such that for any a \u2192 b c, for", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 48, |
| "text": "(Cohen et al., 2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "any h 1 , h 2 , h 3 \u2208 R m , t(a \u2192 b c, h 2 , h 3 |a, h 1 ) = i,j,k [G a ] i,h1 [(G b ) \u22121 ] j,h2 [(G c ) \u22121 ] k,h3 c(a \u2192 b c, j, k|a, i)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "The transforms defined by the G a matrices are benign, in that they cancel in the inside-outside algorithm when marginals \u00b5(a, i, j) are calculated. Similar relationships hold for the \u03c0 and q parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "3 Implementation of the Algorithm Cohen et al. (2012) introduced the spectral learning algorithm, but did not perform experiments, leaving several choices open in how the algorithm is implemented in practice. This section describes a number of key choices made in our implementation of the algorithm. In brief, they are as follows:", |
| "cite_spans": [ |
| { |
| "start": 34, |
| "end": 53, |
| "text": "Cohen et al. (2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "The choice of functions \u03c6 and \u03c8. We will describe basic features used in \u03c6 and \u03c8 (single-level rules, larger tree fragments, etc.). We will also describe a method for scaling different features in \u03c6 and \u03c8 by their variance, which turns out to be important for empirical results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "Estimation of E a\u2192b c i,j,k and E a\u2192x i . There are a very large number of parameters in the model, leading to challenges in estimation. The estimates in Eqs. 2 and 3 are unsmoothed. We describe a simple backed-off smoothing method that leads to significant improvements in performance of the method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "Handling positive and negative values. As defined, the\u0109 parameters may be positive or negative; as a result, the\u03bc values may also be positive or negative. We find that negative values can be a significant problem if not handled correctly; but with a very simple fix to the algorithm, it performs well.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "We now turn to these three issues in more detail. Section 4 will describe experiments measuring the impact of the different choices.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Guarantees for the Algorithm", |
| "sec_num": "2.3.1" |
| }, |
| { |
| "text": "Cohen et al. 2012show that the choice of feature definitions \u03c6 and \u03c8 is crucial in two respects. First, for all non-terminals a \u2208 N , the matrix \u2126 a must be of rank m: otherwise the parameter-estimation algorithm will not be consistent. Second, the number of samples required for learning is polynomial in 1/\u03c3, where \u03c3 = min a\u2208N \u03c3 m (\u2126 a ), and \u03c3 m (\u2126 a ) is the m'th smallest singular value of \u2126 a . (Note that the second condition is stronger than the first; \u03c3 > 0 implies that \u2126 a is of rank m for all a.) The choice of \u03c6 and \u03c8 has a direct impact on the value for \u03c3: roughly speaking, the value for \u03c3 can be thought of as a measure of how informative the functions \u03c6 and \u03c8 are about the hidden state values.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "With this in mind, our goal is to define a relatively simple set of features, which nevertheless provide significant information about hidden-state values, and hence provide high accuracy under the model. The inside-tree feature function \u03c6(t) makes use of the following indicator features (throughout these definitions assume that a \u2192 b c is at the root of the inside tree t):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The pair of nonterminals (a, b). E.g., for the inside tree in figure 1 this would be the pair (VP, V).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The pair (a, c). E.g., (VP, NP).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The rule a \u2192 b c. E.g., VP \u2192 V NP.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The rule a \u2192 b c paired with the rule at the root of t (i,2) . E.g., for the inside tree in figure 1 this would correspond to the tree fragment (VP (V saw) NP).", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 62, |
| "text": "(i,2)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The rule a \u2192 b c paired with the rule at the root of t (i,3) . E.g., the tree fragment (VP V (NP D N) ).", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 62, |
| "text": "(i,3)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 95, |
| "end": 103, |
| "text": "(NP D N)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The head part-of-speech of t (i,1) paired with a. 4 E.g., the pair (VP, V).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The number of words dominated by t (i,1) paired with a (this is an integer valued feature).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In the case of an inside tree consisting of a single rule a \u2192 x the feature vector simply indicates the identity of that rule.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To illustrate the function \u03c8, it will be useful to make use of the following example outside tree:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "S NP D the N cat VP V saw NP D N dog", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Note that in this example the foot node of the outside tree is labeled D. The features are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The rule above the foot node. We take care to mark which non-terminal is the foot, using a * symbol. In the above example this feature is NP \u2192 D * N.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The two-level and three-level rule fragments above the foot node. In the above example these features would be", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "VP V NP D * N S NP VP V NP D * N", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The label of the foot node, together with the label of its parent. In the above example this is (D, NP).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The label of the foot node, together with the label of its parent and grandparent. In the above example this is (D, NP, VP).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The part of speech of the first head word along the path from the foot of the outside tree to the root of the tree which is different from the head node of the foot node. In the above example this is N.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The width of the span to the left of the foot node, paired with the label of the foot node.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 The width of the span to the right of the foot node, paired with the label of the foot node.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Scaling of features. The features defined above are almost all binary valued features. We scale the features in the following way. For each feature \u03c6 i (t), define count(i) to be the number of times the feature is equal to 1, and M to be the number of training examples. The feature is then redefined to be", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u03c6 i (t) \u00d7 M count(i) + \u03ba", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where \u03ba is a smoothing term (the method is relatively insensitive to the choice of \u03ba; we set \u03ba = 5 in our experiments). A similar process is applied to the \u03c8 features. The method has the effect of decreasing the importance of more frequent features in the SVD step of the algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The SVD-based step of the algorithm is very closely related to previous work on CCA (Hotelling, 1936; Hardoon et al., 2004; Kakade and Foster, 2009) ; and the scaling step is derived from previous work on CCA (Dhillon et al., 2011) . In CCA the \u03c6 and \u03c8 vectors are \"whitened\" in a preprocessing step, before an SVD is applied. This whitening process involves calculating covariance matrices C x = E[\u03c6\u03c6 ] and C y = E[\u03c8\u03c8 ], and replacing \u03c6 by (C x ) \u22121/2 \u03c6 and \u03c8 by (C y ) \u22121/2 \u03c8. The exact calculation of (C x ) \u22121/2 and (C y ) \u22121/2 is challenging in high dimensions, however, as these matrices will not be sparse; the transformation described above can be considered an approximation where off-diagonal members of C x and C y are set to zero. We will see that empirically this scaling gives much improved accuracy.", |
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 101, |
| "text": "(Hotelling, 1936;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 102, |
| "end": 123, |
| "text": "Hardoon et al., 2004;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 124, |
| "end": 148, |
| "text": "Kakade and Foster, 2009)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 209, |
| "end": 231, |
| "text": "(Dhillon et al., 2011)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Choice of Functions \u03c6 and \u03c8", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The number of E a\u2192b c i,j,k parameters is very large, and the estimation method described in Eqs. 2-3 is unsmoothed. We have found significant improvements in performance using a relatively simple backoff smoothing method. The intuition behind this method is as follows: given two random variables X and Y , under the assumption that the random variables are independent, E[XY ] = E[X] \u00d7 E[Y ]. It makes sense to define \"backed off\" estimates which make increasingly strong independence assumptions of this form.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Smoothing of binary rules For any rule a \u2192 b c and indices i, j \u2208 [m] we can define a second-order moment as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "E a\u2192b c i,j,\u2022 = (o,t (2) ,t (3) ) \u2208Q a\u2192b c Z i (o) \u00d7 Y j (t (2) ) |Q a\u2192b c |", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The definitions of E a\u2192b c i,\u2022,k and E a\u2192b c \u2022,j,k are analogous. We can define a first-order estimate as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "E a\u2192b c \u2022,\u2022,k = (o,t (2) ,t (3) ) \u2208Q a\u2192b c Y k (t (3) ) |Q a\u2192b c | Again, we have analogous definitions of E a\u2192b c i,\u2022,\u2022 and E a\u2192b c", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022,j,\u2022 . Different levels of smoothed estimate can be derived from these different terms. The first is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "E 2,a\u2192b c i,j,k = E a\u2192b c i,j,\u2022 \u00d7 E a\u2192b c \u2022,\u2022,k + E a\u2192b c i,\u2022,k \u00d7 E a\u2192b c \u2022,j,\u2022 + E a\u2192b c \u2022,j,k \u00d7 E a\u2192b c i,\u2022,\u2022 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Note that we give an equal weight of 1/3 to each of the three backed-off estimates seen in the numerator. A second smoothed estimate is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "E 3,a\u2192b c i,j,k = E a\u2192b c i,\u2022,\u2022 \u00d7 E a\u2192b c \u2022,j,\u2022 \u00d7 E a\u2192b c \u2022,\u2022,k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Using the definition of O a given in section 2.2.1, we also define", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "F a i = (o,t)\u2208O a Y i (t) |O a | H a i = (o,t)\u2208O a Z i (o) |O a |", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "and our next smoothed estimate as E 4,a\u2192b c", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "i,j,k = H a i \u00d7 F b j \u00d7 F c k . Our final estimate is \u03bbE a\u2192b c i,j,k + (1 \u2212 \u03bb) \u03bbE 2,a\u2192b c i,j,k + (1 \u2212 \u03bb)K a\u2192b c i,j,k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where K a\u2192b c i,j,k = \u03bbE 3,a\u2192b c i,j,k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "+ (1 \u2212 \u03bb)E 4,a\u2192b c i,j,k", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": ". Here \u03bb \u2208 [0, 1] is a smoothing parameter, set to |Q a\u2192b c |/(C + |Q a\u2192b c |) in our experiments, where C is a parameter that is chosen by optimization of accuracy on a held-out set of data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Smoothing lexical rules We define a similar method for the E a\u2192x i parameters. Define", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "E a i = x o\u2208Q a\u2192x Z i (o)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "x |Q a\u2192x | hence E a i ignores the identity of x in making its estimate. The smoothed estimate is then defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u03bdE a\u2192x i +(1\u2212\u03bd)E a i .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Here, \u03bd is a value in [0, 1] which is tuned on a development set. We only smooth lexical rules which appear in the data less than a fixed number of times. Unlike binary rules, for which the estimation depends on a high order moment (third moment), the lexical rules use first-order moments, and therefore it is not required to smooth rules with a relatively high count. The maximal count for this kind of smoothing is set using a development set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of E a\u2192b c i,j,k and E a\u2192x i", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As described before, the parameter estimates may be positive or negative, and as a result the marginals computed by the algorithm may in some cases themselves be negative. In early experiments we found this to be a significant problem, with some parses having a very large number of negatives, and being extremely poor in quality. Our fix is to define the output of the parser to be arg max t (a,i,j)\u2208t |\u00b5(a, i, j)| rather than arg max t (a,i,j)\u2208t \u00b5(a, i, j) as defined in Goodman's algorithm. Thus if a marginal value \u00b5(a, i, j) is negative, we simply replace it with its absolute value. This step was derived after inspection of the parsing charts for bad parses, where we saw evidence that in these cases the entire set of marginal values had been negated (and hence decoding under Eq. 1 actually leads to the lowest probability parse being output under the model). We suspect that this is because in some cases a dominant parameter has had its sign flipped due to sampling error; more theoretical and empirical work is required in fully understanding this issue.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Handling Positive and Negative Values", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We use the Penn WSJ treebank (Marcus et al., 1993) for our experiments. Sections 2-21 were used as training data, and sections 0 and 22 were used as development data. Section 23 is used as the final test set. We binarize the trees in training data using the same method as that described in Petrov et al. (2006) . For example, the non-binary rule VP \u2192 V NP PP SBAR would be converted to the structure [VP [@VP [@VP V NP] PP] SBAR] where @VP is a new symbol in the grammar. Unary rules are removed by collapsing non-terminal chains: for example the unary rule S \u2192 VP would be replaced by a single non-terminal S|VP.", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 50, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 291, |
| "end": 311, |
| "text": "Petrov et al. (2006)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Handling Positive and Negative Values", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For the EM algorithm we use the initialization method described in Matsuzaki et al. (2005) . For efficiency, we use a coarse-to-fine algorithm for parsing with either the EM or spectral derived grammar: a PCFG without latent states is used to calculate marginals, and dynamic programming items are removed if their marginal probability is lower than some threshold (0.00005 in our experiments).", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 90, |
| "text": "Matsuzaki et al. (2005)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Handling Positive and Negative Values", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "For simplicity the parser takes part-of-speech tagged sentences as input. We use automatically tagged data from Turbo Tagger (Martins et al., 2010) . The tagger is used to tag both the development data and the test data. The tagger was retrained on sections 2-21. We use the F 1 measure according to the Parseval metric (Black et al., 1991) . For the spectral algorithm, we tuned the smoothing parameters using section 0 of the treebank.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 147, |
| "text": "(Martins et al., 2010)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 320, |
| "end": 340, |
| "text": "(Black et al., 1991)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Handling Positive and Negative Values", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We compare models trained using EM and the spectral algorithm using values for m in {8, 16, 24, 32}. 5 For EM, we found that it was important to use development data to choose the number of iterations of training. We train the models for 100 iterations, then test accuracy of the model on section 22 (development data) at different iteration numbers. Table 1 shows that a peak level of accuracy is reached for all values of m, other than m = 8, at iteration 20-30, with sometimes substantial overtraining beyond that point.", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 102, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 351, |
| "end": 358, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison to EM: Accuracy", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The performance of a regular PCFG model, estimated using maximum likelihood and with no latent states, is 68.62%. Table 2 gives results for the EM-trained models and spectral-trained models. The spectral models give very similar accuracy to the EM-trained model on the test set. Results on the development set with varying m show that the EM-based models perform better for m = 8, but that the spectral algorithm quickly catches up as m increases. Table 3 gives training times for the EM algorithm and the spectral algorithm for m \u2208 {8, 16, 24, 32}.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 114, |
| "end": 121, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 448, |
| "end": 455, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison to EM: Accuracy", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "All timing experiments were done on a single Intel Xeon 2.67GHz CPU. The implementations for the EM algorithm and the spectral algorithm were written in Java. The spectral algorithm also made use of Matlab for several matrix calculations such as the SVD calculation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison to EM: Training Speed", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For EM we show the time to train a single iteration, and also the time to train the optimal model (time for 30 iterations of training for m = 8, 16, 24, and time for 20 iterations for m = 32). Note that this latter time is optimistic, as it assumes an oracle specifying exactly when it is possible to terminate EM training with no loss in performance. The spectral method is considerably faster than EM: for example, for m = 32 the time for training the spectral model is just under 10 hours, compared to 187 hours for EM, a factor of almost 19 times faster. 6 The reason for these speed ups is as follows.", |
| "cite_spans": [ |
| { |
| "start": 559, |
| "end": 560, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison to EM: Training Speed", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Step 1 of the spectral algorithm (feature calculation, transfer + scaling, and SVD) is not required by EM, but takes a relatively small amount of time (about 1.2 hours for all values of m). Once step 1 has been completed, step 2 of the spectral algorithm takes a Table 3 : Running time for the EM algorithm and the various stages in the spectral algorithm. For EM we show the time for a single iteration, and the time to train the optimal model (time for 30 iterations of training for m = 8, 16, 24, time for 20 iterations of training for m = 32). For the spectral method we show the following: \"total\" is the total training time; \"feature\" is the time to compute the \u03c6 and \u03c8 vectors for all data points; \"transfer + scaling\" is time to transfer the data from Java to Matlab, combined with the time for scaling of the features; \"SVD\" is the time for the SVD computation; a \u2192 b c is the time to compute the\u0109(a \u2192 b c, h 2 , h 3 |a, h 1 ) parameters; a \u2192 x is the time to compute the\u0109(a \u2192 x, h|a, h) parameters. Note that \"feature\" and \"transfer + scaling\" are the same step for all values of m, so we quote a single runtime for these steps.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 263, |
| "end": 270, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Comparison to EM: Training Speed", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "single pass over the data: in contrast, EM requires a few tens of passes (certainly more than 10 passes, from the results in table 1). The computations performed by the spectral algorithm in its single pass are relatively cheap. In contrast to EM, the inside-outside algorithm is not required; however various operations such as calculating smoothing terms in the spectral method add some overhead. The net result is that for m = 32 the time for training the spectral method takes a very similar amount of time to a single pass of the EM algorithm.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison to EM: Training Speed", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We now describe experiments demonstrating the impact of various components described in section 3. Handling negative values (section 3.3) Replacing marginal values \u00b5(a, i, j) with their absolute values is also important: without this step, accuracy on section 22 decreases to 80.61% (m = 32). 319 sentences out of 1700 examples have different parses when this step is implemented, implying that the problem with negative values described in section 3.3 occurs on around 18% of all sentences. The effect of feature functions To test the effect of features on accuracy, we experimented with a simpler set of features than those described in section 3.1. This simple set just includes an indicator for the rule below a nonterminal (for inside trees) and the rule above a nonterminal (for outside trees). Even this simpler set of features achieves relatively high accuracy (m = 8: 86.44 , m = 16: 86.86, m = 24: 87.24 , m = 32: 88.07 ). This set of features is reminiscent of a PCFG model where the nonterminals are augmented their parents (vertical Markovization of order 2) and binarization is done while retaining sibling information (horizontal Markovization of order 1). See Klein and Manning (2003) for more information. The performance of this Markovized PCFG model lags behind the spectral model: it is 82.59%. This is probably due to the complexity of the grammar which causes overfitting. Condensing the sibling and parent information using latent states as done in the spectral model leads to better generalization.",
| "cite_spans": [ |
| { |
| "start": 869, |
| "end": 929, |
| "text": "(m = 8: 86.44 , m = 16: 86.86, m = 24: 87.24 , m = 32: 88.07", |
| "ref_id": null |
| }, |
| { |
| "start": 1176, |
| "end": 1200, |
| "text": "Klein and Manning (2003)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Smoothing, Features, and Negatives", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "It is important to note that the results for both EM and the spectral algorithm are comparable to state of the art, but there are other results previously reported in the literature which are higher. For example, Hiroyuki et al. (2012) report an accuracy of 92.4 F 1 on section 23 of the Penn WSJ treebank using a Bayesian tree substitution grammar; Charniak and Johnson (2005) report accuracy of 91.4 using a discriminative reranking model; Carreras et al. (2008) report 91.1 F 1 accuracy for a discriminative, perceptron-trained model; Petrov and Klein (2007) report an accuracy of 90.1 F 1 , using L-PCFGs, but with a split-merge training procedure. Collins (2003) reports an accuracy of 88.2 F 1 , which is comparable to the results in this paper.", |
| "cite_spans": [ |
| { |
| "start": 213, |
| "end": 235, |
| "text": "Hiroyuki et al. (2012)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 350, |
| "end": 377, |
| "text": "Charniak and Johnson (2005)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 442, |
| "end": 464, |
| "text": "Carreras et al. (2008)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 538, |
| "end": 561, |
| "text": "Petrov and Klein (2007)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 653, |
| "end": 667, |
| "text": "Collins (2003)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Smoothing, Features, and Negatives", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The spectral learning algorithm gives the same level of accuracy as EM in our experiments, but has significantly faster training times. There are several areas for future work. There are a large number of parameters in the model, and we suspect that more sophisticated regularization methods than the smoothing method we have described may improve performance. Future work should also investigate other choices for the functions \u03c6 and \u03c8. There are natural ways to extend the approach to semi-supervised learning; for example the SVD step, where representations of outside and inside trees are learned, could be applied to unlabeled data parsed by a firstpass parser. Finally, the methods we have described should be applicable to spectral learning for other latent variable models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "under assumptions on certain singular values in the model; see section 2.3.1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In fact, in our implementation we calculate marginals \u00b5(a \u2192 b c, i, k, j) for a, b, c \u2208 N and 1 \u2264 i \u2264 k < j, and \u00b5(a, i, i) for a \u2208 N , 1 \u2264 i \u2264 n, then apply the CKY algorithm to find the parse tree that maximizes the sum of the marginals. For simplicity of presentation we will refer to marginals of the form \u00b5(a, i, j) in the remainder of this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the English head rules from the Stanford parser(Klein and Manning, 2003).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "ExperimentsIn this section we describe parsing experiments using the L-PCFG estimation method. We give comparisons to the EM algorithm, considering both speed of training, and accuracy of the resulting model; we also give experiments investigating the various choices described in the previous section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Lower values of m, such as 2 or 4, lead to substantially lower performance for both models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In practice, in order to overcome the speed issue with EM training, we parallelized the E-step on multiple cores. The spectral algorithm can be similarly parallelized, computing statistics and parameters for each nonterminal separately.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Columbia University gratefully acknowledges the support of the Defense Advanced Research Projects Agency (DARPA) Machine Reading Program under Air Force Research Laboratory (AFRL) prime contract no. FA8750-09-C-0181. Any opinions, findings, and conclusions or recommendations ex-pressed in this material are those of the author(s) and do not necessarily reflect the view of DARPA, AFRL, or the US government. Shay Cohen was supported by the National Science Foundation under Grant #1136996 to the Computing Research Association for the CIFellows Project. Dean Foster was supported by National Science Foundation grant 1106743.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Tensor decompositions for learning latent-variable models", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Anandkumar", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Ge", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hsu", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Kakade", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Telgarsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1210.7559" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Anandkumar, R. Ge, D. Hsu, S. M. Kakade, and M. Telgarsky. 2012. Tensor decompositions for learn- ing latent-variable models. arXiv:1210.7559.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning topic models -going beyond SVD", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Se", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Moitra", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of FOCS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arora, R. Se, and A. Moitra. 2012. Learning topic models -going beyond SVD. In Proceedings of FOCS.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A spectral approach for probabilistic grammatical inference on trees", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Bailly", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Habrar", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Denis", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of ALT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Bailly, A. Habrar, and F. Denis. 2010. A spectral approach for probabilistic grammatical inference on trees. In Proceedings of ALT.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A spectral learning algorithm for finite state transducers", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Balle", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Quattoni", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Carreras", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of ECML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Balle, A. Quattoni, and X. Carreras. 2011. A spec- tral learning algorithm for finite state transducers. In Proceedings of ECML.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A procedure for quantitatively comparing the syntactic coverage of English grammars", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Abney", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Flickenger", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Gdaniec", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Grishman", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Harrison", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hindle", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Ingria", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Jelinek", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Klavans", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Liberman", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Santorini", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Strzalkowski", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Proceedings of DARPA Workshop on Speech and Natural Language", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Black, S. Abney, D. Flickenger, C. Gdaniec, R. Gr- ishman, P Harrison, D. Hindle, R. Ingria, F. Jelinek, J. Klavans, M. Liberman, M. Marcus, S. Roukos, B. Santorini, and T. Strzalkowski. 1991. A procedure for quantitatively comparing the syntactic coverage of English grammars. In Proceedings of DARPA Work- shop on Speech and Natural Language.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "TAG, Dynamic Programming, and the Perceptron for Efficient, Feature-rich Parsing", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Carreras", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Koo", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "9--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "X. Carreras, M. Collins, and T. Koo. 2008. TAG, Dy- namic Programming, and the Perceptron for Efficient, Feature-rich Parsing. In Proceedings of CoNLL, pages 9-16.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Coarse-to-fine nbest parsing and maxent discriminative reranking", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. Charniak and M. Johnson. 2005. Coarse-to-fine n- best parsing and maxent discriminative reranking. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Spectral learning of latent-variable PCFGs", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "B" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Stratos", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "P" |
| ], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Ungar", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. B. Cohen, K. Stratos, M. Collins, D. P. Foster, and L. Ungar. 2012. Spectral learning of latent-variable PCFGs. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Head-driven statistical models for natural language processing", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Computational Linguistics", |
| "volume": "29", |
| "issue": "", |
| "pages": "589--637", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Collins. 2003. Head-driven statistical models for nat- ural language processing. Computational Linguistics, 29:589-637.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Multiview learning of word embeddings via CCA", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Dhillon", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "P" |
| ], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "H" |
| ], |
| "last": "Ungar", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. Dhillon, D. P. Foster, and L. H. Ungar. 2011. Multi- view learning of word embeddings via CCA. In Pro- ceedings of NIPS.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Spectral dependency parsing with latent variables", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Dhillon", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Rodu", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "P" |
| ], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "H" |
| ], |
| "last": "Ungar", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "P. Dhillon, J. Rodu, M. Collins, D. P. Foster, and L. H. Ungar. 2012. Spectral dependency parsing with latent variables. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Parsing algorithms and metrics", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Goodman. 1996. Parsing algorithms and metrics. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Canonical correlation analysis: An overview with application to learning methods", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hardoon", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Szedmak", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Shawe-Taylor", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Neural Computation", |
| "volume": "16", |
| "issue": "12", |
| "pages": "2639--2664", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Hardoon, S. Szedmak, and J. Shawe-Taylor. 2004. Canonical correlation analysis: An overview with ap- plication to learning methods. Neural Computation, 16(12):2639-2664.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Bayesian symbol-refined tree substitution grammars for syntactic parsing", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hiroyuki", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Yusuke", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Akinori", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Masaaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "440--448", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Hiroyuki, M. Yusuke, F. Akinori, and N. Masaaki. 2012. Bayesian symbol-refined tree substitution gram- mars for syntactic parsing. In Proceedings of ACL, pages 440-448.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Relations between two sets of variates", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Hotelling", |
| "suffix": "" |
| } |
| ], |
| "year": 1936, |
| "venue": "Biometrika", |
| "volume": "28", |
| "issue": "", |
| "pages": "321--377", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. Hotelling. 1936. Relations between two sets of variates. Biometrika, 28:321-377.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A spectral algorithm for learning hidden Markov models", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hsu", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Kakade", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of COLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Hsu, S. M. Kakade, and T. Zhang. 2009. A spec- tral algorithm for learning hidden Markov models. In Proceedings of COLT.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Multi-view regression via canonical correlation analysis", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Kakade", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "P" |
| ], |
| "last": "Foster", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "COLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Kakade and D. P. Foster. 2009. Multi-view regres- sion via canonical correlation analysis. In COLT.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Accurate unlexicalized parsing", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proc. of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "423--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Klein and C. D. Manning. 2003. Accurate unlexical- ized parsing. In Proc. of ACL, pages 423-430.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Spectral learning for non-deterministic dependency parsing", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [ |
| "M" |
| ], |
| "last": "Luque", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Quattoni", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Balle", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Carreras", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F. M. Luque, A. Quattoni, B. Balle, and X. Carreras. 2012. Spectral learning for non-deterministic depen- dency parsing. In Proceedings of EACL.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Building a large annotated corpus of English: The Penn treebank", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "P" |
| ], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Santorini", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "A" |
| ], |
| "last": "Marcinkiewicz", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "", |
| "pages": "313--330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. P. Marcus, B. Santorini, and M. A. Marcinkiewicz. 1993. Building a large annotated corpus of En- glish: The Penn treebank. Computational Linguistics, 19:313-330.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "TurboParsers: Dependency parsing by approximate variational inference", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "F T" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "P" |
| ], |
| "last": "Xing", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "T" |
| ], |
| "last": "Figueiredo", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "Q" |
| ], |
| "last": "Aguiar", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. F. T. Martins, N. A. Smith, E. P. Xing, M. T. Figueiredo, and M. Q. Aguiar. 2010. TurboParsers: Dependency parsing by approximate variational infer- ence. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Probabilistic CFG with latent annotations", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Matsuzaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Tsujii", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Matsuzaki, Y. Miyao, and J. Tsujii. 2005. Proba- bilistic CFG with latent annotations. In Proceedings of ACL.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A spectral algorithm for latent tree graphical models", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "P" |
| ], |
| "last": "Xing", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of The 28th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Parikh, L. Song, and E. P. Xing. 2011. A spectral al- gorithm for latent tree graphical models. In Proceed- ings of The 28th International Conference on Machine Learning (ICML 2011).", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Improved inference for unlexicalized parsing", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Petrov and D. Klein. 2007. Improved inference for unlexicalized parsing. In Proc. of HLT-NAACL.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Learning accurate, compact, and interpretable tree annotation", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Barrett", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Thibaux", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of COLING-ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Petrov, L. Barrett, R. Thibaux, and D. Klein. 2006. Learning accurate, compact, and interpretable tree an- notation. In Proceedings of COLING-ACL.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Reduced-rank hidden Markov models", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Siddiqi", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Boots", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Gordon", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "JMLR", |
| "volume": "9", |
| "issue": "", |
| "pages": "741--748", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Siddiqi, B. Boots, and G. Gordon. 2010. Reduced- rank hidden markov models. JMLR, 9:741-748.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "A spectral algorithm for learning mixtures of distributions", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Vempala", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Journal of Computer and System Sciences", |
| "volume": "68", |
| "issue": "4", |
| "pages": "841--860", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S. Vempala and G. Wang. 2004. A spectral algorithm for learning mixtures of distributions. Journal of Com- puter and System Sciences, 68(4):841-860.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "Section 3.1 gives definitions of \u03c6(t) and \u03c8(o) used in our experiments. The definitions of \u03c6(t)" |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "html": null, |
| "text": "83.51 86.45 86.68 86.69 86.63 86.67 86.70 86.82 86.87 86.83 m = 16 85.18 87.94 88.32 88.21 88.10 87.86 87.70 87.46 87.34 87.24 m = 24 83.62 88.19 88.35 88.25 87.73 87.41 87.35 87.26 87.02 86.80 m = 32 83.23 88.56 88.52 87.82 87.06 86.47 86.38 85.85 85.75 85.57 Table 1: Results on section 22 for the EM algorithm, varying the number of iterations used. Best results in each row are in boldface.", |
| "content": "<table><tr><td/><td>10</td><td>20</td><td>30</td><td>40</td><td>50</td><td>60</td><td>70</td><td>80</td><td>90</td><td>100</td></tr><tr><td colspan=\"2\">m = 8</td><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td>single</td><td>EM</td><td/><td/><td/><td colspan=\"3\">spectral algorithm</td><td/></tr><tr><td/><td colspan=\"3\">EM iter. best model total</td><td/><td colspan=\"6\">feature transfer + scaling SVD a \u2192 b c a \u2192 x</td></tr><tr><td>m = 8</td><td>6m</td><td>3h</td><td colspan=\"2\">3h32m</td><td/><td/><td/><td colspan=\"3\">36m 1h34m</td><td>10m</td></tr><tr><td>m = 16 m = 24</td><td>52m 3h7m</td><td>26h6m 93h36m</td><td colspan=\"2\">5h19m 7h15m</td><td>22m</td><td>49m</td><td/><td colspan=\"3\">34m 3h13m 36m 4h54m</td><td>19m 28m</td></tr><tr><td>m = 32</td><td>9h21m</td><td colspan=\"3\">187h12m 9h52m</td><td/><td/><td/><td colspan=\"3\">35m 7h16m</td><td>41m</td></tr></table>", |
| "num": null |
| } |
| } |
| } |
| } |