| { |
| "paper_id": "D15-1032", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:27:51.405756Z" |
| }, |
| "title": "An Empirical Analysis of Optimization for Max-Margin NLP", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "K" |
| ], |
| "last": "Kummerfeld", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "postCode": "94720", |
| "settlement": "Berkeley Berkeley", |
| "region": "CA", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Taylor", |
| "middle": [], |
| "last": "Berg-Kirkpatrick", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "postCode": "94720", |
| "settlement": "Berkeley Berkeley", |
| "region": "CA", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "postCode": "94720", |
| "settlement": "Berkeley Berkeley", |
| "region": "CA", |
| "country": "USA" |
| } |
| }, |
| "email": "klein@cs.berkeley.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Despite the convexity of structured maxmargin objectives (Taskar et al., 2004; Tsochantaridis et al., 2004), the many ways to optimize them are not equally effective in practice. We compare a range of online optimization methods over a variety of structured NLP tasks (coreference, summarization, parsing, etc) and find several broad trends. First, margin methods do tend to outperform both likelihood and the perceptron. Second, for max-margin objectives, primal optimization methods are often more robust and progress faster than dual methods. This advantage is most pronounced for tasks with dense or continuous-valued features. Overall, we argue for a particularly simple online primal subgradient descent method that, despite being rarely mentioned in the literature, is surprisingly effective in relation to its alternatives.", |
| "pdf_parse": { |
| "paper_id": "D15-1032", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Despite the convexity of structured maxmargin objectives (Taskar et al., 2004; Tsochantaridis et al., 2004), the many ways to optimize them are not equally effective in practice. We compare a range of online optimization methods over a variety of structured NLP tasks (coreference, summarization, parsing, etc) and find several broad trends. First, margin methods do tend to outperform both likelihood and the perceptron. Second, for max-margin objectives, primal optimization methods are often more robust and progress faster than dual methods. This advantage is most pronounced for tasks with dense or continuous-valued features. Overall, we argue for a particularly simple online primal subgradient descent method that, despite being rarely mentioned in the literature, is surprisingly effective in relation to its alternatives.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Structured discriminative models have proven effective across a range of tasks in NLP including tagging (Lafferty et al., 2001; Collins, 2002) , reranking parses (Charniak and Johnson, 2005) , and many more (Taskar, 2004; Smith, 2011) . Common approaches to training such models include margin methods, likelihood methods, and mistake-driven procedures like the averaged perceptron algorithm. In this paper, we primarily consider the relative empirical behavior of several online optimization methods for margin-based objectives, with secondary attention to other approaches for calibration.", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 127, |
| "text": "(Lafferty et al., 2001;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 128, |
| "end": 142, |
| "text": "Collins, 2002)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 162, |
| "end": 190, |
| "text": "(Charniak and Johnson, 2005)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 207, |
| "end": 221, |
| "text": "(Taskar, 2004;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 222, |
| "end": 234, |
| "text": "Smith, 2011)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "It is increasingly common to train structured models using a max-margin objective that incorporates a loss function that decomposes in the same way as the dynamic program used for inference (Taskar, 2004) . Fortunately, most structured margin objectives are convex, so a range of optimization methods with similar theoretical properties are available -in short, any of these methods will work in the end. However, in practice, how fast each method converges varies across tasks. Moreover, some of the most popular methods more loosely associated with the margin objective, such as the MIRA algorithm (Crammer and Singer, 2003) or even the averaged perceptron (Freund and Schapire, 1999) are not global optimizations and can have different properties.", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 204, |
| "text": "(Taskar, 2004)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 600, |
| "end": 626, |
| "text": "(Crammer and Singer, 2003)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 659, |
| "end": 686, |
| "text": "(Freund and Schapire, 1999)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We analyze a range of methods empirically, to understand on which tasks and with which feature types, they are most effective. We modified six existing, high-performance, systems to enable loss-augmented decoding, and trained these models with six different methods. We have released our learning code as a Java library. 1 Our results provide support for the conventional wisdom that margin-based optimization is broadly effective, frequently outperforming likelihood optimization and the perceptron algorithm. We also found that directly optimizing the primal structured margin objective based on subgradients calculated from single training instances is surprisingly effective, performing consistently well across all tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We implemented a range of optimization methods that are widely used in NLP; below we categorize them into margin, likelihood, and perceptron-like methods. In each case, we used a structured loss function, modified to suit each task. In general, we focus on online methods because of their substantial speed advantages, rather than algorithms such as LBFGS (Liu and Nocedal, 1989) or batch Exponentiated Gradient (Collins et al., 2008) .", |
| "cite_spans": [ |
| { |
| "start": 356, |
| "end": 379, |
| "text": "(Liu and Nocedal, 1989)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 412, |
| "end": 434, |
| "text": "(Collins et al., 2008)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Algorithm 1 The Online Primal Subgradient Algorithm with 1 or 2 regularization, and sparse updates Parameters: g iters Number of iterations C Regularization constant (10 \u22121 to 10 \u22128 ) \u03b7", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Learning rate (10 0 to 10 \u22124 ) \u03b4 Initializer for q (10 \u22126 ) w = 0 Weight vector q = \u03b4 Cumulative squared gradient u = 0 Time of last update for each weight n = 0 Number of updates so far for iter \u2208 [1, iters] do for batch \u2208 data do Sum gradients from loss-aug. decodes", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "g = 0 for (x i , y i ) \u2208 batch do for y = argmax y \u2208Y (x i ) [SCORE(y ) + L(y , y i )] for g += (f (y) \u2212 f (y i )) Update the active features q += g 2 ......Element-wise square n += 1 for f \u2208 nonzero features in g do w f = UPDATE-ACTIVE(w f , g f , q f ) u f = n", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The AdaGrad update", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "function UPDATE-ACTIVE(w, g, q) return w \u221a q\u2212\u03b7g \u03b7C+ \u221a q [ 2 ] d = |w \u2212 \u03b7 \u221a q g| \u2212 \u03b7 \u221a q C [ 1 ] return sign(w \u2212 \u03b7 \u221a q g) \u2022 max(0, d) [ 1 ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Functions only needed for sparse updates A single update equivalent to a series of AdaGrad updates where the weight's subgradient was zero", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "function UPDATE-CATCHUP(w, q, t) return w \u221a q \u03b7C+ \u221a q t [ 2 ] return sign(w) \u2022 max(0, |w| \u2212 \u03b7C \u221a q t) [ 1 ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Compute w f (y ), but for each weight, apply an update to catch up on the steps in which the gradient for that weight was zero", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "function SCORE(y ) s = 0 for f \u2208 f (y ) do for w f = UPDATE-CATCHUP(w f , q f , n\u2212u f ) for u f = n for s += w f return s", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Note: To implement without the sparse update, use SCORE = w f (y ), and run the update loop on the left over all features. Also, for comparison, to implement perceptron, remove the sparse update and use UPDATE-ACTIVE = return w + g.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Learning Algorithms", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Cutting Plane (Tsochantaridis et al., 2004) Solves a sequence of quadratic programs (QP), each of which is an approximation to the dual formulation of the margin-based learning problem. At each iteration, the current QP is refined by adding additional active constraints. We solve each approximate QP using Sequential Minimal Optimization (Platt, 1999; .", |
| "cite_spans": [ |
| { |
| "start": 14, |
| "end": 43, |
| "text": "(Tsochantaridis et al., 2004)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 339, |
| "end": 352, |
| "text": "(Platt, 1999;", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Margin", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Online Cutting Plane (Chang and Yih, 2013) A modified form of cutting plane that only partially solves the QP on each iteration, operating in the dual space and optimizing a single dual variable on each iteration. We use a variant of Chang and Yih (2013) for the L 1 loss margin objective.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 42, |
| "text": "(Chang and Yih, 2013)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 234, |
| "end": 254, |
| "text": "Chang and Yih (2013)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Margin", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Online Primal Subgradient (Ratliff et al., 2007) Computes the subgradient of the margin objective on each instance by performing a loss-augmented decode, then uses these instance-wise subgradients to optimize the global objective using Ada-Grad (Duchi et al., 2011) with either L 1 or L 2 regularization. The simplest implementation of Ada-Grad touches every weight when doing the update for a batch. To save time, we distinguish between two different types of update. When the subgradient is nonzero, we apply the usual update. When the subgradient is zero, we apply a numerically equivalent update later, at the next time the weight is queried. This saves time, as we only touch the weights corresponding to the (usually sparse) nonzero directions in the current batch's subgradient. Algorithm 1 gives pseudocode for our implementation, which was based on Dyer (2013).", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 48, |
| "text": "(Ratliff et al., 2007)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 245, |
| "end": 265, |
| "text": "(Duchi et al., 2011)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Margin", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Stochastic Gradient Descent The built-in training method for many of the systems was softmax-margin likelihood optimization (Gimpel and Smith, 2010) via subgradient descent with either AdaGrad or AdaDelta (Duchi et al., 2011; Zeiler, 2012) . We include results with each system's default settings as a point of comparison.", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 148, |
| "text": "(Gimpel and Smith, 2010)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 205, |
| "end": 225, |
| "text": "(Duchi et al., 2011;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 226, |
| "end": 239, |
| "text": "Zeiler, 2012)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Likelihood", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Averaged Perceptron (Freund and Schapire, 1999; Collins, 2002) On a mistake, weights for features on the system output are decremented and weights for features on the gold output are incre-mented. Weights are averaged over the course of training, and decoding is not loss-augmented.", |
| "cite_spans": [ |
| { |
| "start": 20, |
| "end": 47, |
| "text": "(Freund and Schapire, 1999;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 48, |
| "end": 62, |
| "text": "Collins, 2002)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mistake Driven", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Margin Infused Relaxed Algorithm (Crammer and Singer, 2003) A modified form of the perceptron that uses loss-augmented decoding and makes the smallest update necessary to give a margin at least as large as the loss of each solution. MIRA is generally presented as being related to the perceptron because it does not explicitly optimize a global objective, but it also has connections to margin methods, as explored by Chiang (2012) . We consider one-best decoding, where the quadratic program for determining the magnitude of the update has a closed form.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 59, |
| "text": "(Crammer and Singer, 2003)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 418, |
| "end": 431, |
| "text": "Chiang (2012)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mistake Driven", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We considered tasks covering a range of structured output spaces, from sequences to non-projective trees. Most of the corresponding systems use models designed for likelihood-based structured prediction. Some use sparse indicator features, while others use dense continuous-valued features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tasks and Systems", |
| "sec_num": "3" |
| }, |
| { |
| "text": "This task provides a case of sequence prediction. We used the NER component of 's entity stack, training it independently of the other components. We define the loss as the number of incorrectly labelled words, and train on the CoNLL 2012 division of OntoNotes (Pradhan et al., 2007) .", |
| "cite_spans": [ |
| { |
| "start": 261, |
| "end": 283, |
| "text": "(Pradhan et al., 2007)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": null |
| }, |
| { |
| "text": "Coreference Resolution This gives an example of training when there are multiple gold outputs for each instance. The system we consider uses latent links between mentions in the same cluster, marginalizing over the possibilities during learning (Durrett and Klein, 2013 ). Since the model decomposes across mentions, we train by treating them as independent predictions with multiple gold outputs, comparing the inferred link with the gold link that is scored highest under the current model. We use the system's weighted loss function, and the same data as for NER.", |
| "cite_spans": [ |
| { |
| "start": 245, |
| "end": 269, |
| "text": "(Durrett and Klein, 2013", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity Recognition", |
| "sec_num": null |
| }, |
| { |
| "text": "We considered two different systems. The first uses only sparse indicator features (Hall et al., 2014) , while the second is parameterized via a neural network and adds dense features derived from word vectors (Durrett and Klein, 2015) . 2 We define the loss as the number of incorrect rule productions, and use the standard Penn Treebank division (Marcus et al., 1993) .", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 102, |
| "text": "(Hall et al., 2014)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 210, |
| "end": 235, |
| "text": "(Durrett and Klein, 2015)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 238, |
| "end": 239, |
| "text": "2", |
| "ref_id": null |
| }, |
| { |
| "start": 348, |
| "end": 369, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constituency Parsing", |
| "sec_num": null |
| }, |
| { |
| "text": "We used the first-order MST parser in two modes, Eisner's algorithm for projective trees (Eisner, 1996; McDonald et al., 2005b) , and the Chu-Liu-Edmonds algorithm for non-projective trees (Chu and Liu, 1965; Edmonds, 1967; McDonald et al., 2005a) . The loss function was the number of arcs with an incorrect parent or label, and we used the standard division of the English Universal Dependencies (Agi\u0107 et al., 2015) . The built-in training method for MST parser is averaged, 1-best MIRA, which we include for comparison purposes.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 103, |
| "text": "(Eisner, 1996;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 104, |
| "end": 127, |
| "text": "McDonald et al., 2005b)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 189, |
| "end": 208, |
| "text": "(Chu and Liu, 1965;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 209, |
| "end": 223, |
| "text": "Edmonds, 1967;", |
| "ref_id": null |
| }, |
| { |
| "start": 224, |
| "end": 247, |
| "text": "McDonald et al., 2005a)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 398, |
| "end": 417, |
| "text": "(Agi\u0107 et al., 2015)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dependency Parsing", |
| "sec_num": null |
| }, |
| { |
| "text": "Summarization With this task, we explore a case in which there is relatively little training data and the model uses a small number of dense features. The system uses a linear model with features considering counts of bigrams in the input document collection. The system forms the output summary by selecting a subset of the sentences in the input collection that does not exceed a fixed word-length limit (Berg-Kirkpatrick et al., 2011) . Inference involves solving an integer linear program, the loss function is bigram recall, and the data is from the TAC shared tasks (Dang and Owczarzak, 2008; Dang and Owczarzak, 2009) .", |
| "cite_spans": [ |
| { |
| "start": 406, |
| "end": 437, |
| "text": "(Berg-Kirkpatrick et al., 2011)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 572, |
| "end": 598, |
| "text": "(Dang and Owczarzak, 2008;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 599, |
| "end": 624, |
| "text": "Dang and Owczarzak, 2009)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dependency Parsing", |
| "sec_num": null |
| }, |
| { |
| "text": "For each method we tuned hyperparameters by considering a grid of values and measuring dev set performance over five training iterations, except for constituency parsing, where we took five measurements, 4k instances apart. For the cutting plane methods we cached constraints in memory to save time, but the memory cost was too great to run batch cutting plane on constituency parsing (over 60 Gb), and so is not included in the results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tuning", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "From the results in Figure 1 and during tuning, we can make several observations about these optimization methods' performance on these tasks.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 20, |
| "end": 28, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Observations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Observation 1: Margin methods generally perform best As expected given prior work, margin methods equal or surpass the performance of likelihood and perceptron methods across almost all of these tasks. Coreference resolution is an exception, but that model has latent variables that likelihood may treat more effectively, Time per iteration relative to averaged perceptron Method NER Coref Span Parser Neural Parser MST Proj. MST Non-Proj. Summ. AP 1.0 1.0 1.0 -1.0 1.0 1.0 MIRA 1.9 1.0 1.0 1.0 1.0 1.0 1.0 CP 60.8 2.7 --6.8 8.4 0.6 OCP 2.7 1.7 0.9 0.9 1.5 1.6 1.1 OPS 3.9 1.3 1.1 1.0 1.8 2.0 0.9 Decoding 0.6 0.2 0.9 0.7 0.7 0.6 0.7 Table 1 : Comparison of time per iteration relative to the perceptron (or MIRA for the Neural Parser).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 634, |
| "end": 641, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Observations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Decoding shows the time spent on inference. Times were averaged across the entire run. OPS uses batch size 10 for NER to save time, but performs just as well as with batch size 1 in Figure 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 182, |
| "end": 190, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Observations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "and has a weighted loss function tuned for likelihood (softmax-margin).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Observations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Observation 2: Dual cutting plane methods appear to learn more slowly Both cutting plane methods took more iterations to reach peak performance than the other methods. In addition, for batch cutting plane, accuracy varied so drastically that we extended tuning to ten iterations, and even then choosing the best parameters was sometimes difficult. Table 1 shows that the online cutting plane method did take slightly less time per iteration than OPS, but not enough to compensate for the slower learning rate.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 348, |
| "end": 355, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Observations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Observation 3: Learning with real-valued features is difficult for perceptron methods Learning models for tasks such as NER, which are driven by sparse indicator features, often roughly amounts to tallying the features that are contrastively present in correct hypotheses. In such cases, most learning methods work fairly well. However, when models use real-valued features, learning may involve determining a more delicate balance between features. In the models we consider that have real-valued features, summarization and parsing with a neural model, we can see that perceptron methods indeed have difficulty. 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Observations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Observation 4: Online Primal Subgradient is robust and effective All of the margin based methods, and gradient descent on likelihood, require tuning of a regularization constant and a step size (or convergence requirements for SMO). The dual methods were particularly sensitive to these hyperparameters, performing poorly if they were not chosen carefully. In contrast, performance for the primal methods remained high over a broad range of values. Our implementation of sparse updates for Ada-Grad was crucial for high-speed performance, decreasing time by an order of magnitude on tasks with many sparse features, such as NER and dependency parsing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Observations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Observation 5: Other minor properties We found that varying the batch size did not substantially impact performance after a given number of decodes, but did enable a speed improvement as decoding of multiple instances can occur in parallel. Increasing batch sizes leads to a further improvement to OPS, as overall there are fewer updates per iteration. For some tasks, re-tuning the step size was necessary when changing batch size.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Observations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The effectiveness of max-margin optimization methods is widely known, but the default choice of learning algorithm in NLP is often a form of the perceptron (or likelihood) instead. Our results illustrate some of the pitfalls of perceptron methods and suggest that online optimization of the maxmargin objective via primal subgradients is a simple, well-behaved alternative.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We would like to thank Greg Durrett for assistance running his code, Adam Pauls for advice on dual methods, and the anonymous reviewers for their helpful suggestions. This work was supported by National Science Foundation grant CNS-1237265, Office of Naval Research MURI grant N000140911081, and a General Sir John Monash Fellowship to the first author. Opinions, findings, conclusions and recommendations expressed in this material are those of the authors and do not necessarily reflect the views of sponsors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": "6" |
| }, |
| { |
| "text": "http://nlp.cs.berkeley.edu/software.shtml", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Our results are slightly lower as we save time by only using the dense features and a reduced n-gram context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For the neural parser, the perceptron took a gradient step for each mistake, but this had dismal performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Prokopis Prokopidis, Sampo Pyysalo, Wolfgang Seeker", |
| "authors": [ |
| { |
| "first": "Zeljko", |
| "middle": [], |
| "last": "Agi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [ |
| "Jesus" |
| ], |
| "last": "Aranzabe", |
| "suffix": "" |
| }, |
| { |
| "first": "Aitziber", |
| "middle": [], |
| "last": "Atutxa", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinho", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Catherine", |
| "middle": [], |
| "last": "De Marneffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Dozat", |
| "suffix": "" |
| }, |
| { |
| "first": "Rich\u00e1rd", |
| "middle": [], |
| "last": "Farkas", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Ginter", |
| "suffix": "" |
| }, |
| { |
| "first": "Iakes", |
| "middle": [], |
| "last": "Goenaga", |
| "suffix": "" |
| }, |
| { |
| "first": "Koldo", |
| "middle": [], |
| "last": "Gojenola", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Haji\u010d", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [ |
| "Traerup" |
| ], |
| "last": "Johannsen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenna", |
| "middle": [], |
| "last": "Kanerva", |
| "suffix": "" |
| }, |
| { |
| "first": "Juha", |
| "middle": [], |
| "last": "Kuokkala", |
| "suffix": "" |
| }, |
| { |
| "first": "Veronika", |
| "middle": [], |
| "last": "Laippala", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Lenci", |
| "suffix": "" |
| }, |
| { |
| "first": "Krister", |
| "middle": [], |
| "last": "Lind\u00e9n", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Ljube\u0161i\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zeljko Agi\u0107, Maria Jesus Aranzabe, Aitziber Atutxa, Cristina Bosco, Jinho Choi, Marie-Catherine de Marneffe, Timothy Dozat, Rich\u00e1rd Farkas, Jennifer Foster, Filip Ginter, Iakes Goenaga, Koldo Gojenola, Yoav Goldberg, Jan Haji\u010d, An- ders Traerup Johannsen, Jenna Kanerva, Juha Kuokkala, Veronika Laippala, Alessandro Lenci, Krister Lind\u00e9n, Nikola Ljube\u0161i\u0107, Teresa Lynn, Christopher Manning, H\u00e9ctor Alonso Mart\u00ednez, Ryan McDonald, Anna Missil\u00e4, Simonetta Monte- magni, Joakim Nivre, Hanna Nurmi, Petya Osen- ova, Slav Petrov, Jussi Piitulainen, Barbara Plank, Prokopis Prokopidis, Sampo Pyysalo, Wolfgang Seeker, Mojgan Seraji, Natalia Silveira, Maria Simi, Kiril Simov, Aaron Smith, Reut Tsarfaty, Veronika Vincze, and Daniel Zeman. 2015. Universal depen- dencies 1.1.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Jointly learning to extract and compress", |
| "authors": [ |
| { |
| "first": "Taylor", |
| "middle": [], |
| "last": "Berg-Kirkpatrick", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Gillick", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "481--490", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Taylor Berg-Kirkpatrick, Dan Gillick, and Dan Klein. 2011. Jointly learning to extract and compress. In Proceedings of the 49th Annual Meeting of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, pages 481-490, Portland, Ore- gon, USA, June.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Dual coordinate descent algorithms for efficient large margin structured prediction", |
| "authors": [ |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "207--218", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ming-Wei Chang and Wen-Tau Yih. 2013. Dual coor- dinate descent algorithms for efficient large margin structured prediction. Transactions of the Associa- tion for Computational Linguistics, 1:207-218.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Coarseto-fine N-best parsing and MaxEnt discriminative reranking", |
| "authors": [ |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL'05)", |
| "volume": "", |
| "issue": "", |
| "pages": "173--180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eugene Charniak and Mark Johnson. 2005. Coarse- to-fine N-best parsing and MaxEnt discriminative reranking. In Proceedings of the 43rd Annual Meet- ing of the Association for Computational Linguistics (ACL'05), pages 173-180, June.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Hope and fear for discriminative training of statistical translation models", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "13", |
| "issue": "1", |
| "pages": "1159--1187", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Chiang. 2012. Hope and fear for discriminative training of statistical translation models. Journal of Machine Learning Research, 13(1):1159-1187, April.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "On the shortest arborescence of a directed graph", |
| "authors": [ |
| { |
| "first": "Yoeng-Jin", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tseng-Hong", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 1965, |
| "venue": "Science Sinica", |
| "volume": "", |
| "issue": "", |
| "pages": "1396--1400", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoeng-jin Chu and Tseng-hong Liu. 1965. On the shortest arborescence of a directed graph. Science Sinica, pages 1396-1400.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Exponentiated gradient algorithms for conditional random fields and max-margin markov networks", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Globerson", |
| "suffix": "" |
| }, |
| { |
| "first": "Terry", |
| "middle": [], |
| "last": "Koo", |
| "suffix": "" |
| }, |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Carreras", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "L" |
| ], |
| "last": "Bartlett", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "9", |
| "issue": "", |
| "pages": "1775--1822", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Collins, Amir Globerson, Terry Koo, Xavier Carreras, and Peter L. Bartlett. 2008. Exponen- tiated gradient algorithms for conditional random fields and max-margin markov networks. Journal of Machine Learning Research, 9:1775-1822, June.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Discriminative training methods for hidden markov models: Theory and experiments with perceptron algorithms", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the ACL-02 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "10", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Collins. 2002. Discriminative training meth- ods for hidden markov models: Theory and exper- iments with perceptron algorithms. In Proceedings of the ACL-02 Conference on Empirical Methods in Natural Language Processing -Volume 10, pages 1- 8.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Ultraconservative online algorithms for multiclass problems", |
| "authors": [ |
| { |
| "first": "Koby", |
| "middle": [], |
| "last": "Crammer", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoram", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "951--991", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koby Crammer and Yoram Singer. 2003. Ultracon- servative online algorithms for multiclass problems. Journal of Machine Learning Research, 3:951-991, March.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Overview of the TAC 2008 update summarization task", |
| "authors": [ |
| { |
| "first": "Trang", |
| "middle": [], |
| "last": "Hoa", |
| "suffix": "" |
| }, |
| { |
| "first": "Karolina", |
| "middle": [], |
| "last": "Dang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Owczarzak", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Text Analysis Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hoa Trang Dang and Karolina Owczarzak. 2008. Overview of the TAC 2008 update summarization task. In Text Analysis Conference.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Overview of the TAC 2009 summarization track", |
| "authors": [ |
| { |
| "first": "Trang", |
| "middle": [], |
| "last": "Hoa", |
| "suffix": "" |
| }, |
| { |
| "first": "Karolina", |
| "middle": [], |
| "last": "Dang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Owczarzak", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Text Analysis Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hoa Trang Dang and Karolina Owczarzak. 2009. Overview of the TAC 2009 summarization track. In Text Analysis Conference.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Adaptive subgradient methods for online learning and stochastic optimization", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Duchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Elad", |
| "middle": [], |
| "last": "Hazan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoram", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2121--2159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Duchi, Elad Hazan, and Yoram Singer. 2011. Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research, 12:2121-2159, July.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Easy victories and uphill battles in coreference resolution", |
| "authors": [ |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Durrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1971--1982", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Greg Durrett and Dan Klein. 2013. Easy victories and uphill battles in coreference resolution. In Proceed- ings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1971-1982, Seattle, Washington, USA, October.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A joint model for entity analysis: Coreference, typing, and linking", |
| "authors": [ |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Durrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "477--490", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Greg Durrett and Dan Klein. 2014. A joint model for entity analysis: Coreference, typing, and linking. volume 2, pages 477-490.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Neural CRF parsing", |
| "authors": [ |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Durrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "302--312", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Greg Durrett and Dan Klein. 2015. Neural CRF parsing. In Proceedings of the 53rd Annual Meet- ing of the Association for Computational Linguistics and the 7th International Joint Conference on Natu- ral Language Processing (Volume 1: Long Papers), pages 302-312, Beijing, China, July.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Notes on AdaGrad", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Dyer. 2013. Notes on AdaGrad. Technical re- port, Carnegie Mellon University, June.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
"title": "Optimum branchings",
"authors": [
{
"first": "Jack",
"middle": [],
"last": "Edmonds",
"suffix": ""
}
],
"year": 1967,
"venue": "Journal of Research of the National Bureau of Standards",
| "volume": "71", |
| "issue": "", |
| "pages": "233--240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jack Edmonds. 1967. Optimum branchings. Jour- nal of Research of the National Bureau of Standards, 71B:233-240.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Three new probabilistic models for dependency parsing: An exploration", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the 16th Conference on Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "340--345", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Eisner. 1996. Three new probabilistic models for dependency parsing: An exploration. In Pro- ceedings of the 16th Conference on Computational Linguistics -Volume 1, pages 340-345.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Large margin classification using the perceptron algorithm", |
| "authors": [ |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Freund", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "E" |
| ], |
| "last": "Schapire", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Machine Learning", |
| "volume": "37", |
| "issue": "", |
| "pages": "277--296", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoav Freund and Robert E. Schapire. 1999. Large margin classification using the perceptron algorithm. Machine Learning, 37(3):277-296, December.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Softmaxmargin CRFs: Training log-linear models with cost functions", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Noah", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "733--736", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Gimpel and Noah A. Smith. 2010. Softmax- margin CRFs: Training log-linear models with cost functions. In Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Lin- guistics, pages 733-736.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Less grammar, more features", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Durrett", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "228--237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Hall, Greg Durrett, and Dan Klein. 2014. Less grammar, more features. In Proceedings of the 52nd Annual Meeting of the Association for Computa- tional Linguistics (Volume 1: Long Papers), pages 228-237, Baltimore, Maryland, USA, June.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Lafferty", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [ |
| "C N" |
| ], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the Eighteenth International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "282--289", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional random fields: Probabilistic models for segmenting and labeling se- quence data. In Proceedings of the Eighteenth In- ternational Conference on Machine Learning, pages 282-289.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "On the limited memory BFGS method for large scale optimization", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "C" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
"last": "Nocedal",
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "Mathematical Programming", |
| "volume": "45", |
| "issue": "3", |
| "pages": "503--528", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. C. Liu and J. Nocedal. 1989. On the limited memory BFGS method for large scale optimiza- tion. Mathematical Programming, 45(3):503-528, December.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Building a large annotated corpus of english: the penn treebank", |
| "authors": [ |
| { |
| "first": "Mitchell", |
| "middle": [ |
| "P" |
| ], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ann" |
| ], |
| "last": "Marcinkiewicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Beatrice", |
| "middle": [], |
| "last": "Santorini", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "2", |
| "pages": "313--330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beatrice Santorini. 1993. Building a large anno- tated corpus of english: the penn treebank. Compu- tational Linguistics, 19(2):313-330.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Online large-margin training of dependency parsers", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Koby", |
| "middle": [], |
| "last": "Crammer", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL'05)", |
| "volume": "", |
| "issue": "", |
| "pages": "91--98", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan McDonald, Koby Crammer, and Fernando Pereira. 2005a. Online large-margin training of dependency parsers. In Proceedings of the 43rd Annual Meeting of the Association for Computa- tional Linguistics (ACL'05), pages 91-98, Ann Ar- bor, Michigan, USA, June.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Non-projective dependency parsing using spanning tree algorithms", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Kiril", |
| "middle": [], |
| "last": "Ribarov", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "523--530", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan McDonald, Fernando Pereira, Kiril Ribarov, and Jan Hajic. 2005b. Non-projective dependency pars- ing using spanning tree algorithms. In Proceed- ings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing, pages 523-530, Vancouver, British Columbia, Canada, October.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Fast training of support vector machines using sequential minimal optimization", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "C" |
| ], |
| "last": "Platt", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Advances in Kernel Methods", |
| "volume": "", |
| "issue": "", |
| "pages": "185--208", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John C. Platt. 1999. Fast training of support vec- tor machines using sequential minimal optimization. In Bernhard Sch\u00f6lkopf, Christopher J. C. Burges, and Alexander J. Smola, editors, Advances in Ker- nel Methods, pages 185-208. MIT Press.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Unrestricted coreference: Identifying entities and events in OntoNotes", |
| "authors": [ |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Lance", |
| "middle": [], |
| "last": "Ramshaw", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jessica", |
| "middle": [], |
| "last": "Macbride", |
| "suffix": "" |
| }, |
| { |
| "first": "Linnea", |
| "middle": [], |
| "last": "Micciulla", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the International Conference on Semantic Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "446--453", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameer Pradhan, Lance Ramshaw, Ralph Weischedel, Jessica MacBride, and Linnea Micciulla. 2007. Unrestricted coreference: Identifying entities and events in OntoNotes. In Proceedings of the Inter- national Conference on Semantic Computing, pages 446-453, September.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Online) subgradient methods for structured prediction", |
| "authors": [ |
| { |
| "first": "Nathan", |
| "middle": [], |
| "last": "Ratliff", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Eleventh International Conference on Artificial Intelligence and Statistics (AIStats)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nathan Ratliff, J. Andrew (Drew) Bagnell, and Mar- tin Zinkevich. 2007. (Online) subgradient methods for structured prediction. In Eleventh International Conference on Artificial Intelligence and Statistics (AIStats), March.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Linguistic Structure Prediction. Synthesis Lectures on Human Language Technologies", |
| "authors": [ |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noah A. Smith. 2011. Linguistic Structure Prediction. Synthesis Lectures on Human Language Technolo- gies. Morgan and Claypool.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Max-margin parsing", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Taskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Daphne", |
| "middle": [], |
| "last": "Koller", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of EMNLP 2004", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Taskar, Dan Klein, Michael Collins, Daphne Koller, and Chris Manning. 2004. Max-margin parsing. In Proceedings of EMNLP 2004, pages 1- 8, Barcelona, Spain, July.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Learning Structured Prediction Models: A Large Margin Approach", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Taskar", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Taskar. 2004. Learning Structured Prediction Models: A Large Margin Approach. Ph.D. thesis, Stanford University.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Support vector machine learning for interdependent and structured output spaces", |
| "authors": [ |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Tsochantaridis", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Hofmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| }, |
| { |
| "first": "Yasemin", |
| "middle": [], |
| "last": "Altun", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Twenty-first International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "104--112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ioannis Tsochantaridis, Thomas Hofmann, Thorsten Joachims, and Yasemin Altun. 2004. Support vector machine learning for interdependent and structured output spaces. In Proceedings of the Twenty-first In- ternational Conference on Machine Learning, pages 104-112, July.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "ADADELTA: an adaptive learning rate method", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zeiler", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew D. Zeiler. 2012. ADADELTA: an adaptive learning rate method. CoRR, abs/1212.5701.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "text": "Variation in dev set performance (y) across training iterations (x). To show all variation, the scale of the y-axis changes partway, as indicated. Lines that stop early had converged.", |
| "type_str": "figure" |
| } |
| } |
| } |
| } |