| { |
| "paper_id": "P15-1033", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:10:47.149812Z" |
| }, |
| "title": "Transition-Based Dependency Parsing with Stack Long Short-Term Memory", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Marianas Labs \u2666 NLP Group", |
| "institution": "Pompeu Fabra University \u2660 Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Marianas Labs \u2666 NLP Group", |
| "institution": "Pompeu Fabra University \u2660 Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "miguel.ballesteros@upf.edu" |
| }, |
| { |
| "first": "\u2666", |
| "middle": [ |
| "\u2660" |
| ], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Marianas Labs \u2666 NLP Group", |
| "institution": "Pompeu Fabra University \u2660 Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Austin", |
| "middle": [], |
| "last": "Matthews", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Marianas Labs \u2666 NLP Group", |
| "institution": "Pompeu Fabra University \u2660 Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Marianas Labs \u2666 NLP Group", |
| "institution": "Pompeu Fabra University \u2660 Carnegie Mellon University", |
| "location": {} |
| }, |
| "email": "nasmith@cs.cmu.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We propose a technique for learning representations of parser states in transitionbased dependency parsers. Our primary innovation is a new control structure for sequence-to-sequence neural networksthe stack LSTM. Like the conventional stack data structures used in transitionbased parsing, elements can be pushed to or popped from the top of the stack in constant time, but, in addition, an LSTM maintains a continuous space embedding of the stack contents. This lets us formulate an efficient parsing model that captures three facets of a parser's state: (i) unbounded look-ahead into the buffer of incoming words, (ii) the complete history of actions taken by the parser, and (iii) the complete contents of the stack of partially built tree fragments, including their internal structures. Standard backpropagation techniques are used for training and yield state-of-the-art parsing performance.", |
| "pdf_parse": { |
| "paper_id": "P15-1033", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We propose a technique for learning representations of parser states in transitionbased dependency parsers. Our primary innovation is a new control structure for sequence-to-sequence neural networksthe stack LSTM. Like the conventional stack data structures used in transitionbased parsing, elements can be pushed to or popped from the top of the stack in constant time, but, in addition, an LSTM maintains a continuous space embedding of the stack contents. This lets us formulate an efficient parsing model that captures three facets of a parser's state: (i) unbounded look-ahead into the buffer of incoming words, (ii) the complete history of actions taken by the parser, and (iii) the complete contents of the stack of partially built tree fragments, including their internal structures. Standard backpropagation techniques are used for training and yield state-of-the-art parsing performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Transition-based dependency parsing formalizes the parsing problem as a series of decisions that read words sequentially from a buffer and combine them incrementally into syntactic structures (Yamada and Matsumoto, 2003; Nivre, 2003; Nivre, 2004) . This formalization is attractive since the number of operations required to build any projective parse tree is linear in the length of the sentence, making transition-based parsing computationally efficient relative to graph-and grammarbased formalisms. The challenge in transitionbased parsing is modeling which action should be taken in each of the unboundedly many states encountered as the parser progresses.", |
| "cite_spans": [ |
| { |
| "start": 192, |
| "end": 220, |
| "text": "(Yamada and Matsumoto, 2003;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 221, |
| "end": 233, |
| "text": "Nivre, 2003;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 234, |
| "end": 246, |
| "text": "Nivre, 2004)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This challenge has been addressed by development of alternative transition sets that simplify the modeling problem by making better attachment decisions (Nivre, 2007; Nivre, 2008; Nivre, 2009; Choi and McCallum, 2013; Bohnet and Nivre, 2012) , through feature engineering (Zhang and Nivre, 2011; Ballesteros and Nivre, 2014; Ballesteros and Bohnet, 2014) and more recently using neural networks (Chen and Manning, 2014; Stenetorp, 2013) .", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 166, |
| "text": "(Nivre, 2007;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 167, |
| "end": 179, |
| "text": "Nivre, 2008;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 180, |
| "end": 192, |
| "text": "Nivre, 2009;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 193, |
| "end": 217, |
| "text": "Choi and McCallum, 2013;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 218, |
| "end": 241, |
| "text": "Bohnet and Nivre, 2012)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 272, |
| "end": 295, |
| "text": "(Zhang and Nivre, 2011;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 296, |
| "end": 324, |
| "text": "Ballesteros and Nivre, 2014;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 325, |
| "end": 354, |
| "text": "Ballesteros and Bohnet, 2014)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 395, |
| "end": 419, |
| "text": "(Chen and Manning, 2014;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 420, |
| "end": 436, |
| "text": "Stenetorp, 2013)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We extend this last line of work by learning representations of the parser state that are sensitive to the complete contents of the parser's state: that is, the complete input buffer, the complete history of parser actions, and the complete contents of the stack of partially constructed syntactic structures. This \"global\" sensitivity to the state contrasts with previous work in transitionbased dependency parsing that uses only a narrow view of the parsing state when constructing representations (e.g., just the next few incoming words, the head words of the top few positions in the stack, etc.). Although our parser integrates large amounts of information, the representation used for prediction at each time step is constructed incrementally, and therefore parsing and training time remain linear in the length of the input sentence. The technical innovation that lets us do this is a variation of recurrent neural networks with long short-term memory units (LSTMs) which we call stack LSTMs ( \u00a72), and which support both reading (pushing) and \"forgetting\" (popping) inputs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our parsing model uses three stack LSTMs: one representing the input, one representing the stack of partial syntactic trees, and one representing the history of parse actions to encode parser states ( \u00a73). Since the stack of partial syntactic trees may contain both individual tokens and partial syntactic structures, representations of individual tree fragments are computed compositionally with recursive (i.e., similar to Socher et al., 2014) neural networks. The parameters are learned with backpropagation ( \u00a74), and we obtain state-of-the-art results on Chinese and English dependency parsing tasks ( \u00a75).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section we provide a brief review of LSTMs ( \u00a72.1) and then define stack LSTMs ( \u00a72.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack LSTMs", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Notation. We follow the convention that vectors are written with lowercase, boldface letters (e.g., v or v w ); matrices are written with uppercase, boldface letters (e.g., M, M a , or M ab ), and scalars are written as lowercase letters (e.g., s or q z ). Structured objects such as sequences of discrete symbols are written with lowercase, bold, italic letters (e.g., w refers to a sequence of input words). Discussion of dimensionality is deferred to the experiments section below ( \u00a75).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack LSTMs", |
| "sec_num": "2" |
| }, |
| { |
| "text": "LSTMs are a variant of recurrent neural networks (RNNs) designed to cope with the vanishing gradient problem inherent in RNNs (Hochreiter and Schmidhuber, 1997; Graves, 2013) . RNNs read a vector x t at each time step and compute a new (hidden) state h t by applying a linear map to the concatenation of the previous time step's state h t\u22121 and the input, and passing this through a logistic sigmoid nonlinearity. Although RNNs can, in principle, model long-range dependencies, training them is difficult in practice since the repeated application of a squashing nonlinearity at each step results in an exponential decay in the error signal through time. LSTMs address this with an extra memory \"cell\" (c t ) that is constructed as a linear combination of the previous state and signal from the input.", |
| "cite_spans": [ |
| { |
| "start": 126, |
| "end": 160, |
| "text": "(Hochreiter and Schmidhuber, 1997;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 161, |
| "end": 174, |
| "text": "Graves, 2013)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "LSTM cells process inputs with three multiplicative gates which control what proportion of the current input to pass into the memory cell (i t ) and what proportion of the previous memory cell to \"forget\" (f t ). The updated value of the memory cell after an input x t is computed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "i t = \u03c3(W ix x t + W ih h t\u22121 + W ic c t\u22121 + b i ) f t = \u03c3(W f x x t + W f h h t\u22121 + W f c c t\u22121 + b f ) c t = f t c t\u22121 + i t tanh(W cx x t + W ch h t\u22121 + b c ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where \u03c3 is the component-wise logistic sigmoid function, and is the component-wise (Hadamard) product.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The value h t of the LSTM at each time step is controlled by a third gate (o t ) that is applied to the result of the application of a nonlinearity to the memory cell contents:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "o t = \u03c3(W ox x t + W oh h t\u22121 + W oc c t + b o ) h t = o t tanh(c t ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "To improve the representational capacity of LSTMs (and RNNs generally), LSTMs can be stacked in \"layers\" (Pascanu et al., 2014) . In these architectures, the input LSTM at higher layers at time t is the value of h t computed by the lower layer (and x t is the input at the lowest layer).", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 127, |
| "text": "(Pascanu et al., 2014)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Finally, output is produced at each time step from the h t value at the top layer:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "y t = g(h t ),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where g is an arbitrary differentiable function.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Long Short-Term Memories", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Conventional LSTMs model sequences in a leftto-right order. 1 Our innovation here is to augment the LSTM with a \"stack pointer.\" Like a conventional LSTM, new inputs are always added in the right-most position, but in stack LSTMs, the current location of the stack pointer determines which cell in the LSTM provides c t\u22121 and h t\u22121 when computing the new memory cell contents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In addition to adding elements to the end of the sequence, the stack LSTM provides a pop operation which moves the stack pointer to the previous element (i.e., the previous element that was extended, not necessarily the right-most element). Thus, the LSTM can be understood as a stack implemented so that contents are never overwritten, that is, push always adds a new entry at the end of the list that contains a back-pointer to the previous top, and pop only updates the stack pointer. 2 This control structure is schematized in Figure 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 531, |
| "end": 539, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "By querying the output vector to which the stack pointer points (i.e., the h TOP ), a continuous-space \"summary\" of the contents of the current stack configuration is available. We refer to this value as the \"stack summary.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "What does the stack summary look like? Intuitively, elements near the top of the stack will 1 Ours is not the first deviation from a strict left-toright order: previous variations include bidirectional LSTMs (Graves and Schmidhuber, 2005) and multidimensional LSTMs (Graves et al., 2007) .", |
| "cite_spans": [ |
| { |
| "start": 208, |
| "end": 238, |
| "text": "(Graves and Schmidhuber, 2005)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 266, |
| "end": 287, |
| "text": "(Graves et al., 2007)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "2 Goldberg et al. (2013) propose a similar stack construction to prevent stack operations from invalidating existing references to the stack in a beam-search parser that must (efficiently) maintain a priority queue of stacks.", |
| "cite_spans": [ |
| { |
| "start": 2, |
| "end": 24, |
| "text": "Goldberg et al. (2013)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": ";", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "x 1 y 0 y 1 ; x 1 y 0 y 1 T O P pop ; x 1 y 0 y 1 T O P T O P push y 2 x 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Figure 1: A stack LSTM extends a conventional left-to-right LSTM with the addition of a stack pointer (notated as TOP in the figure). This figure shows three configurations: a stack with a single element (left), the result of a pop operation to this (middle), and then the result of applying a push operation (right). The boxes in the lowest rows represent stack contents, which are the inputs to the LSTM, the upper rows are the outputs of the LSTM (in this paper, only the output pointed to by TOP is ever accessed), and the middle rows are the memory cells (the c t 's and h t 's) and gates. Arrows represent function applications (usually affine transformations followed by a nonlinearity), refer to \u00a72.1 for specifics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "influence the representation of the stack. However, the LSTM has the flexibility to learn to extract information from arbitrary points in the stack (Hochreiter and Schmidhuber, 1997) .", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 182, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Although this architecture is to the best of our knowledge novel, it is reminiscent of the Recurrent Neural Network Pushdown Automaton (NNPDA) of Das et al. (1992) , which added an external stack memory to an RNN. However, our architecture provides an embedding of the complete contents of the stack, whereas theirs made only the top of the stack visible to the RNN.", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 163, |
| "text": "Das et al. (1992)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stack Long Short-Term Memories", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We now turn to the problem of learning representations of dependency parsers. We preserve the standard data structures of a transition-based dependency parser, namely a buffer of words (B) to be processed and a stack (S) of partially constructed syntactic elements. Each stack element is augmented with a continuous-space vector embedding representing a word and, in the case of S, any of its syntactic dependents. Additionally, we introduce a third stack (A) to represent the history of actions taken by the parser. 3 Each of these stacks is associated with a stack LSTM that provides an encoding of their current contents. The full architecture is illustrated in Figure 3 , and we will review each of the components in turn.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 665, |
| "end": 673, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dependency Parser", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The dependency parser is initialized by pushing the words and their representations (we discuss word representations below in \u00a73.3) of the input sentence in reverse order onto B such that the first word is at the top of B and the ROOT symbol is at the bottom, and S and A each contain an emptystack token. At each time step, the parser computes a composite representation of the stack states (as determined by the current configurations of B, S, and A) and uses that to predict an action to take, which updates the stacks. Processing completes when B is empty (except for the empty-stack symbol), S contains two elements, one representing the full parse tree headed by the ROOT symbol and the other the empty-stack symbol, and A is the history of operations taken by the parser.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parser Operation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The parser state representation at time t, which we write p t , which is used to is determine the transition to take, is defined as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parser Operation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p t = max {0, W[s t ; b t ; a t ] + d} ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parser Operation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where W is a learned parameter matrix, b t is the stack LSTM encoding of the input buffer B, s t is the stack LSTM encoding of S, a t is the stack LSTM encoding of A, d is a bias term, then passed through a component-wise rectified linear unit (ReLU) nonlinearity (Glorot et al., 2011) . 4 Finally, the parser state p t is used to compute overhasty an decision was Figure 2 : Parser state computation encountered while parsing the sentence \"an overhasty decision was made.\" Here S designates the stack of partially constructed dependency subtrees and its LSTM encoding; B is the buffer of words remaining to be processed and its LSTM encoding; and A is the stack representing the history of actions taken by the parser. These are linearly transformed, passed through a ReLU nonlinearity to produce the parser state embedding p t . An affine transformation of this embedding is passed to a softmax layer to give a distribution over parsing decisions that can be taken. the probability of the parser action at time t as:", |
| "cite_spans": [ |
| { |
| "start": 264, |
| "end": 285, |
| "text": "(Glorot et al., 2011)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 288, |
| "end": 289, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 365, |
| "end": 373, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Parser Operation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "amod REDUCE-LEFT(amod) SHIFT | {z } | {z } | {z } \u2026 S H IF T R E D -L (a m o d ) \u2026 made S B A ; ; p t root T O P T O P T O P", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parser Operation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p(z t | p t ) = exp g zt p t + q zt z \u2208A(S,B) exp g z p t + q z ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parser Operation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where g z is a column vector representing the (output) embedding of the parser action z, and q z is a bias term for action z. The set A(S, B) represents the valid actions that may be taken given the current contents of the stack and buffer. 5 Since p t = f (s t , b t , a t ) encodes information about all previous decisions made by the parser, the chain rule may be invoked to write the probability of any valid sequence of parse actions z conditional on the input as:", |
| "cite_spans": [ |
| { |
| "start": 241, |
| "end": 242, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parser Operation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "p(z | w) = |z| t=1 p(z t | p t ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parser Operation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "(1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parser Operation", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Our parser is based on the arc-standard transition inventory (Nivre, 2004) , given in Figure 3 .", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 74, |
| "text": "(Nivre, 2004)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 86, |
| "end": 94, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Transition Operations", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Why arc-standard? Arc-standard transitions parse a sentence from left to right, using a stack to store partially built syntactic structures and a buffer that keeps the incoming tokens to be parsed. The parsing algorithm chooses an action at each configuration by means of a score. In arc-standard parsing, the dependency tree is constructed bottom-up, because right-dependents of a head are only attached after the subtree under the dependent is fully parsed. Since our parser recursively computes representations of tree fragments, this construction order guarantees that once a syntactic structure has been used to modify a head, the algorithm will not try to find another head for the dependent structure. This means we can evaluate composed representations of tree fragments incrementally; we discuss our strategy for this below ( \u00a73.4).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transition Operations", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To represent each input token, we concatenate three vectors: a learned vector representation for each word type (w); a fixed vector representation from a neural language model (w LM ), and a learned representation (t) of the POS tag of the token, provided as auxiliary input to the parser. A Figure 3 : Parser transitions indicating the action applied to the stack and buffer and the resulting stack and buffer states. Bold symbols indicate (learned) embeddings of words and relations, script symbols indicate the corresponding words and relations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 292, |
| "end": 300, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Token Embeddings and OOVs", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Stack t Buffer t Action Stack t+1 Buffer t+1 Dependency (u, u), (v, v), S B REDUCE-RIGHT(r) (g r (u, v), u), S B u r \u2192 v (u, u), (v, v), S B REDUCE-LEFT(r) (g r (v, u), v), S B u r \u2190 v S (u, u), B SHIFT (u, u), S B -", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token Embeddings and OOVs", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "linear map (V) is applied to the resulting vector and passed through a component-wise ReLU,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token Embeddings and OOVs", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "x = max {0, V[w;w LM ; t] + b} .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token Embeddings and OOVs", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "This mapping can be shown schematically as in Figure 4 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 46, |
| "end": 54, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Token Embeddings and OOVs", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "overhasty JJ UNK decision NN decision x 2 x 3 t 2 t 3 w 2 w LM 2w LM 3 w 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token Embeddings and OOVs", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Figure 4: Token embedding of the words decision, which is present in both the parser's training data and the language model data, and overhasty, an adjective that is not present in the parser's training data but is present in the LM data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token Embeddings and OOVs", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "This architecture lets us deal flexibly with outof-vocabulary words-both those that are OOV in both the very limited parsing data but present in the pretraining LM, and words that are OOV in both. To ensure we have estimates of the OOVs in the parsing training data, we stochastically replace (with p = 0.5) each singleton word type in the parsing training data with the UNK token in each training iteration.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token Embeddings and OOVs", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Pretrained word embeddings. A veritable cottage industry exists for creating word embeddings, meaning numerous pretraining options forw LM are available. However, for syntax modeling problems, embedding approaches which discard order perform less well (Bansal et al., 2014) ; therefore we used a variant of the skip n-gram model introduced by Ling et al. (2015) , named \"structured skip n-gram,\" where a different set of parameters is used to predict each context word depending on its position relative to the target word. The hyperparameters of the model are the same as in the skip n-gram model defined in word2vec (Mikolov et al., 2013) , and we set the window size to 5, used a negative sampling rate to 10, and ran 5 epochs through unannotated corpora described in \u00a75.1.", |
| "cite_spans": [ |
| { |
| "start": 252, |
| "end": 273, |
| "text": "(Bansal et al., 2014)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 343, |
| "end": 361, |
| "text": "Ling et al. (2015)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 618, |
| "end": 640, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token Embeddings and OOVs", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Recursive neural network models enable complex phrases to be represented compositionally in terms of their parts and the relations that link them (Socher et al., 2011; Socher et al., 2013c; Hermann and Blunsom, 2013; Socher et al., 2013b) . We follow this previous line of work in embedding dependency tree fragments that are present in the stack S in the same vector space as the token embeddings discussed above.", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 167, |
| "text": "(Socher et al., 2011;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 168, |
| "end": 189, |
| "text": "Socher et al., 2013c;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 190, |
| "end": 216, |
| "text": "Hermann and Blunsom, 2013;", |
| "ref_id": null |
| }, |
| { |
| "start": 217, |
| "end": 238, |
| "text": "Socher et al., 2013b)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Composition Functions", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "A particular challenge here is that a syntactic head may, in general, have an arbitrary number of dependents. To simplify the parameterization of our composition function, we combine headmodifier pairs one at a time, building up more complicated structures in the order they are \"reduced\" in the parser, as illustrated in Figure 5 . Each node in this expanded syntactic tree has a value computed as a function of its three arguments: the syntactic head (h), the dependent (d), and the syntactic relation being satisfied (r). We define this by concatenating the vector embeddings of the head, dependent and relation, applying a linear operator and a component-wise nonlinearity as follows:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 322, |
| "end": 330, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Composition Functions", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "c = tanh (U[h; d; r] + e) .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Composition Functions", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "For the relation vector, we use an embedding of the parser action that was applied to construct the relation (i.e., the syntactic relation paired with the direction of attachment).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Composition Functions", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We trained our parser to maximize the conditional log-likelihood (Eq. 1) of treebank parses given sentences. Our implementation constructs a computation graph for each sentence and runs forwardand backpropagation to obtain the gradients of this Figure 5 : The representation of a dependency subtree (above) is computed by recursively applying composition functions to head, modifier, relation triples. In the case of multiple dependents of a single head, the recursive branching order is imposed by the order of the parser's reduce operations (below).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 245, |
| "end": 253, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "4" |
| }, |
| { |
| "text": "objective with respect to the model parameters. The computations for a single parsing model were run on a single thread on a CPU. Using the dimensions discussed in the next section, we required between 8 and 12 hours to reach convergence on a held-out dev set. 6 Parameter optimization was performed using stochastic gradient descent with an initial learning rate of \u03b7 0 = 0.1, and the learning rate was updated on each pass through the training data as \u03b7 t = \u03b7 0 /(1 + \u03c1t), with \u03c1 = 0.1 and where t is the number of epochs completed. No momentum was used. To mitigate the effects of \"exploding\" gradients, we clipped the 2 norm of the gradient to 5 before applying the weight update rule (Sutskever et al., 2014; Graves, 2013) . An 2 penalty of 1 \u00d7 10 \u22126 was applied to all weights.", |
| "cite_spans": [ |
| { |
| "start": 261, |
| "end": 262, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 689, |
| "end": 713, |
| "text": "(Sutskever et al., 2014;", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 714, |
| "end": 727, |
| "text": "Graves, 2013)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Matrix and vector parameters were initialized with uniform samples in \u00b1 6/(r + c), where r and c were the number of rows and columns in the structure (Glorot and Bengio, 2010) .", |
| "cite_spans": [ |
| { |
| "start": 150, |
| "end": 175, |
| "text": "(Glorot and Bengio, 2010)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Dimensionality. The full version of our parsing model sets dimensionalities as follows. LSTM hidden states are of size 100, and we use two layers of LSTMs for each stack. Embeddings of the parser actions used in the composition functions have 16 dimensions, and the output embedding size is 20 dimensions. Pretained word embeddings have 100 dimensions (English) and 80 dimensions (Chinese), and the learned word embeddings have 32 dimensions. Part of speech embeddings have 12 dimensions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "4" |
| }, |
| { |
| "text": "These dimensions were chosen based on intuitively reasonable values (words should have higher dimensionality than parsing actions, POS tags, and relations; LSTM states should be relatively large), and it was confirmed on development data that they performed well. 7 Future work might more carefully optimize these parameters; our reported architecture strikes a balance between minimizing computational expense and finding solutions that work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Procedure", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We applied our parsing model and several variations of it to two parsing tasks and report results below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We used the same data setup as Chen and Manning (2014) , namely an English and a Chinese parsing task. This baseline configuration was chosen since they likewise used a neural parameterization to predict actions in an arc-standard transition-based parser.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 54, |
| "text": "Chen and Manning (2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 For English, we used the Stanford Dependencency (SD) treebank (de Marneffe et al., 2006) used in (Chen and Manning, 2014) which is the closest model published, with the same splits. 8 The part-of-speech tags are predicted by using the Stanford Tagger (Toutanova et al., 2003) with an accuracy of 97.3%. This treebank contains a negligible amount of non-projective arcs (Chen and Manning, 2014) .", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 90, |
| "text": "(de Marneffe et al., 2006)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 99, |
| "end": 123, |
| "text": "(Chen and Manning, 2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 253, |
| "end": 277, |
| "text": "(Toutanova et al., 2003)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 371, |
| "end": 395, |
| "text": "(Chen and Manning, 2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 For Chinese, we use the Penn Chinese Treebank 5.1 (CTB5) following Zhang and Clark (2008) , 9 with gold part-of-speech tags which is also the same as in Chen and Manning (2014) .", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 91, |
| "text": "Zhang and Clark (2008)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 155, |
| "end": 178, |
| "text": "Chen and Manning (2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Language model word embeddings were generated, for English, from the AFP portion of the English Gigaword corpus (version 5), and from the complete Chinese Gigaword corpus (version 2), as segmented by the Stanford Chinese Segmenter (Tseng et al., 2005) .", |
| "cite_spans": [ |
| { |
| "start": 231, |
| "end": 251, |
| "text": "(Tseng et al., 2005)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We report results on five experimental configurations per language, as well as the Chen and Manning (2014) baseline. These are: the full stack LSTM parsing model (S-LSTM), the stack LSTM parsing model without POS tags (\u2212POS), the stack LSTM parsing model without pretrained language model embeddings (\u2212pretraining), the stack LSTM parsing model that uses just head words on the stack instead of composed representations (\u2212composition), and the full parsing model where rather than an LSTM, a classical recurrent neural network is used (S-RNN).", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 106, |
| "text": "Chen and Manning (2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental configurations", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Following Chen and Manning 2014 ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Overall, our parser substantially outperforms the baseline neural network parser of Chen and Manning (2014) , both in the full configuration and in the various ablated conditions we report. The one exception to this is the \u2212POS condition for the Chinese parsing task, which in which we underperform their baseline (which used gold POS tags), although we do still obtain reasonable parsing performance in this limited case. We note that predicted POS tags in English add very little value-suggesting that we can think of parsing sentences directly without first tagging them. We also find that using composed representations of dependency tree fragments outperforms using representations of head words alone, which has implications for theories of headedness. Finally, we find that while LSTMs outperform baselines that use only classical RNNs, these are still quite capable of learning good representations.", |
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 107, |
| "text": "Chen and Manning (2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Effect of beam size. Beam search was determined to have minimal impact on scores (absolute improvements of \u2264 0.3% were possible with small beams). Therefore, all results we report used greedy decoding- Chen and Manning (2014) likewise only report results with greedy decoding. This finding is in line with previous work that generates sequences from recurrent networks (Grefenstette et al., 2014), although Vinyals et al. (2015) did report much more substantial improvements with beam search on their \"grammar as a foreign language\" parser. 10", |
| "cite_spans": [ |
| { |
| "start": 202, |
| "end": 225, |
| "text": "Chen and Manning (2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 407, |
| "end": 428, |
| "text": "Vinyals et al. (2015)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Our approach ties together several strands of previous work. First, several kinds of stack memories have been proposed to augment neural architectures. Das et al. (1992) proposed a neural network with an external stack memory based on recurrent neural networks. In contrast to our model, in which the entire contents of the stack are summarized in a single value, in their model, the network could only see the contents of the top of the stack. Mikkulainen (1996) proposed an architecture with a stack that had a summary feature, although the stack control was learned as a latent variable. A variety of authors have used neural networks to predict parser actions in shift-reduce parsers. The earliest attempt we are aware of is due to Mayberry and Miikkulainen (1999) . The resurgence of interest in neural networks has resulted in in several applications to transition-based dependency parsers (Weiss et al., 2015; Chen and Manning, 2014; Stenetorp, 2013) . In these works, the conditioning structure was manually crafted and sensitive to only certain properties of the state, while we are conditioning on the global state object. Like us, Stenetorp (2013) used recursively composed representations of the tree fragments (a head and its dependents). Neural networks have also been used to learn representations for use in chart parsing (Henderson, 2004; Titov and Henderson, 2007; Socher et al., 2013a; Le and Zuidema, 2014) .", |
| "cite_spans": [ |
| { |
| "start": 152, |
| "end": 169, |
| "text": "Das et al. (1992)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 445, |
| "end": 463, |
| "text": "Mikkulainen (1996)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 736, |
| "end": 768, |
| "text": "Mayberry and Miikkulainen (1999)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 896, |
| "end": 916, |
| "text": "(Weiss et al., 2015;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 917, |
| "end": 940, |
| "text": "Chen and Manning, 2014;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 941, |
| "end": 957, |
| "text": "Stenetorp, 2013)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 1338, |
| "end": 1355, |
| "text": "(Henderson, 2004;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1356, |
| "end": 1382, |
| "text": "Titov and Henderson, 2007;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 1383, |
| "end": 1404, |
| "text": "Socher et al., 2013a;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1405, |
| "end": 1426, |
| "text": "Le and Zuidema, 2014)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "LSTMs have also recently been demonstrated as a mechanism for learning to represent parse structure. Vinyals et al. (2015) proposed a phrasestructure parser based on LSTMs which operated by first reading the entire input sentence in so as to obtain a vector representation of it, and then generating bracketing structures sequentially conditioned on this representation. Although superficially similar to our model, their approach has a number of disadvantages. First, they relied on a large amount of semi-supervised training data that was generated by parsing a large unannotated corpus with an off-the-shelf parser. Second, while they recognized that a stack-like shiftreduce parser control provided useful information, they only made the top word of the stack visible during training and decoding. Third, although it is impressive feat of learning that an entire parse tree be represented by a vector, it seems that this formulation makes the problem unnecessarily difficult.", |
| "cite_spans": [ |
| { |
| "start": 101, |
| "end": 122, |
| "text": "Vinyals et al. (2015)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Finally, our work can be understood as a progression toward using larger contexts in parsing. An exhaustive summary is beyond the scope of this paper, but some of the important milestones in this tradition are the use of cube pruning to efficiently include nonlocal features in discriminative chart reranking (Huang and Chiang, 2008) , approximate decoding techniques based on LP relaxations in graph-based parsing to include higherorder features (Martins et al., 2010) , and randomized hill-climbing methods that enable arbitrary nonlocal features in global discriminative parsing models . Since our parser is sensitive to any part of the input, its history, or its stack contents, it is similar in spirit to the last approach, which permits truly arbitrary features.", |
| "cite_spans": [ |
| { |
| "start": 309, |
| "end": 333, |
| "text": "(Huang and Chiang, 2008)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 447, |
| "end": 469, |
| "text": "(Martins et al., 2010)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We presented stack LSTMs, recurrent neural networks for sequences, with push and pop operations, and used them to implement a state-of-theart transition-based dependency parser. We conclude by remarking that stack memory offers intriguing possibilities for learning to solve general information processing problems (Mikkulainen, 1996) . Here, we learned from observable stack manipulation operations (i.e., supervision from a treebank), and the computed embeddings of final parser states were not used for any further prediction. However, this could be reversed, giving a device that learns to construct context-free programs (e.g., expression trees) given only observed outputs; one application would be unsupervised parsing. Such an extension of the work would make it an alternative to architectures that have an explicit external memory such as neural Turing machines (Graves et al., 2014) and memory networks (Weston et al., 2015) . However, as with those models, without supervision of the stack operations, formidable computational challenges must be solved (e.g., marginalizing over all latent stack operations), but sampling techniques and techniques from reinforcement learning have promise here (Zaremba and Sutskever, 2015) , making this an intriguing avenue for future work.", |
| "cite_spans": [ |
| { |
| "start": 315, |
| "end": 334, |
| "text": "(Mikkulainen, 1996)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 872, |
| "end": 893, |
| "text": "(Graves et al., 2014)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 914, |
| "end": 935, |
| "text": "(Weston et al., 2015)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 1206, |
| "end": 1235, |
| "text": "(Zaremba and Sutskever, 2015)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The A stack is only ever pushed to; our use of a stack here is purely for implementational and expository convenience.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In preliminary experiments, we tried several nonlinearities and found ReLU to work slightly better than the others.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In general, A(S, B) is the complete set of parser actions discussed in \u00a73.2, but in some cases not all actions are available. For example, when S is empty and words remain in B, a SHIFT operation is obligatory(Sartorio et al., 2013).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Software for replicating the experiments is available from https://github.com/clab/lstm-parser.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We did perform preliminary experiments with LSTM states of 32, 50, and 80, but the other dimensions were our initial guesses.8 Training: 02-21. Development: 22. Test: 23. 9 Training: 001-815,1001-1136. Development: 886- 931, 1148-1151. Test: 816-885, 1137-1147", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Although superficially similar to ours,Vinyals et al. (2015) is a phrase-structure parser and adaptation to the dependency parsing scenario would have been nontrivial. We discuss their work in \u00a76.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The authors would like to thank Lingpeng Kong and Jacob Eisenstein for comments on an earlier version of this draft and Danqi Chen for assistance with the parsing datasets. This work was sponsored in part by the U. S. Army Research Laboratory and the U. S. Army Research Office under contract/grant number W911NF-10-1-0533, and in part by NSF CAREER grant IIS-1054319. Miguel Ballesteros is supported by the European Commission under the contract numbers FP7-ICT-610411 (project MULTISENSOR) and H2020-RIA-645012 (project KRISTINA).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Automatic feature selection for agenda-based dependency parsing", |
| "authors": [ |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "" |
| }, |
| { |
| "first": "Bernd", |
| "middle": [], |
| "last": "Bohnet", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miguel Ballesteros and Bernd Bohnet. 2014. Au- tomatic feature selection for agenda-based depen- dency parsing. In Proc. COLING.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "MaltOptimizer: Fast and effective parser optimization. Natural Language Engineering", |
| "authors": [ |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Ballesteros", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miguel Ballesteros and Joakim Nivre. 2014. MaltOp- timizer: Fast and effective parser optimization. Nat- ural Language Engineering.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Tailoring continuous word representations for dependency parsing", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Livescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2014. Tailoring continuous word representations for dependency parsing. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A transitionbased system for joint part-of-speech tagging and labeled non-projective dependency parsing", |
| "authors": [ |
| { |
| "first": "Bernd", |
| "middle": [], |
| "last": "Bohnet", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bernd Bohnet and Joakim Nivre. 2012. A transition- based system for joint part-of-speech tagging and la- beled non-projective dependency parsing. In Proc. EMNLP.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A fast and accurate dependency parser using neural networks", |
| "authors": [ |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danqi Chen and Christopher D. Manning. 2014. A fast and accurate dependency parser using neural net- works. In Proc. EMNLP.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Feature embedding for dependency parsing", |
| "authors": [ |
| { |
| "first": "Wenliang", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenliang Chen, Yue Zhang, and Min Zhang. 2014. Feature embedding for dependency parsing. In Proc. COLING.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Transition-based dependency parsing with selectional branching", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Jinho", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinho D. Choi and Andrew McCallum. 2013. Transition-based dependency parsing with selec- tional branching. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Learning context-free grammars: Capabilities and limitations of a recurrent neural network with an external stack memory", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "Lee" |
| ], |
| "last": "Sreerupa Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Guo-Zheng", |
| "middle": [], |
| "last": "Giles", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Proc. Cognitive Science Society", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sreerupa Das, C. Lee Giles, and Guo-Zheng Sun. 1992. Learning context-free grammars: Capabilities and limitations of a recurrent neural network with an external stack memory. In Proc. Cognitive Science Society.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Generating typed dependency parses from phrase structure parses", |
| "authors": [ |
| { |
| "first": "Marie-Catherine", |
| "middle": [], |
| "last": "De Marneffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Maccartney", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marie-Catherine de Marneffe, Bill MacCartney, and Christopher D. Manning. 2006. Generating typed dependency parses from phrase structure parses. In Proc. LREC.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Understanding the difficulty of training deep feedforward neural networks", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understand- ing the difficulty of training deep feedforward neural networks. In Proc. ICML.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Deep sparse rectifier neural networks", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. AISTATS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot, Antoine Bordes, and Yoshua Bengio. 2011. Deep sparse rectifier neural networks. In Proc. AISTATS.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Efficient implementation of beam-search incremental parsers", |
| "authors": [ |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoav Goldberg, Kai Zhao, and Liang Huang. 2013. Efficient implementation of beam-search incremen- tal parsers. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Framewise phoneme classification with bidirectional LSTM networks", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proc. IJCNN", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Graves and J\u00fcrgen Schmidhuber. 2005. Frame- wise phoneme classification with bidirectional LSTM networks. In Proc. IJCNN.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Multi-dimensional recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| }, |
| { |
| "first": "Santiago", |
| "middle": [], |
| "last": "Fern\u00e1ndez", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. ICANN", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Graves, Santiago Fern\u00e1ndez, and J\u00fcrgen Schmid- huber. 2007. Multi-dimensional recurrent neural networks. In Proc. ICANN.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Neural Turing machines. CoRR", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Wayne", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivo", |
| "middle": [], |
| "last": "Danihelka", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Graves, Greg Wayne, and Ivo Danihelka. 2014. Neural Turing machines. CoRR, abs/1410.5401.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Generating sequences with recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Graves", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Graves. 2013. Generating sequences with recur- rent neural networks. CoRR, abs/1308.0850.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "New directions in vector space models of meaning", |
| "authors": [ |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgiana", |
| "middle": [], |
| "last": "Dinu", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "ACL Tutorial", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edward Grefenstette, Karl Moritz Hermann, Georgiana Dinu, and Phil Blunsom. 2014. New directions in vector space models of meaning. ACL Tutorial.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Discriminative training of a neural network discriminative parser", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Henderson. 2004. Discriminative training of a neural network discriminative parser. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "The role of syntax in vector space models of compositional semantics", |
| "authors": [ |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karl Moritz Hermann and Phil Blunsom. 2013. The role of syntax in vector space models of composi- tional semantics. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Forest reranking: Discriminative parsing with non-local features", |
| "authors": [ |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Chiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang Huang and David Chiang. 2008. Forest rerank- ing: Discriminative parsing with non-local features. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Insideoutside recursive neural network model for dependency parsing", |
| "authors": [ |
| { |
| "first": "Phong", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Willem", |
| "middle": [], |
| "last": "Zuidema", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Phong Le and Willem Zuidema. 2014. Inside- outside recursive neural network model for depen- dency parsing. In Proc. EMNLP.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Two/too simple adaptations of word2vec for syntax problems", |
| "authors": [ |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Black", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabel", |
| "middle": [], |
| "last": "Trancoso", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang Ling, Chris Dyer, Alan Black, and Isabel Trancoso. 2015. Two/too simple adaptations of word2vec for syntax problems. In Proc. NAACL.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Turboparsers: Dependency parsing by approximate variational inference", |
| "authors": [ |
| { |
| "first": "Andr\u00e9", |
| "middle": [ |
| "F", |
| "T" |
| ], |
| "last": "Martins", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "P" |
| ], |
| "last": "Xing", |
| "suffix": "" |
| }, |
| { |
| "first": "Pedro", |
| "middle": [ |
| "M", |
| "Q" |
| ], |
| "last": "Aguiar", |
| "suffix": "" |
| }, |
| { |
| "first": "M\u00e1rio", |
| "middle": [ |
| "A", |
| "T" |
| ], |
| "last": "Figueiredo", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proc. EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andr\u00e9 F. T. Martins, Noah A. Smith, Eric P. Xing, Pe- dro M. Q. Aguiar, and M\u00e1rio A. T. Figueiredo. 2010. Turboparsers: Dependency parsing by approximate variational inference. In Proc. EMNLP.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "SARDSRN: A neural network shift-reduce parser", |
| "authors": [ |
| { |
| "first": "Marshall", |
| "middle": [ |
| "R" |
| ], |
| "last": "Mayberry", |
| "suffix": "" |
| }, |
| { |
| "first": "Risto", |
| "middle": [], |
| "last": "Miikkulainen", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proc. IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marshall R. Mayberry and Risto Miikkulainen. 1999. SARDSRN: A neural network shift-reduce parser. In Proc. IJCAI.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Subsymbolic case-role analysis of sentences with embedded clauses", |
| "authors": [ |
| { |
| "first": "Risto", |
| "middle": [], |
| "last": "Mikkulainen", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Cognitive Science", |
| "volume": "20", |
| "issue": "", |
| "pages": "47--73", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Risto Mikkulainen. 1996. Subsymbolic case-role anal- ysis of sentences with embedded clauses. Cognitive Science, 20:47-73.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Proc. NIPS.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "An efficient algorithm for projective dependency parsing", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proc. IWPT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre. 2003. An efficient algorithm for projec- tive dependency parsing. In Proc. IWPT.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Incrementality in deterministic dependency parsing", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Workshop on Incremental Parsing: Bringing Engineering and Cognition Together", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre. 2004. Incrementality in deterministic dependency parsing. In Proceedings of the Work- shop on Incremental Parsing: Bringing Engineering and Cognition Together.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Incremental non-projective dependency parsing", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre. 2007. Incremental non-projective de- pendency parsing. In Proc. NAACL.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Algorithms for deterministic incremental dependency parsing", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Computational Linguistics", |
| "volume": "34", |
| "issue": "4", |
| "pages": "513--553", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre. 2008. Algorithms for deterministic in- cremental dependency parsing. Computational Lin- guistics, 34:4:513-553. MIT Press.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Non-projective dependency parsing in expected linear time", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre. 2009. Non-projective dependency pars- ing in expected linear time. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "How to construct deep recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Razvan", |
| "middle": [], |
| "last": "Pascanu", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00c7aglar", |
| "middle": [], |
| "last": "G\u00fcl\u00e7ehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Razvan Pascanu, \u00c7 aglar G\u00fcl\u00e7ehre, Kyunghyun Cho, and Yoshua Bengio. 2014. How to construct deep recurrent neural networks. In Proc. ICLR.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "A transition-based dependency parser using a dynamic parsing strategy", |
| "authors": [ |
| { |
| "first": "Francesco", |
| "middle": [], |
| "last": "Sartorio", |
| "suffix": "" |
| }, |
| { |
| "first": "Giorgio", |
| "middle": [], |
| "last": "Satta", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francesco Sartorio, Giorgio Satta, and Joakim Nivre. 2013. A transition-based dependency parser using a dynamic parsing strategy. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Dynamic pooling and unfolding recursive autoencoders for paraphrase detection", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "H" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Eric H. Huang, Jeffrey Pennington, Andrew Y. Ng, and Christopher D. Manning. 2011. Dynamic pooling and unfolding recursive autoen- coders for paraphrase detection. In Proc. NIPS.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Parsing with compositional vector grammars", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, John Bauer, Christopher D. Manning, and Andrew Y. Ng. 2013a. Parsing with composi- tional vector grammars. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Grounded compositional semantics for finding and describing images with sentences", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrej", |
| "middle": [], |
| "last": "Karpathy", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Andrej Karpathy, Quoc V. Le, Christo- pher D. Manning, and Andrew Y. Ng. 2013b. Grounded compositional semantics for finding and describing images with sentences. TACL.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Perelygin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Alex Perelygin, Jean Y. Wu, Jason Chuang, Christopher D. Manning, Andrew Y. Ng, and Christopher Potts. 2013c. Recursive deep mod- els for semantic compositionality over a sentiment treebank. In Proc. EMNLP.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Transition-based dependency parsing using recursive neural networks", |
| "authors": [ |
| { |
| "first": "Pontus", |
| "middle": [], |
| "last": "Stenetorp", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. NIPS Deep Learning Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pontus Stenetorp. 2013. Transition-based dependency parsing using recursive neural networks. In Proc. NIPS Deep Learning Workshop.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. In Proc. NIPS.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Constituent parsing with incremental sigmoid belief networks", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Titov", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Titov and James Henderson. 2007. Constituent parsing with incremental sigmoid belief networks. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Feature-rich part-ofspeech tagging with a cyclic dependency network", |
| "authors": [ |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoram", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proc. NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kristina Toutanova, Dan Klein, Christopher D. Man- ning, and Yoram Singer. 2003. Feature-rich part-of- speech tagging with a cyclic dependency network. In Proc. NAACL.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "A conditional random field word segmenter for SIGHAN bakeoff", |
| "authors": [ |
| { |
| "first": "Huihsin", |
| "middle": [], |
| "last": "Tseng", |
| "suffix": "" |
| }, |
| { |
| "first": "Pichuan", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Galen", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proc. Fourth SIGHAN Workshop on Chinese Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Huihsin Tseng, Pichuan Chang, Galen Andrew, Daniel Jurafsky, and Christopher Manning. 2005. A con- ditional random field word segmenter for SIGHAN bakeoff 2005. In Proc. Fourth SIGHAN Workshop on Chinese Language Processing.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Grammar as a foreign language", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Terry", |
| "middle": [], |
| "last": "Koo", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Lukasz Kaiser, Terry Koo, Slav Petrov, Ilya Sutskever, and Geoffrey Hinton. 2015. Gram- mar as a foreign language. In Proc. ICLR.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Structured training for neural network transition-based parsing", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Alberti", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Weiss, Christopher Alberti, Michael Collins, and Slav Petrov. 2015. Structured training for neural network transition-based parsing. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Memory networks", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proc. ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Weston, Sumit Chopra, and Antoine Bordes. 2015. Memory networks. In Proc. ICLR.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Statistical dependency analysis with support vector machines", |
| "authors": [ |
| { |
| "first": "Hiroyasu", |
| "middle": [], |
| "last": "Yamada", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuji", |
| "middle": [], |
| "last": "Matsumoto", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proc. IWPT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiroyasu Yamada and Yuji Matsumoto. 2003. Statis- tical dependency analysis with support vector ma- chines. In Proc. IWPT.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Reinforcement learning neural Turing machines. ArXiv e-prints", |
| "authors": [ |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Zaremba", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wojciech Zaremba and Ilya Sutskever. 2015. Rein- forcement learning neural Turing machines. ArXiv e-prints, May.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "A tale of two parsers: Investigating and combining graph-based and transition-based dependency parsing", |
| "authors": [ |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yue Zhang and Stephen Clark. 2008. A tale of two parsers: Investigating and combining graph-based and transition-based dependency parsing. In Proc. EMNLP.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Transition-based dependency parsing with rich non-local features", |
| "authors": [ |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yue Zhang and Joakim Nivre. 2011. Transition-based dependency parsing with rich non-local features. In Proc. ACL.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Greed is good if randomized: New inference for dependency parsing", |
| "authors": [ |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jaakkola", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proc. EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuan Zhang, Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2014. Greed is good if randomized: New inference for dependency parsing. In Proc. EMNLP.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "content": "<table/>", |
| "html": null, |
| "text": "Chinese parsing results (CTB5)", |
| "type_str": "table", |
| "num": null |
| } |
| } |
| } |
| } |