| { |
| "paper_id": "Q13-1010", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:08:35.281367Z" |
| }, |
| "title": "Incremental Tree Substitution Grammar for Parsing and Sentence Prediction", |
| "authors": [ |
| { |
| "first": "Federico", |
| "middle": [], |
| "last": "Sangati", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh", |
| "location": { |
| "addrLine": "10 Crichton Street", |
| "postCode": "EH8 9AB", |
| "settlement": "Cognition, Edinburgh", |
| "country": "UK" |
| } |
| }, |
| "email": "federico.sangati@gmail.com" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Keller", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Edinburgh", |
| "location": { |
| "addrLine": "10 Crichton Street", |
| "postCode": "EH8 9AB", |
| "settlement": "Cognition, Edinburgh", |
| "country": "UK" |
| } |
| }, |
| "email": "keller@inf.ed.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper, we present the first incremental parser for Tree Substitution Grammar (TSG). A TSG allows arbitrarily large syntactic fragments to be combined into complete trees; we show how constraints (including lexicalization) can be imposed on the shape of the TSG fragments to enable incremental processing. We propose an efficient Earley-based algorithm for incremental TSG parsing and report an F-score competitive with other incremental parsers. In addition to whole-sentence F-score, we also evaluate the partial trees that the parser constructs for sentence prefixes; partial trees play an important role in incremental interpretation, language modeling, and psycholinguistics. Unlike existing parsers, our incremental TSG parser can generate partial trees that include predictions about the upcoming words in a sentence. We show that it outperforms an n-gram model in predicting more than one upcoming word.", |
| "pdf_parse": { |
| "paper_id": "Q13-1010", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper, we present the first incremental parser for Tree Substitution Grammar (TSG). A TSG allows arbitrarily large syntactic fragments to be combined into complete trees; we show how constraints (including lexicalization) can be imposed on the shape of the TSG fragments to enable incremental processing. We propose an efficient Earley-based algorithm for incremental TSG parsing and report an F-score competitive with other incremental parsers. In addition to whole-sentence F-score, we also evaluate the partial trees that the parser constructs for sentence prefixes; partial trees play an important role in incremental interpretation, language modeling, and psycholinguistics. Unlike existing parsers, our incremental TSG parser can generate partial trees that include predictions about the upcoming words in a sentence. We show that it outperforms an n-gram model in predicting more than one upcoming word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "When humans listen to speech, the input becomes available gradually as the speech signal unfolds. Reading happens in a similarly gradual manner when the eyes scan a text. There is good evidence that the human language processor is adapted to this and works incrementally, i.e., computes an interpretation for an incoming sentence on a word-by-word basis (Tanenhaus et al., 1995; Altmann and Kamide, 1999) . Also language processing systems often deal with speech as it is spoken, or text as it is being typed. A dialogue system should start interpreting a sentence while it is being spoken, and a question answering system should start retrieving answers before the user has finished typing the question.", |
| "cite_spans": [ |
| { |
| "start": 354, |
| "end": 378, |
| "text": "(Tanenhaus et al., 1995;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 379, |
| "end": 404, |
| "text": "Altmann and Kamide, 1999)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Incremental processing is therefore essential both for realistic models of human language processing and for NLP applications that react to user input in real time. In response to this, a number of incremental parsers have been developed, which use context-free grammar (Roark, 2001; Schuler et al., 2010) , dependency grammar (Chelba and Jelinek, 2000; Nivre, 2007; Huang and Sagae, 2010) , or treeadjoining grammar (Demberg et al., 2014) . Typical applications of incremental parsers include speech recognition (Chelba and Jelinek, 2000; Roark, 2001; Xu et al., 2002) , machine translation (Schwartz et al., 2011; Tan et al., 2011) , reading time modeling (Demberg and Keller, 2008) , or dialogue systems (Stoness et al., 2004) . Another potential use of incremental parsers is sentence prediction, i.e., the task of predicting upcoming words in a sentence given a prefix. However, so far only n-gram models and classifiers have been used for this task (Fazly and Hirst, 2003; Eng and Eisner, 2004; Grabski and Scheffer, 2004; Bickel et al., 2005; Li and Hirst, 2005) .", |
| "cite_spans": [ |
| { |
| "start": 270, |
| "end": 283, |
| "text": "(Roark, 2001;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 284, |
| "end": 305, |
| "text": "Schuler et al., 2010)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 327, |
| "end": 353, |
| "text": "(Chelba and Jelinek, 2000;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 354, |
| "end": 366, |
| "text": "Nivre, 2007;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 367, |
| "end": 389, |
| "text": "Huang and Sagae, 2010)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 417, |
| "end": 439, |
| "text": "(Demberg et al., 2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 513, |
| "end": 539, |
| "text": "(Chelba and Jelinek, 2000;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 540, |
| "end": 552, |
| "text": "Roark, 2001;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 553, |
| "end": 569, |
| "text": "Xu et al., 2002)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 592, |
| "end": 615, |
| "text": "(Schwartz et al., 2011;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 616, |
| "end": 633, |
| "text": "Tan et al., 2011)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 658, |
| "end": 684, |
| "text": "(Demberg and Keller, 2008)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 707, |
| "end": 729, |
| "text": "(Stoness et al., 2004)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 955, |
| "end": 978, |
| "text": "(Fazly and Hirst, 2003;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 979, |
| "end": 1000, |
| "text": "Eng and Eisner, 2004;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1001, |
| "end": 1028, |
| "text": "Grabski and Scheffer, 2004;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1029, |
| "end": 1049, |
| "text": "Bickel et al., 2005;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1050, |
| "end": 1069, |
| "text": "Li and Hirst, 2005)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we present an incremental parser for Tree Substitution Grammar (TSG). A TSG contains a set of arbitrarily large tree fragments, which can be combined into new syntax trees by means of a substitution operation. An extensive tradition of parsing with TSG (also referred to as data-oriented parsing) exists (Bod, 1995; Bod et al., 2003) , but none of the existing TSG parsers are incremental. We show how constraints can be imposed on the shape of the TSG fragments to enable incremental processing. We propose an efficient Earley-based algorithm for incremental TSG parsing and report an F-score competitive with other incremental parsers. TSG fragments can be arbitrarily large and can contain multiple lexical items. This property enables our incremental TSG parser to generate partial parse trees that include predictions about the upcoming words in a sentence. It can therefore be applied directly to the task of sentence prediction, simply by reading off the predicted items in a partial tree. We show that our parser outperforms an n-gram model in predicting more than one upcoming word.", |
| "cite_spans": [ |
| { |
| "start": 319, |
| "end": 330, |
| "text": "(Bod, 1995;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 331, |
| "end": 348, |
| "text": "Bod et al., 2003)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is structured as follows. In Section 2, we introduce the ITSG framework and relate it to the original TSG formalism. Section 3 describes the chart-parser algorithm, while Section 4 details the experimental setup and results. Sections 5 and 6 present related work and conclusions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The current work is based on Tree Substitution Grammar (TSG, Schabes 1990 ; for a recent overview see Bod et al. 2003) . A TSG is composed of (i) a set of arbitrarily large fragments, usually extracted from an annotated phrase-structure treebank, and (ii) the substitution operation by means of which fragments can be combined into complete syntactic analyses (derivations) of novel sentences.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 73, |
| "text": "(TSG, Schabes 1990", |
| "ref_id": null |
| }, |
| { |
| "start": 102, |
| "end": 118, |
| "text": "Bod et al. 2003)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental Tree Substitution Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Every fragment's node is either a lexical node (word), a substitution site (a non-lexical node in the yield of the structure), 1 or an internal node. An internal node must always keep the same daughter nodes as in the original tree. For an example of a binarized 2 tree and a fragment extracted from it see Figure 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 307, |
| "end": 315, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Incremental Tree Substitution Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A TSG derivation is constructed in a top-down generative process starting from a fragment in the grammar rooted in S (the unique non-lexical node all syntactic analysis are rooted in). A partial derivation is extended by subsequently introducing more fragments: if X is the left-most substitution site in the yield of the current partial derivation, a fragment 1 For example nodes NP, VP, S@ are the substitution sites of the right fragment in Figure 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 444, |
| "end": 452, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Incremental Tree Substitution Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "2 The tree is right-binarized via artificial nodes with @ symbols, as explained in Section 4.1. The original tree is rooted in X is chosen from the grammar and substituted into it. When there are no more substitution sites (all nodes in the yield are lexical items) the generative process terminates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incremental Tree Substitution Grammar", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this work we are interested in defining an incremental TSG (in short ITSG). The new generative process, while retaining the original mechanism for combining fragments (by means of the substitution operation), must ensure a way for deriving syntactic analyses of novel sentences in an incremental manner, i.e., one word at the time from left to right. More precisely, at each stage of the generative process, the partially derived structure must be connected (as in standard TSG) and have a prefix of the sentence at the beginning of its yield. A partial derivation is connected if it has tree shape, i.e., all the nodes are dominated by a common root node (which does not necessarily have to be the root node of the sentence). For instance, the right fragment in Figure 1 shows a possible way of starting a standard TSG derivation which does not satisfy the incrementality constraint: the partial derivation has a substitution site as the first element in its yield.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 766, |
| "end": 774, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Incrementality", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In order to achieve incrementality while maintaining connectedness, we impose one further constraint on the type of fragments which are allowed in an ITSG: each fragment should be lexicalized, i.e., contain at least one word (lexical anchor) at the first or the second position in its yield. Allowing more than one substitution site at the beginning of a fragment's yield would lead to a violation of the incrementality requirement (as will become clear in Section 2.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Incrementality", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The generative process starts with a fragment anchored in the first word of the sentence being generated. At each subsequent step, a lexicalized fragment is introduced (by means of the substitution operation) to extend the current partial derivation in such a way that the prefix of the yield of the partial structure is lengthened by one word (the lexical anchor of the fragment being introduced). The lexicalization constraint allows a fragment to have multiple lexical items, not necessarily adjacent to one another. This is useful to capture the general ability of TSG to produce in one single step an arbitrarily big syntactic construction ranging from phrasal verbs (e.g., ask someone out), to parallel constructions (e.g., either X or Y), and idiomatic expressions (e.g., took me to the cleaners). For an example of a fragment with multiple lexical anchors see the fragment in the middle of Figure 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 898, |
| "end": 906, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Incrementality", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "An ITSG is a tuple N , L , F , , , , where N and L are the set of non-lexical and lexical nodes respectively, F is a collection of lexicalized fragments, and are two variants of the substitution operation (backward and forward) used to combine fragments into derivations, and is the stop operation which terminates the generative process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Symbolic Grammar", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Fragments A fragment f \u2208 F belongs to one of the three sets F init , F X lex , F Y sub :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Symbolic Grammar", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 An initial fragment ( f init ) has the lexical anchor in the first position of the yield, being the initial word of a sentence (the left-most lexical node of the parse tree from which it was extracted).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Symbolic Grammar", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 A lex-first fragment ( f X lex ) has the lexical anchor (non sentence-initial) in the first position of the yield, and is rooted in X. 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Symbolic Grammar", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "\u2022 A sub-first fragment ( f Y sub ) has the lexical anchor in the second position of its yield, and a substitution site Y in the first.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Symbolic Grammar", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Fringes We will use fringes (Demberg et al., 2014) as a compressed representation of fragments, 3 A fragment can be both an initial and a lex-first fragment (e.g., if the lexical anchor is a proper noun). We will make use of two separate instances of the same fragment in the two sets. in which the internal structure is replaced by a triangle ( or \u00a1) and only the root and the yield are visible. It is possible in a grammar that multiple fragments map to the same fringe; we will refer to those as ambiguous fringes. We use both vertical ( , e.g., in Figure 3 and 4) and horizontal (\u00a1) fringe notation. The latter is used for describing the states in our chart-parsing algorithm in Section 3. For instance, the horizontal fringe representation of the right fragment in Figure 1 is S \u00a1 NP \"were\" VP S@.", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 50, |
| "text": "(Demberg et al., 2014)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 552, |
| "end": 560, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 770, |
| "end": 778, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Symbolic Grammar", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Incremental Derivation An incremental derivation is a sequence of lexicalized fragments f 1 , f 2 , . . . , f n which, combined together in the specified order, give rise to a complete parse tree (see Figure 2 for an example). The first fragment f 1 being introduced in the derivation must be an initial fragment, and its lexical anchor constitutes the oneword prefix of the sentence being generated. Subsequent fragments are introduced by means of the substitution operation, which has two variants: backward substitution ( ), which is used to substitute lex-first fragments into the partial derivation generated so far, and forward substitution ( ), which is used to substitute sub-first fragments into the partial derivation. After a number of fragments are introduced, a stop operation ( ) may terminate the generative process.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 201, |
| "end": 209, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Symbolic Grammar", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Operations The three ITSG operations take place under specific conditions within an incremental derivation, as illustrated in Figure 3 and explained hereafter. At a given stage of the generative process (after an initial fragment has been inserted), the connected partial structure may or may not have sub-", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 126, |
| "end": 134, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Symbolic Grammar", |
| "sec_num": "2.2" |
| }, |
| { |
"text": "Accepted Fragment Resulting Structure Terminated Figure 3 : Schemata of the three ITSG operations. All tree structures (partial structure and fragments) are represented in a compact notation, which displays only the root nodes and the yields. The i-th word in the structure's yield is represented as i , while \u03b1 and \u03b2 stand for any (possibly empty) sequence of words and substitution sites.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 49, |
| "end": 57, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Operation", |
| "sec_num": null |
| }, |
| { |
| "text": "Y 1 lex . . . i X \u03b1 . . . (backward) X i+1 \u03b2 . . . Y 1 lex . . . i+1 \u03b2 . . . \u03b1 . . . NO Y 1 lex . . . i (forward) X Y i+1 \u03b1 . . . X 1 lex . . . i i+1 \u03b1 . . . NO Y 1 lex . . . n (stop) \u2205 Y # \u2205 1 lex . . . n # YES", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Operation", |
| "sec_num": null |
| }, |
| { |
| "text": "stitution sites present in its yield. In the first case, a backward substitution ( ) must take place in the following generative step: if X is the left-most substitution site, a new fragment of type f X lex is chosen from the grammar and substituted into X. If the partially derived structure has no substitution site (all the nodes in its yield are lexical nodes) and it is rooted in Y , two possible choices exist: either the generative process terminates by means of the stop operation ( Y ), or the generative process continues. In the latter case a forward substitution ( ) is performed: a new f Y sub fragment is chosen from the grammar, and the partial structure is substituted into the left-most substitution site Y of the fragment. 4 Multiple Derivations As in TSG, an ITSG may be able to generate the same parse tree in multiple ways: multiple incremental derivations yielding the same tree. Figure 4 shows one such example.", |
| "cite_spans": [ |
| { |
| "start": 741, |
| "end": 742, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 902, |
| "end": 910, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Operation", |
| "sec_num": null |
| }, |
| { |
| "text": "Generative Capacity It is useful to clarify the difference between ITSG and the more general TSG formalism in terms of generative capacity. Although both types of grammar make use of the substitution operation to combine fragments, an ITSG imposes more constraints on (i) the type of fragments which are allowed in the grammar (initial, lex-first, 4 A stop operation can be viewed as a forward substitution when using an artificial sub-first fragment \u2205 \u00a1 Y # (stop fragment), where # is an artificial lexical node indicating the termination of the sentence. For simplicity, stop fragments are omitted in Figure 2 and 4 and Y is attached to the stop symbol ( Y ). and sub-first fragments), and (ii) the generative process with which fragments are combined (incrementally left to right instead of top-down). If we compare a TSG and an ITSG on the same set of (ITSGcompatible) fragments, then there are cases in which the TSG can generate more tree structures than the ITSG.", |
| "cite_spans": [ |
| { |
| "start": 348, |
| "end": 349, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 604, |
| "end": 612, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Operation", |
| "sec_num": null |
| }, |
| { |
| "text": "In the following, we provide a more formal characterization of the strong and weak generative power of ITSG with respects to context-free grammar (CFG) and TSG. (However, a full investigation of this issue is beyond the scope of this paper.) We can limit our analysis to CFG, as TSG is strongly equivalent to CFG. The weak equivalence between ITSG and CFG is straightforward: for any CFG there is a way to produce a weakly equivalent grammar in Greibach Normal Form in which any production has a right side beginning with a lexical item (Aho and Ullman, 1972) . The grammar that results from this transformation is an ITSG which uses only backward substitutions.", |
| "cite_spans": [ |
| { |
| "start": 537, |
| "end": 559, |
| "text": "(Aho and Ullman, 1972)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Operation", |
| "sec_num": null |
| }, |
| { |
| "text": "S X \"a\" X \"c\" X X \"b\" S X \"c\" X \"c\" X \"c\" X \"b\" \"a\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Operation", |
| "sec_num": null |
| }, |
| { |
| "text": "Left-recursion seems to be the main obstacle for strong equivalence between ITSG and CFG. As an example, the left side of Figure 5 shows a CFG that contains a left-recursive rule. The types of structures this grammar can generate (such as the one given on the right side of the same figure) are characterized by an arbitrarily long chain of rules that can intervene before the second word of the string, \"b\", is generated. Given the incrementality constraints, there is no ITSG that can generate the same set of structures that this CFG can generate. However, it may be possible to circumvent this problem by applying the left-corner transform (Rosenkrantz and Lewis, 1970; Aho and Ullman, 1972) to generate an equivalent CFG without left-recursive rules.", |
| "cite_spans": [ |
| { |
| "start": 644, |
| "end": 673, |
| "text": "(Rosenkrantz and Lewis, 1970;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 674, |
| "end": 695, |
| "text": "Aho and Ullman, 1972)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 122, |
| "end": 130, |
| "text": "Figure 5", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Operation", |
| "sec_num": null |
| }, |
| { |
| "text": "In the generative process presented above there are a number of choices which are left open, i.e., which fragment is being introduced at a specific stage of a derivation, and when the generative process terminates. A symbolic ITSG can be equipped with a probabilistic component which deals with these choices. A proper probability model for ITSG needs to define three probability distributions over the three types of fragments in the grammar, such that:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2211 f init \u2208F init P( f init ) = 1 (1) \u2211 f X lex \u2208F X lex P( f X lex ) = 1 (\u2200X \u2208 N )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P( Y ) + \u2211 f Y sub \u2208F Y sub P( f Y sub ) = 1 (\u2200Y \u2208 N )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "The probability that an ITSG generates a specific derivation d is obtained by multiplying the probabilities of the fragments taking part in the derivation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P(d) = \u220f f \u2208d P( f )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Since the grammar may generate a tree t via multiple derivations", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "D(t) = d 1 , d 2 , . . . , d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "m , the probability of the parse tree is the sum of the probabilities of the ITSG derivations in D(t):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P(t) = \u2211 d\u2208D(t) P(d) = \u2211 d\u2208D(t) \u220f f \u2208d P( f )", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "3 Probabilistic ITSG Parser", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We introduce a probabilistic chart-parsing algorithm to efficiently compute all possible incremental derivations that an ITSG can generate given an input sentence (presented one word at the time). The parsing algorithm is an adaptation of the Earley algorithm (Earley, 1970) and its probabilistic instantiation (Stolcke, 1995) .", |
| "cite_spans": [ |
| { |
| "start": 260, |
| "end": 274, |
| "text": "(Earley, 1970)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 311, |
| "end": 326, |
| "text": "(Stolcke, 1995)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Grammar", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "A TSG incremental derivation is represented in the chart as a sequence of chart states, i.e., a path. For a given fringe in an incremental derivation, there will be one or more states in the chart, depending on the length of the fringe's yield. This is because we need to keep track of the extent to which the yield of each fringe has been consumed within a derivation as the sentence is processed incrementally. 5 At the given stage of the derivation, the states offer a compact representation over the partial structures generated so far. Figure 6 : Chart operations with forward (\u03b1), inner (\u03b3), and outer (\u03b2) probabilities.", |
| "cite_spans": [ |
| { |
| "start": 413, |
| "end": 414, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 541, |
| "end": 549, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Parsing Chart", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Start( 0 ) X \u00a1 0 \u03bd 0 : 0 X \u00a1 \u2022 0 \u03bd [\u03b1, \u03b3, \u03b2] \u03b1 = \u03b3 = P(X \u00a1 0 \u03bd) \u03b2 = \u03b2(1 : 0 X \u00a1 0 \u2022 \u03bd) Backward Substitution( i ) i : k X \u00a1 \u03bb \u2022Y \u00b5 [\u03b1, \u03b3, \u03b2] Y \u00a1 i \u03bd i : i Y \u00a1 \u2022 i \u03bd [\u03b1 , \u03b3 , \u03b2 ] \u03b1 + = \u03b1 \u2022 P(Y \u00a1 i \u03bd) \u03b3 = P(Y \u00a1 i \u03bd) Forward Substitution( i ) i : 0 Y \u00a1 \u03bd \u2022 [\u03b1, \u03b3, \u03b2] X \u00a1Y i \u00b5 i : 0 X \u00a1Y \u2022 i \u00b5 [\u03b1 , \u03b3 , \u03b2 ] \u03b1 + = \u03b1 \u2022 P(X \u00a1Y i \u00b5) \u03b3 + = \u03b3 \u2022 P(X \u00a1Y i \u00b5) \u03b2 + = \u03b2 \u2022 P(X \u00a1Y i \u00b5) Completion i : j Y \u00a1 j \u03bd \u2022 [\u03b1, \u03b3, \u03b2] j : k X \u00a1 \u03bb \u2022Y \u00b5 [\u03b1 , \u03b3 , \u03b2 ] i : k X \u00a1 \u03bbY \u2022 \u00b5 [\u03b1 , \u03b3 , \u03b2 ] \u03b1 + = \u03b1 \u2022 \u03b3 \u03b3 + = \u03b3 \u2022 \u03b3 \u03b2 + = \u03b2 \u2022 \u03b3 \u03b2 + = \u03b2 \u2022 \u03b3 Scan( i ) i : k X \u00a1 \u03bb \u2022 i \u00b5 [\u03b1, \u03b3, \u03b2] i + 1 : k X \u00a1 \u03bb i \u2022 \u00b5 [\u03b1 , \u03b3 , \u03b2 ] \u03b1 = \u03b1 \u03b3 = \u03b3 \u03b2 = \u03b2 Stop(#) n : 0 Y \u00a1 \u03bd \u2022 [\u03b1 = \u03b3, \u03b2] \u00d8 \u00a1Y # n : 0 \u00d8 \u00a1Y \u2022 # [\u03b1 , \u03b3 , \u03b2 ] \u03b1 = \u03b3 = \u03b1 \u2022 P( Y ) \u03b2 = 1 \u03b2 = P( Y )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Chart", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Each state is composed of a fringe and some additional information which keeps track of where the fringe is located within a path. A chart state can be generally represented as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Chart", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "i : k X \u00a1 \u03bb \u2022 \u00b5 (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Chart", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where X \u00a1 \u03bb\u00b5 is the state's fringe, Greek letters are (possibly empty) sequences of words and substitution sites, and \u2022 is a placeholder indicating to what extent the fragment's yield has been consumed: all the elements in the yield preceding the dot have already been accepted. Finally, i and k are indices of words in the input sentence:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Chart", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 i signifies that the current state is introduced after the first i words in the sentence have been scanned. All states in the chart will be grouped according to this index, and will constitute state-set i.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Chart", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u2022 k indicates that the fringe associated with the current state was first introduced in the chart after the first k words in the input sentence had been scanned. The index k is therefore called the start index.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Chart", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For instance when generating the first incremental derivation in Figure 4 , the parser will pass through state 1 : 1 S \u00a1 NP \u2022 \"were\" VP \".\" indicating that the second fringe is introduced right after the parser has scanned the first word in the sentence and before having scanned the second word.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 65, |
| "end": 73, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Parsing Chart", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We will first introduce the symbolic part of the parsing algorithm, and then discuss its probabilistic component in Section 3.3. In line with the generative process illustrated in Section 2.2, the parser operates on the chart states in order to keep track of all possible ITSG derivations as new words are fed in. It starts by reading the first word 0 and introducing new states to state-set 0 in the chart, those mapping to initial fragments in the grammar with 0 as lexical anchor. At a given stage, after i words have been scanned, the parser reads the next word ( i ) and introduces new states in state-sets i and i + 1 by applying specific operations on states present in the chart, and fringes in the grammar.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Parser Operations The parser begins with the start operation just described, and continues with a cycle of four operations for every word in the input sentence i (for i \u2265 0). The order of the four operations is the following: completion, backward substitution ( ), forward substitution ( ), and scan. When there are no more words in input, the parser terminates with a stop operation. We will now describe the parser operations (see Figure 6 for their formal definition), ignoring the probabilities for now. Start( 0 ): For every initial fringe in the grammar anchored in 0 , the parser inserts a (scan) state for that fringe in the state-set 0.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 433, |
| "end": 441, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Backward Substitution( i ) applies to acceptor states, i.e., those with a substitution site following the dot, say X. For each acceptor state in state-set i, and any lex-first fringe in the grammar rooted in X and anchored in i , the parser inserts a (scan) state for that fringe in state-set i.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Forward Substitution( i ) applies to donor states, i.e., those that have no elements following the dot and with start index 0. For each donor state in state-set i, rooted in Y , and any sub-first fringe in the grammar with Y as the left-most element in its yield, the parser inserts a (scan) state for that fringe in state-set i, with the dot placed after Y .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Completion applies to complete states, i.e., those with no elements following the dot and with start index j > 0. For every complete state in state-set i, rooted in Y , with starting index j, and every acceptor state in set j with Y following the dot, the parser inserts a copy of the acceptor state in state-set i, and advances the dot.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Scan( i ) applies to scan states, i.e., those with a word after the dot. For every scan state in state-set i having i after the dot, the parser inserts a copy of that state in state-set (i + 1), and advances the dot.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Stop(#) is a special type of forward substitution and applies to donor states, but only when the input word is the terminal symbol #. For every donor state in state-set n (the length of the sentence), if the root of the fringe's state is Y , the parser introduces a stop state whose fringe is a stop fringe with Y as the left most substitution site.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Comparison with the Earley Algorithm It is useful to clarify the differences between the proposed ITSG parsing algorithm and the original Earley algorithm. Primarily, the ITSG parser is based on a left-right processing order, whereas the Earley algorithm uses a top-down generative process. Moreover, our parser presupposes a restricted inventory of fragments in the grammar (the ones allowed by an ITSG) as opposed to the general CFG rules allowed by the Earley algorithm. In particular, the Backward Substitution operation is more limited than the corresponding Prediction step of the Earley algorithm: only lex-first fragments can be introduced using Backward Substitution, and therefore left recursion (allowed by the Earley algorithm) is not possible here. 6 This restriction is compensated for by the existence of the Forward Substitution operation, which has no analog in the Earley algorithm. 7 The worst-case complexity of the Earley algorithm is dominated by the Completion operation which is identical to that in our parser, and therefore the original total time complexity applies, i.e., O(l 3 ) for an input sentence of length l, and O(n 3 ) in terms of the number of non-lexical nodes n in the grammar.", |
| "cite_spans": [ |
| { |
| "start": 901, |
| "end": 902, |
| "text": "7", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Derivations Incremental (partial) derivations are represented in the chart as (partial) paths along states. Each state can lead to one or more successors, and come from one or more antecedents. Scan is the only operation which introduces, for every scan state, a new single successor state (which can be of any of the four types) in the following stateset. Complete states may lead to several states within the current state-set, which may belong to any of the four types. An acceptor state may lead to a number of scan states via backward substitution (depending on the number of lex-first fringes that can combine with it). Similarly, a donor state may lead to a number of scan states via forward substitution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "After i words have been scanned, we can retrieve (partial) paths from the chart. This is done in a backward direction starting from scan states in state-set i all the way back to the initial states. This is possible since all the operations are reversible, i.e., given a state it is possible to retrieve its antecedent state(s).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As an example, consider the ITSG grammar consisting of the fragments in Figure 7 and the two derivations of the same parse tree in the same figure; Figure 7 represents the parsing chart of the same grammar, containing the two corresponding paths.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 72, |
| "end": 80, |
| "text": "Figure 7", |
| "ref_id": null |
| }, |
| { |
| "start": 148, |
| "end": 156, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Parsing Algorithm", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the probabilistic version of the parser, each fringe in the grammar has a given probability, such that Equations (1)-(3) are satisfied. 8 In the probabilistic chart, every state i : k X \u00a1 \u03bb \u2022 \u00b5 is decorated with three 0 -\"Terms\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S | 0 NP \u00a1 \u2022 \"Terms\" [1/2, 1/2, 1] || 0 S \u00a1 \u2022 \"Terms\" S@ [1/2, 1/2, 1]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "1 -\"were\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S | 0 S \u00a1 NP \u2022 \"were\" V P \".\" [1/2, 1/2, 1] || 1 S@ \u00a1 \u2022 \"were\" V P \".\" [1/2, 1, 1/2] || 0 S \u00a1 \"Terms\" \u2022 S@ [1/2, 1/2, 1] * || S@ \u00a1 \"were\" V P \".\" [1] | 0 NP \u00a1 \"Terms\" \u2022 [1/2, 1/2, 1] | S \u00a1 NP \"were\" V P \".\" [1] 2 -\"disclosed\" S 2 V P \u00a1 \u2022 \"disclosed\" [1, 1, 1] | 0 S \u00a1 NP \"were\" \u2022 V P \".\" [1/2, 1/2, 1] * * || 1 S@ \u00a1 \"were\" \u2022 V P \".\" [1/2, 1, 1/2] * * * V P \u00a1 \"disclosed\" [1]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "3 -\".\" Figure 7 : The parsing chart of the two derivations in Figure 4 . Blue states or fringes (also marked with |) are the ones in the first derivation, red (||) in the second, and yellow (no marks) are the ones in common. Each state-set is represented as a separate block in the chart, headed by the state-set index and the next word. Each row maps to a chart operation (specified in the first column, with S and C standing for 'scan' and 'complete' respectively) and follows the same notation as Figure 6. Symbols * are used as state placeholders.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 7, |
| "end": 15, |
| "text": "Figure 7", |
| "ref_id": null |
| }, |
| { |
| "start": 62, |
| "end": 70, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "S | 0 S \u00a1 NP \"were\" V P \u2022 \".\" [1/2, 1/2, 1] || 1 S@ \u00a1 \"were\" V P \u2022 \".\" [1/2, 1, 1/2] C 2 V P \u00a1 \"disclosed\" \u2022 [1, 1, 1] | ** || *** 4 -# S 0 \u2205 \u00a1 S \u2022 # [1, 1, 1] || 0 S \u00a1 \"Terms\" S@ \u2022 [1/2, 1/2, 1] | 0 S \u00a1 NP \"were\" V P \".\" \u2022 [1/2, 1/2, 1] \u2205 \u00a1 S # [1] C || 1 S@ \u00a1 \"were\" V P \".\" \u2022 [1/2, 1, 1/2] || *", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "probabilities [\u03b1, \u03b3, \u03b2] as shown in the chart example in Figure 7 .", |
| "cite_spans": [ |
| { |
| "start": 14, |
| "end": 23, |
| "text": "[\u03b1, \u03b3, \u03b2]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 57, |
| "end": 65, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 The forward probability \u03b1 is the marginal probability of all the paths starting with an initial state, scanning all initial words in the sentence up to and including i\u22121, and passing through the current state.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 The inner probability \u03b3 is the marginal probability of all the paths passing through the state k : k X \u00a1 \u2022\u03bb\u00b5, scanning words k , . . . , i\u22121 and passing through the current state.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "\u2022 The outer probability \u03b2 is the marginal probability of all the paths starting with an initial state, scanning all initial words in the sentence up to and including k\u22121, passing through the current state, and reaching a stop state.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Forward (\u03b1) and inner (\u03b3) probabilities are propagated while filling the chart incrementally, whereas outer probabilities (\u03b2) are back-propagated from the stop states, for which \u03b2 = 1 (see Figure 6 ). These probabilities are used for computing prefix and sentence probabilities, and for obtaining the most probable partial derivation (MPD) of a prefix, the MPD of a sentence, its minimum risk parse (MRP), and to approximate its most probable parse (MPP). Prefix probabilities are obtained by summing over the forward probabilities of all scan states in state-set i having i after the dot: 9", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 189, |
| "end": 197, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P( 0 , . . . , i ) = \u2211 state s i: k X\u00a1\u03bb\u2022 i \u00b5 \u03b1(s)", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Probabilistic Parser", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The Most Probable (partial) Derivation (MPD) can be obtained from the chart by backtracking the Viterbi path. Viterbi forward and inner probabilities (\u03b1 * , \u03b3 * ) are propagated as standard forward and inner probabilities except that summation is replaced by maximization, and the probability of an ambiguous fringe is the maximum probability among all the fragments mapping into it (instead of the marginal one). The Viterbi partial path for the prefix 0 , . . . , i can then be retrieved by backtracking from the scan state in state-set i with max \u03b1 * : for each state, the most probable preceding state is retrieved, i.e., the state among its antecedents with maximum \u03b1 * . The Viterbi complete path of a sentence can be obtained by backtracking the Viterbi path from the stop state with max \u03b1 * . Given a Viterbi path, it is possible to obtain the corresponding MPD. This is done by retrieving the associated sequence of fragments 10 and connecting them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Most Probable Derivation (MPD)", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "According to Equation (5), if we want to compute the MPP we need to retrieve all possible derivations of the current sentence, sum up the probabilities of those generating the same tree, and return the tree with max marginal probability. Unfortunately the number of possible derivations grows exponentially with the length of the sentence, and computing the exact MPP is NP-hard (Sima'an, 1996) . In our implementation, we approximate the MPP by performing this marginalization over the Viterbi-best derivations obtained from all stop states in the chart.", |
| "cite_spans": [ |
| { |
| "start": 371, |
| "end": 394, |
| "text": "NP-hard (Sima'an, 1996)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Most Probable Parse (MPP)", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "MPD and MPP aim at obtaining the structure of a sentence which is more likely as a whole under the current probabilistic model. Alternatively, we may want to focus on the single components of a tree structure, e.g., CFG rules covering a certain span of the sentence, and search for the structure which has the highest number of correct constituents, as proposed by Goodman (1996) . Such a structure is more likely to obtain higher results according to standard parsing evaluations, as the objective being maximized is closely related to the metric used for evaluation (recall/precision on the number of correct labeled constituents).", |
| "cite_spans": [ |
| { |
| "start": 365, |
| "end": 379, |
| "text": "Goodman (1996)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Minimum Risk Parse (MRP)", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "In order to obtain the minimum risk parse (MRP) we utilize both inner (\u03b3) and outer (\u03b2) probabilities. The product of these two probabilities equals the marginal probability of all paths generating the entire current sentence and passing through the current state. We can therefore compute the probability of a fringe f = X \u00a1 \u03bb \u2022 \u00b5 covering a specific span [s,t] of the sentence:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Minimum Risk Parse (MRP)", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P( f , [s,t]) = \u03b3(t : s f \u2022) \u2022 \u03b2(t : s f \u2022)", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Minimum Risk Parse (MRP)", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "We can then compute the probability of each fragment spanning [s,t], 11 and the probability P(r, [s,t]) of a CFG-rule r spanning [s,t].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Minimum Risk Parse (MRP)", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "12 Finally the MRP is computed as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Minimum Risk Parse (MRP)", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "MRP = arg max T \u220f r\u2208T P(r, [s,t])", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Minimum Risk Parse (MRP)", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "4 Experiments", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Minimum Risk Parse (MRP)", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "For training and evaluating the ITSG parser, we employ the Penn WSJ Treebank (Marcus et al., 1993) . We use sections 2-21 for training, sections 22 and 24 for development and section 23 for testing.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 98, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Minimum Risk Parse (MRP)", |
| "sec_num": "3.6" |
| }, |
| { |
| "text": "Following standard practice, we start with some preprocessing of the treebank. After removing traces and functional tags, we apply right binarization on the training treebank (Klein and Manning, 2003) , with no horizontal and vertical conditioning. This means that when a node X has more than two children, new artificial constituents labeled X@ are created in a right recursive fashion (see Figure 1) . 13 We then replace words appearing less than five times in the training data by one of 50 unknown word categories based on the presence of lexical features as described in Petrov (2009) .", |
| "cite_spans": [ |
| { |
| "start": 175, |
| "end": 200, |
| "text": "(Klein and Manning, 2003)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 404, |
| "end": 406, |
| "text": "13", |
| "ref_id": null |
| }, |
| { |
| "start": 576, |
| "end": 589, |
| "text": "Petrov (2009)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 392, |
| "end": 401, |
| "text": "Figure 1)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Grammar Extraction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Fragment Extraction In order to equip the grammar with a representative set of lexicalized fragments, we use the extraction algorithm of Sangati et al. (2010) which finds maximal fragments recurring twice or more in the training treebank. To ensure better coverage, we additionally extract one-word fragments from each training parse tree: for each lexical node in the parse tree we percolate up till the root node, and for every encountered internal node X 0 , X 1 , . . . , X i we extract the lexicalized fragment whose spine is X i \u2212 X i\u22121 \u2212 . . . \u2212 X 0 \u2212 , and where all the remaining children of the internal nodes are substitution sites (see for instance the right fragment in Figure 1 ). Finally, we remove all fragments which do not comply with the restrictions presented in Section 2.1. 14 For each extracted fragment we keep track of its frequency, i.e., the number of times it occurs in the training corpus. Each fragment's probability is then derived according to its relative frequency in the corresponding set of fragments ( f init , f X lex , f Y sub ), so that equations(1)-(3) are satisfied. The final grammar consists of 2.2M fragments mapping to 2.0M fringes.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 683, |
| "end": 691, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Grammar Extraction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Smoothing Two types of smoothing are performed over the grammar's fragments: Open class smoothing adds simple CFG rewriting rules to the grammar for open-class 15 PoS, word pairs not encountered in the training corpus, with frequency 10 \u22126 . Initial fragments smoothing adds each lex-first fragment f to the initial fragment set with frequency 10 \u22122 \u2022 freq( f ). 16 All ITSG experiments we report used exhaustive search (no beam was used to prune the search space).", |
| "cite_spans": [ |
| { |
| "start": 363, |
| "end": 365, |
| "text": "16", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grammar Extraction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In addition to standard full-sentence parsing results, we propose a novel way of evaluating our ITSG on partial trees, i.e., those that the parser constructs for sentence prefixes. More precisely, for each prefix of the input sentence (length two words or longer) we compute the parsing accuracy on the minimal structure spanning that prefix. The minimal structure is obtained from the subtree rooted in the minimum common ancestor of the prefix nodes, after pruning those nodes not yielding any word in the prefix.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As observed in the example derivations of Figure 4 , our ITSG generates partial trees for a given prefix which may include predictions about unseen parts of the sentence. We propose three new measures for evaluating sentence prediction: 17 Word prediction PRD(m): For every prefix of each test sentence, if the model predicts m \u2265 m words, the prediction is correct if the first m predicted words are identical to the m words following the prefix in the original sentence.", |
| "cite_spans": [ |
| { |
| "start": 237, |
| "end": 239, |
| "text": "17", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 42, |
| "end": 50, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Word presence PRS(m): For every prefix of each test sentence, if the model predicts m \u2265 m words, the prediction is correct if the first m predicted words are present, in the same order, in the words following the prefix in the original sentence (i.e., the predicted word sequence is a subsequence of the sequence of words following the prefix). 18 Longest common subsequence LCS: For every prefix of each test sentence, it computes the longest common subsequence between the sequence of predicted words (possibly none) and the words following the prefix in the original sentence.", |
| "cite_spans": [ |
| { |
| "start": 345, |
| "end": 347, |
| "text": "18", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Recall and precision can be computed in the usual way for these three measures. Recall is the total number (over all prefixes) of correctly predicted words (as defined by PRD(m), PRS(m), or LCS) over the total number of words expected to be predicted (according to m), while precision is the number of correctly predicted words over the number of words predicted by the model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We compare the ITSG parser with the incremental parsers of Schuler et al. (2010) and Demberg et al. (2014) for full-sentence parsing, with the Roark (2001) parser 19 for full-sentence and partial pars-R P F1 Demberg et al. (2014) 79.4 79.4 79.4 Schuler et al. (2010) 83.4 83.7 83.5 Roark (2001) 86.6 86.5 86.5 Roark et al. (2009) 87 ing, and with a language model built using SRILM (Stolcke, 2002) for sentence prediction. We used a standard 3-gram model trained on the sentences of the training set using the default setting and smoothing (Kneser-Ney) provided by the SRILM package. (Higher n-gram model do not seem appropriate, given the small size of the training corpus.) For every prefix in the test set we compute the most probable continuation predicted by the n-gram model. 20 Table 1 reports full-sentence parsing results for our parser and three comparable incremental parsers from the literature. While Roark (2001) obtains the best results, the ITSG parser without smoothing performs on a par with Schuler et al. (2010) , and outperforms Demberg et al. (2014) . 21 Adding smoothing results in a gain of 1.2 points F-score over the Schuler parser. When we compare the different parsing objectives of the ITSG parser, MRP is the best one, followed by MPP and MPD. Figure 8 compare the ITSG and Roark's parser on the incremental parsing evaluation, when parsing sentences of length 10, 20, 30 and 40. The performance of both models declines as the length of the prefix increases, with Roark's parser outperforming the ITSG parser on average, although the ITSG parser seems more com- 20 We used a modified version of a script by Nathaniel Smith available at https://github.com/njsmith/pysrilm.", |
| "cite_spans": [ |
| { |
| "start": 59, |
| "end": 80, |
| "text": "Schuler et al. (2010)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 85, |
| "end": 106, |
| "text": "Demberg et al. (2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 208, |
| "end": 229, |
| "text": "Demberg et al. (2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 245, |
| "end": 266, |
| "text": "Schuler et al. (2010)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 282, |
| "end": 294, |
| "text": "Roark (2001)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 310, |
| "end": 329, |
| "text": "Roark et al. (2009)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 382, |
| "end": 397, |
| "text": "(Stolcke, 2002)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 914, |
| "end": 926, |
| "text": "Roark (2001)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1010, |
| "end": 1031, |
| "text": "Schuler et al. (2010)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1050, |
| "end": 1071, |
| "text": "Demberg et al. (2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1592, |
| "end": 1594, |
| "text": "20", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 785, |
| "end": 792, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1274, |
| "end": 1282, |
| "text": "Figure 8", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "21 Note that the scores reported by Demberg et al. (2014) are for TAG structures, not for the original Penn Treebank trees. petitive when parsing prefixes for longer (and therefore more difficult) sentences. Table 2 compares the sentence prediction results of the ITSG and the language model (SRILM). The latter is outperforming the former when predicting the next word of a prefix, i.e. PRD(1), whereas ITSG is better than the language model at predicting a single future word, i.e. PRS(1). When more than one (consecutive) word is considered, the SRILM model exhibits a slightly better recall while ITSG achieves a large gain in precision. This illustrates the complementary nature of the two models: while the language model is better at predicting the next word, the ITSG predicts future words (rarely adjacent to the prefix) with high confidence (89.4% LCS precision). However, it makes predictions for only a small number of words (5.9% LCS recall). Examples of sentence predictions can be found in Table 3 .", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 57, |
| "text": "Demberg et al. (2014)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 208, |
| "end": 215, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1005, |
| "end": 1012, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Incremental Parsing The graphs in", |
| "sec_num": null |
| }, |
| { |
| "text": "To the best of our knowledge, there are no other incremental TSG parsers in the literature. The parser of Demberg et al. (2014) is closely related, but uses tree-adjoining grammar, which includes both substitution and adjunction. That parser makes predictions, but only for upcoming structure, not for upcoming words, and thus cannot be used directly for sentence prediction. The incremental parser of Roark (2001) on the basis of context-free rules. These are augmented with a large number of non-local features (e.g., grandparent categories). Our approach avoids the need for such additional features, as TSG fragments naturally contain non-local information. Roark's parser outperforms ours in both fullsentence and incremental F-score (see Section 4), but cannot be used for sentence prediction straightforwardly: to obtain a prediction for the next word, we would need to compute an argmax over the whole vocabulary, then iterate this for each word after that (the same is true for the parsers of Schuler et al., 2010 and Demberg et al., 2014) . Most incremental dependency parsers use a discriminative model over parse actions (Nivre, 2007) , and therefore cannot predict upcoming words either (but see Huang and Sagae 2010) .", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 127, |
| "text": "Demberg et al. (2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 402, |
| "end": 414, |
| "text": "Roark (2001)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1002, |
| "end": 1026, |
| "text": "Schuler et al., 2010 and", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1027, |
| "end": 1048, |
| "text": "Demberg et al., 2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1133, |
| "end": 1146, |
| "text": "(Nivre, 2007)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1209, |
| "end": 1230, |
| "text": "Huang and Sagae 2010)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Turning to the literature on sentence prediction, we note that ours is the first attempt to use a parser for this task. Existing approaches either use n-gram models (Eng and Eisner, 2004; Bickel et al., 2005) or a retrieval approach in which the best matching sentence is identified from a sentence collection given a set of features (Grabski and Scheffer, 2004) . There is also work combining n-gram models with lexical semantics (Li and Hirst, 2005) or part-of-speech information (Fazly and Hirst, 2003) .", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 187, |
| "text": "(Eng and Eisner, 2004;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 188, |
| "end": 208, |
| "text": "Bickel et al., 2005)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 334, |
| "end": 362, |
| "text": "(Grabski and Scheffer, 2004)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 431, |
| "end": 451, |
| "text": "(Li and Hirst, 2005)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 482, |
| "end": 505, |
| "text": "(Fazly and Hirst, 2003)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In the language modeling literature, more sophisticated models than simple n-gram models have been developed in the past few years, and these could potentially improve sentence prediction. Examples include syntactic language models which have applied successfully for speech recognition (Chelba and Jelinek, 2000; Xu et al., 2002) and machine translation (Schwartz et al., 2011; Tan et al., 2011) , as well as discriminative language models (Mikolov et al., 2010; Roark et al., 2007) . Future work should evaluate these approaches against the ITSG model proposed here.", |
| "cite_spans": [ |
| { |
| "start": 287, |
| "end": 313, |
| "text": "(Chelba and Jelinek, 2000;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 314, |
| "end": 330, |
| "text": "Xu et al., 2002)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 355, |
| "end": 378, |
| "text": "(Schwartz et al., 2011;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 379, |
| "end": 396, |
| "text": "Tan et al., 2011)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 441, |
| "end": 463, |
| "text": "(Mikolov et al., 2010;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 464, |
| "end": 483, |
| "text": "Roark et al., 2007)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We have presented the first incremental parser for tree substitution grammar. Incrementality is motivated by psycholinguistic findings, and by the need for real-time interpretation in NLP. We have shown that our parser performs competitively on both full sentence and sentence prefix F-score. We also introduced sentence prediction as a new way of evaluating incremental parsers, and demonstrated that our parser outperforms an n-gram model in predicting more than one upcoming word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The performance of our approach is likely to improve by implementing better binarization and more advanced smoothing. Also, our model currently contains no conditioning on lexical information, which is also likely to yield a performance gain. Finally, future work could involve replacing the relative frequency estimator that we use with more sophisticated estimation schemes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "A fringe (state) may occur in multiple derivations (paths): for instance inFigure 4the two derivations will correspond to two separate paths that will converge to the same fringe (state).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "This further simplifies the probabilistic version of our parser, as there is no need to resort to the probabilistic reflexive, transitive left-corner relation described byStolcke (1995).7 This operation would violate Earley's top-down constraint; donor states are in fact the terminal states in Earley algorithm.8 The probability of an ambiguous fringe is the marginal probability of the fragments mapping to it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Sentence probability is obtained by marginalizing the forward probabilities of the stop states in the last state-set n.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For each scan state in the path, we obtain the fragment in the grammar that maps into the state's fringe. For ambiguous fringes the most probable fragment that maps into it is selected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For an ambiguous fringe, the spanning probability of each fragment mapping into it is the fraction of the fringe's spanning probability with respect to the marginal fringe probability.12 Marginalizing the probabilities of all fragments having r spanning[s,t].13 This shallow binarization (H0V1) was used based on gold coverage of the unsmoothed grammar (extracted from the training set) on trees in section 22: H0V1 binarization results on a coverage of 88.0% of the trees, compared to 79.2% for H1V1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The fragment with no lexical items, and those with more than one substitution site at the beginning of the yield.15 A PoS belongs to the open class if it rewrites to at least 50 different words in the training corpus. A word belongs to the open class if it has been seen only with open-class PoS tags.16 The parameters were tuned on section 24 of the WSJ.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We also evaluated our ITSG model using perplexity; the results obtained were substantially worse than those obtained using Roark's parsers.18 Note that neither PRD(m) nor PRS(m) correspond to word error rate (WER). PRD requires the predicted word sequence to be identical to the original sequence, while PRS only requires the predicted words to be present in the original. In contrast, WER measures the minimum number of substitutions, insertions, and deletions needed to transform the predicted sequence into the original sequence.19 Apart from reporting the results inRoark (2001), we also run the latest version ofRoark's parser, used in Roark et al. (2009), which has higher results compared to the original work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was funded by EPSRC grant EP/I032916/1 \"An integrated model of syntactic and semantic prediction in human language processing\". We are grateful to Brian Roark for clarifying correspondence and for guidance in using his incremental parser. We would also like to thank Katja Abramova, Vera Demberg, Mirella Lapata, Andreas van Cranenburgh, and three anonymous reviewers for useful comments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "The theory of parsing, translation, and compiling", |
| "authors": [ |
| { |
| "first": "Alfred", |
| "middle": [ |
| "V" |
| ], |
| "last": "Aho", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [ |
| "D" |
| ], |
| "last": "Ullman", |
| "suffix": "" |
| } |
| ], |
| "year": 1972, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alfred V. Aho and Jeffrey D. Ullman. 1972. The theory of parsing, translation, and compiling. Prentice-Hall, Inc., Upper Saddle River, NJ.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Incremental interpretation at verbs: Restricting the domain of subsequent reference", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "M" |
| ], |
| "last": "Gerry", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuki", |
| "middle": [], |
| "last": "Altmann", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kamide", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Cognition", |
| "volume": "73", |
| "issue": "", |
| "pages": "247--264", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gerry T. M. Altmann and Yuki Kamide. 1999. Incre- mental interpretation at verbs: Restricting the do- main of subsequent reference. Cognition, 73:247- 264.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Predicting sentences using n-gram language models", |
| "authors": [ |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Bickel", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Haider", |
| "suffix": "" |
| }, |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Scheffer", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Conference on Human Language Technology and Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "193--200", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steffen Bickel, Peter Haider, and Tobias Scheffer. 2005. Predicting sentences using n-gram lan- guage models. In Proceedings of the Conference on Human Language Technology and Empirical Methods in Natural Language Processing, pages 193-200. Vancouver.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The problem of computing the most probable tree in data-oriented parsing and stochastic tree grammars", |
| "authors": [ |
| { |
| "first": "Rens", |
| "middle": [], |
| "last": "Bod", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of the 7th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "104--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rens Bod. 1995. The problem of computing the most probable tree in data-oriented parsing and stochastic tree grammars. In Proceedings of the 7th Conference of the European Chapter of the Association for Computational Linguistics, pages 104-111. Association for Computer Linguistics, Dublin.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Data-Oriented Parsing", |
| "authors": [ |
| { |
| "first": "Rens", |
| "middle": [], |
| "last": "Bod", |
| "suffix": "" |
| }, |
| { |
| "first": "Khalil", |
| "middle": [], |
| "last": "Sima'an", |
| "suffix": "" |
| }, |
| { |
| "first": "Remko", |
| "middle": [], |
| "last": "Scha", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rens Bod, Khalil Sima'an, and Remko Scha. 2003. Data-Oriented Parsing. University of Chicago Press, Chicago, IL.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Structured language modeling", |
| "authors": [ |
| { |
| "first": "Ciprian", |
| "middle": [], |
| "last": "Chelba", |
| "suffix": "" |
| }, |
| { |
| "first": "Frederick", |
| "middle": [], |
| "last": "Jelinek", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Computer Speech and Language", |
| "volume": "14", |
| "issue": "", |
| "pages": "283--332", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ciprian Chelba and Frederick Jelinek. 2000. Struc- tured language modeling. Computer Speech and Language, 14:283-332.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Data from eye-tracking corpora as evidence for theories of syntactic processing complexity", |
| "authors": [ |
| { |
| "first": "Vera", |
| "middle": [], |
| "last": "Demberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Keller", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Cognition", |
| "volume": "101", |
| "issue": "2", |
| "pages": "193--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vera Demberg and Frank Keller. 2008. Data from eye-tracking corpora as evidence for theories of syntactic processing complexity. Cognition, 101(2):193-210.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Parsing with psycholinguistically motivated tree-adjoining grammar", |
| "authors": [ |
| { |
| "first": "Vera", |
| "middle": [], |
| "last": "Demberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Keller", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Koller", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Computational Linguistics", |
| "volume": "", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vera Demberg, Frank Keller, and Alexander Koller. 2014. Parsing with psycholinguistically moti- vated tree-adjoining grammar. Computational Linguistics, 40(1). In press.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "An efficient context-free parsing algorithm", |
| "authors": [ |
| { |
| "first": "Jay", |
| "middle": [], |
| "last": "Earley", |
| "suffix": "" |
| } |
| ], |
| "year": 1970, |
| "venue": "Communications of the ACM", |
| "volume": "13", |
| "issue": "2", |
| "pages": "94--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jay Earley. 1970. An efficient context-free pars- ing algorithm. Communications of the ACM, 13(2):94-102.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Radiology report entry with automatic phrase completion driven by language modeling", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Eng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [ |
| "M" |
| ], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Radiographics", |
| "volume": "24", |
| "issue": "5", |
| "pages": "1493--1501", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Eng and Jason M. Eisner. 2004. Radiology report entry with automatic phrase completion driven by language modeling. Radiographics, 24(5):1493-1501.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Testing the efficacy of part-of-speech information in word completion", |
| "authors": [ |
| { |
| "first": "Afsaneh", |
| "middle": [], |
| "last": "Fazly", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the EACL Workshop on Language Modeling for Text Entry Methods", |
| "volume": "", |
| "issue": "", |
| "pages": "9--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Afsaneh Fazly and Graeme Hirst. 2003. Testing the efficacy of part-of-speech information in word completion. In Proceedings of the EACL Work- shop on Language Modeling for Text Entry Meth- ods, pages 9-16. Budapest.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Parsing algorithms and metrics", |
| "authors": [ |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the 34th Annual Meeting on Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "177--183", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joshua Goodman. 1996. Parsing algorithms and metrics. In Proceedings of the 34th Annual Meet- ing on Association for Computational Linguistics, pages 177-183. Association for Computational Linguistics, Santa Cruz.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Sentence completion", |
| "authors": [ |
| { |
| "first": "Korinna", |
| "middle": [], |
| "last": "Grabski", |
| "suffix": "" |
| }, |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Scheffer", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 27th Annual International ACM SIR Conference on Research and Development in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "433--439", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Korinna Grabski and Tobias Scheffer. 2004. Sen- tence completion. In Proceedings of the 27th An- nual International ACM SIR Conference on Re- search and Development in Information Retrieval, pages 433-439. Sheffield.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Dynamic programming for linear-time incremental parsing", |
| "authors": [ |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Sagae", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1077--1086", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang Huang and Kenji Sagae. 2010. Dynamic pro- gramming for linear-time incremental parsing. In Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, pages 1077-1086. Association for Computational Lin- guistics, Uppsala.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Accurate unlexicalized parsing", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 41st Annual Meeting on Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "423--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Klein and Christopher D. Manning. 2003. Ac- curate unlexicalized parsing. In Proceedings of the 41st Annual Meeting on Association for Com- putational Linguistics, pages 423-430. Associa- tion for Computational Linguistics, Sapporo.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Semantic knowledge in a word completion task", |
| "authors": [ |
| { |
| "first": "Jianhua", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 7th International ACM SIGAC-CESS Conference on Computers and Accessibility", |
| "volume": "", |
| "issue": "", |
| "pages": "121--128", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianhua Li and Graeme Hirst. 2005. Semantic knowledge in a word completion task. In Pro- ceedings of the 7th International ACM SIGAC- CESS Conference on Computers and Accessibil- ity, pages 121-128. Baltimore.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Building a large annotated corpus of english: The penn treebank", |
| "authors": [ |
| { |
| "first": "Mitchell", |
| "middle": [ |
| "P" |
| ], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ann" |
| ], |
| "last": "Marcinkiewicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Beatrice", |
| "middle": [], |
| "last": "Santorini", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "2", |
| "pages": "313--330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beatrice Santorini. 1993. Building a large anno- tated corpus of english: The penn treebank. Com- putational Linguistics, 19(2):313-330.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Recurrent neural network based language model", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Karafiat", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 11th", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Martin Karafiat, Jan Cernocky, and Sanjeev. 2010. Recurrent neural network based language model. In Proceedings of the 11th", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Annual Conference of the International Speech Communication Association", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "2877--2880", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Conference of the International Speech Communication Association, pages 2877-2880. Florence.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Incremental non-projective dependency parsing", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of Human Language Technologies: The Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "396--403", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre. 2007. Incremental non-projective dependency parsing. In Proceedings of Human Language Technologies: The Annual Conference of the North American Chapter of the Associa- tion for Computational Linguistics, pages 396- 403. Association for Computational Linguistics, Rochester.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Coarse-to-Fine Natural Language Processing", |
| "authors": [ |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Slav Petrov. 2009. Coarse-to-Fine Natural Lan- guage Processing. Ph.D. thesis, University of California at Bekeley, Berkeley, CA.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Probabilistic top-down parsing and language modeling", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Roark", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Computational Linguististics", |
| "volume": "27", |
| "issue": "", |
| "pages": "249--276", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Roark. 2001. Probabilistic top-down parsing and language modeling. Computational Linguis- tistics, 27:249-276.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Deriving lexical and syntactic expectation-based measures for psycholinguistic modeling via incremental top-down parsing", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Roark", |
| "suffix": "" |
| }, |
| { |
| "first": "Asaf", |
| "middle": [], |
| "last": "Bachrach", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Cardenas", |
| "suffix": "" |
| }, |
| { |
| "first": "Christophe", |
| "middle": [], |
| "last": "Pallier", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "324--333", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Roark, Asaf Bachrach, Carlos Cardenas, and Christophe Pallier. 2009. Deriving lexical and syntactic expectation-based measures for psy- cholinguistic modeling via incremental top-down parsing. In Proceedings of the Conference on Em- pirical Methods in Natural Language Processing, pages 324-333. Association for Computational Linguistics, Singapore.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Discriminative n-gram language modeling", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Roark", |
| "suffix": "" |
| }, |
| { |
| "first": "Murat", |
| "middle": [], |
| "last": "Saraclar", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computer Speech and Language", |
| "volume": "21", |
| "issue": "2", |
| "pages": "373--392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Roark, Murat Saraclar, and Michael Collins. 2007. Discriminative n-gram language modeling. Computer Speech and Language, 21(2):373-392.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Deterministic left corner parsing", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "J" |
| ], |
| "last": "Rosenkrantz", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "M" |
| ], |
| "last": "Lewis", |
| "suffix": "" |
| } |
| ], |
| "year": 1970, |
| "venue": "Proceedings of the 11th Annual Symposium on Switching and Automata Theory", |
| "volume": "", |
| "issue": "", |
| "pages": "139--152", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. J. Rosenkrantz and P. M. Lewis. 1970. Deter- ministic left corner parsing. In Proceedings of the 11th Annual Symposium on Switching and Au- tomata Theory, pages 139-152. IEEE Computer Society, Washington, DC.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Efficiently extract recurring tree fragments from large treebanks", |
| "authors": [ |
| { |
| "first": "Federico", |
| "middle": [], |
| "last": "Sangati", |
| "suffix": "" |
| }, |
| { |
| "first": "Willem", |
| "middle": [], |
| "last": "Zuidema", |
| "suffix": "" |
| }, |
| { |
| "first": "Rens", |
| "middle": [], |
| "last": "Bod", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 7th InternationalConference on Language Resources and Evaluation. European Language Resources Association", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Federico Sangati, Willem Zuidema, and Rens Bod. 2010. Efficiently extract recurring tree fragments from large treebanks. In Nicoletta Calzolari, Khalid Choukri, Bente Maegaard, Joseph Mar- iani, Jan Odijk, Stelios Piperidis, Mike Rosner, and Daniel Tapias, editors, Proceedings of the 7th InternationalConference on Language Resources and Evaluation. European Language Resources Association, Valletta, Malta.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Mathematical and Computational Aspects of Lexicalized Grammars", |
| "authors": [ |
| { |
| "first": "Yves", |
| "middle": [], |
| "last": "Schabes", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yves Schabes. 1990. Mathematical and Computa- tional Aspects of Lexicalized Grammars. Ph.D. thesis, University of Pennsylvania, Philadelphia, PA.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Broad-coverage parsing using human-like memory constraints", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Schuler", |
| "suffix": "" |
| }, |
| { |
| "first": "Samir", |
| "middle": [], |
| "last": "Abdelrahman", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Lane", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Computational Linguististics", |
| "volume": "36", |
| "issue": "1", |
| "pages": "1--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Schuler, Samir AbdelRahman, Tim Miller, and Lane Schwartz. 2010. Broad-coverage pars- ing using human-like memory constraints. Com- putational Linguististics, 36(1):1-30.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Incremental syntactic language models for phrase-based translation", |
| "authors": [ |
| { |
| "first": "Lane", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Schuler", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "620--631", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lane Schwartz, Chris Callison-Burch, William Schuler, and Stephen Wu. 2011. Incremental syn- tactic language models for phrase-based transla- tion. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, Volume 1, pages 620-631. Association for Computational Linguis- tics, Portland, OR.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Computational complexity of probabilistic disambiguation by means of treegrammars", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Khalil Sima'an", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the 16th Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1175--1180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Khalil Sima'an. 1996. Computational complexity of probabilistic disambiguation by means of tree- grammars. In Proceedings of the 16th Confer- ence on Computational Linguistics, pages 1175- 1180. Association for Computational Linguistics, Copenhagen.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "An efficient probabilistic context-free parsing algorithm that computes prefix probabilities", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Computational Linguistics", |
| "volume": "21", |
| "issue": "2", |
| "pages": "165--201", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Stolcke. 1995. An efficient probabilis- tic context-free parsing algorithm that computes prefix probabilities. Computational Linguistics, 21(2):165-201.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "SRILM - an extensible language modeling toolkit", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Stolcke", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings International Conference on Spoken Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "257--286", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Stolcke. 2002. SRILM -an extensible lan- guage modeling toolkit. In Proceedings Interna- tional Conference on Spoken Language Process- ing, pages 257-286. Denver, CO.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Incremental parsing with reference interaction", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Scott", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Stoness", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Tetreault", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Allen", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the ACL Workshop Incremental Parsing: Bringing Engineering and Cognition Together", |
| "volume": "", |
| "issue": "", |
| "pages": "18--25", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Scott C. Stoness, Joel Tetreault, and James Allen. 2004. Incremental parsing with reference inter- action. In Frank Keller, Stephen Clark, Matthew Crocker, and Mark Steedman, editors, Proceed- ings of the ACL Workshop Incremental Parsing: Bringing Engineering and Cognition Together, pages 18-25. Association for Computational Lin- guistics, Barcelona.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "A large scale distributed syntactic, semantic and lexical language model for machine translation", |
| "authors": [ |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenli", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaojun", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "201--210", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ming Tan, Wenli Zhou, Lei Zheng, and Shaojun Wang. 2011. A large scale distributed syntac- tic, semantic and lexical language model for ma- chine translation. In Proceedings of the 49th Annual Meeting of the Association for Compu- tational Linguistics: Human Language Technolo- gies, Volume 1, pages 201-210. Association for Computational Linguistics, Portland, OR.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Integration of visual and linguistic information in spoken language comprehension", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [ |
| "K" |
| ], |
| "last": "Tanenhaus", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "J" |
| ], |
| "last": "Spivey-Knowlton", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Eberhard", |
| "suffix": "" |
| }, |
| { |
| "first": "Julie", |
| "middle": [ |
| "C" |
| ], |
| "last": "Sedivy", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Science", |
| "volume": "268", |
| "issue": "", |
| "pages": "1632--1634", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael K. Tanenhaus, Michael J. Spivey- Knowlton, Kathleen M. Eberhard, and Julie C. Sedivy. 1995. Integration of visual and linguistic information in spoken language comprehension. Science, 268:1632-1634.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A study on richer syntactic dependencies for structured language modeling", |
| "authors": [ |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ciprian", |
| "middle": [], |
| "last": "Chelba", |
| "suffix": "" |
| }, |
| { |
| "first": "Frederick", |
| "middle": [], |
| "last": "Jelinek", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "191--198", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng Xu, Ciprian Chelba, and Frederick Jelinek. 2002. A study on richer syntactic dependencies for structured language modeling. In Proceedings of the 40th Annual Meeting on Association for Computational Linguistics, pages 191-198. As- sociation for Computational Linguistics, Philadel- phia.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "An example of a binarized parse tree and a lexicalized fragment extracted from it.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "An example of an ITSG derivation yielding the tree on the left side of Figure 1. The second and third fragments are introduced by means of forward and backward substitution, respectively.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "text": "Above: an example of a set of fragments extracted from the tree in Figure 1. Below: two incremental derivations that generate it. Colors (and line strokes) indicate which derivation each fragment belongs to.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "text": "Left: an example of a CFG with left recursion. Right: one of the structures the CFG can generate.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF4": { |
| "text": "Partial parsing results for sentences of length 10, 20, 30, and 40 (from upper left to lower right).", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF3": { |
| "html": null, |
| "text": "Examples comparing sentence predictions for ITSG and SRILM (UNK: unknown word).", |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |