| { |
| "paper_id": "K15-2003", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:08:49.726334Z" |
| }, |
| "title": "The UniTN Discourse Parser in CoNLL 2015 Shared Task: Token-level Sequence Labeling with Argument-specific Models", |
| "authors": [ |
| { |
| "first": "Evgeny", |
| "middle": [ |
| "A" |
| ], |
| "last": "Stepanov", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "settlement": "Trento", |
| "region": "TN", |
| "country": "Italy" |
| } |
| }, |
| "email": "stepanov@disi.unitn.it" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Riccardi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "settlement": "Trento", |
| "region": "TN", |
| "country": "Italy" |
| } |
| }, |
| "email": "riccardi@disi.unitn.it" |
| }, |
| { |
| "first": "Ali", |
| "middle": [ |
| "Orkan" |
| ], |
| "last": "Bayer", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "settlement": "Trento", |
| "region": "TN", |
| "country": "Italy" |
| } |
| }, |
| "email": "bayer@disi.unitn.it" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Penn Discourse Treebank style discourse parsing is a composite task of identifying discourse relations (explicit or nonexplicit), their connective and argument spans, and assigning a sense to these relations from the hierarchy of senses. In this paper we describe University of Trento parser submitted to CoNLL 2015 Shared Task on Shallow Discourse Parsing. The span detection tasks for explicit relations are cast as token-level sequence labeling. The argument span decisions are conditioned on relations' being intra- or inter-sentential. Non-explicit relation detection and sense assignment tasks are cast as classification. In the end-to-end closed-track evaluation, the parser ranked second with a global F-measure of 0.2184.",
| "pdf_parse": { |
| "paper_id": "K15-2003", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Penn Discourse Treebank style discourse parsing is a composite task of identifying discourse relations (explicit or nonexplicit), their connective and argument spans, and assigning a sense to these relations from the hierarchy of senses. In this paper we describe University of Trento parser submitted to CoNLL 2015 Shared Task on Shallow Discourse Parsing. The span detection tasks for explicit relations are cast as token-level sequence labeling. The argument span decisions are conditioned on relations' being intra- or inter-sentential. Non-explicit relation detection and sense assignment tasks are cast as classification. In the end-to-end closed-track evaluation, the parser ranked second with a global F-measure of 0.2184.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Discourse parsing is a challenging Natural Language Processing (NLP) task that has utility for many other NLP tasks such as summarization, opinion mining, etc. (Webber et al., 2011) . With the release of Penn Discourse Treebank (PDTB) (Prasad et al., 2008) , the researchers have developed discourse parsers for all (e.g. (Lin et al., 2014)) or some (e.g. (Ghosh et al., 2011) ) discourse relation types in the PDTB definition, or addressed particular discourse parsing subtasks .",
| "cite_spans": [ |
| { |
| "start": 160, |
| "end": 181, |
| "text": "(Webber et al., 2011)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 235, |
| "end": 256, |
| "text": "(Prasad et al., 2008)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 322, |
| "end": 340, |
| "text": "(Lin et al., 2014)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 356, |
| "end": 376, |
| "text": "(Ghosh et al., 2011)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "PDTB adopts non-hierarchical binary view on discourse relations: a discourse connective and its two arguments -Argument 1 and Argument 2, which is syntactically attached to the connective. And, a relation is assigned particular sense from the sense hierarchy. It was identified that parsing Explicit discourse relations, that are signaled by a presence of a discourse connective (a closed class), is much easier task than detection and classification of Implicit discourse relations, where a discourse connective is implied, rather than lexically realized. Since Explicit and Implicit discourse relations in a document do not differ much in relative frequency, the low performance on one of the relation types limits the utility of discourse parsing for downstream applications.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper we describe the University of Trento discourse parser for both explicit and nonexplicit -implicit, alternatively lexicalized (AltLex), and entity (EntRel) relations -that was submitted to the CoNLL 2015 Shared Task on Shallow Discourse Parsing (Xue et al., 2015) and ranked 2nd. The parser makes use of token-level sequence labeling with Conditional Random Fields (Lafferty et al., 2001 ) for identification of connective and argument spans; and classification for identification of relation senses and argument configurations.",
| "cite_spans": [ |
| { |
| "start": 258, |
| "end": 276, |
| "text": "(Xue et al., 2015)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 378, |
| "end": 400, |
| "text": "(Lafferty et al., 2001", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The parser architecture is described in Section 2. The features and individual model details are described in Sections 3 and 4, respectively. In Section 5 we describe official evaluation results. Section 6 discusses the lessons learned from the shared task and provides concluding remarks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The discourse parser submitted for the CoNLL 2015 Shared Task is the extension of the parser described in (Stepanov and Riccardi, 2013; Stepanov and Riccardi, 2014) . The overall architecture of the parser is depicted in Figure 1 . The approach structures discourse parsing into a pipeline of several subtasks, mimicking the Penn Discourse Treebank (PDTB) (Prasad et al., 2008) annotation procedure as in (Lin et al., 2014) .", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 135, |
| "text": "(Stepanov and Riccardi, 2013;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 136, |
| "end": 164, |
| "text": "Stepanov and Riccardi, 2014)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 356, |
| "end": 377, |
| "text": "(Prasad et al., 2008)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 405, |
| "end": 423, |
| "text": "(Lin et al., 2014)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 221, |
| "end": 229, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The first step is Discourse Connective Detection (DCD) that identifies explicit discourse connectives and their spans. Then Connective Sense Classification (CSC) is used to classify these con- Figure 1 : Discourse parser architecture. CRF modules are in bold; classification modules are in italic.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 193, |
| "end": 201, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "System Architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "nectives into the PDTB hierarchy of senses; and Argument Position Classification (APC) to classify the connectives as requiring their Argument 1 in the previous (PS) or the same sentence as Argument 2 (i.e. classify relations as inter-and intrasentential). With respect to the decision of the step an Argument Span Extraction (ASE) model is applied to label the spans of both arguments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Separate Argument Span Extraction models are trained for each of the arguments of intra-and inter-sentential explicit discourse relations. Identification of Argument 2 is much easier, since it is the argument syntactically attached to the discourse connective. Thus, for the intra-sentential (SS) relations, models are applied in a cascade such that the output of Argument 2 span extraction is the input for Argument 1 span extraction. For the inter-sentential (PS) relations, a sentence containing the connective is selected as Argument 2, and the sentence immediately preceding it as a candidate for Argument 1. Even though in 9% of all inter-sentential relations Argument 1 is located in non-adjacent previous sentence (Prasad et al., 2008) , this heuristic is widely used (Lin et al., 2014; Stepanov and Riccardi, 2013) , and is known as Previous Sentence Heuristic.",
| "cite_spans": [ |
| { |
| "start": 722, |
| "end": 743, |
| "text": "(Prasad et al., 2008)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 776, |
| "end": 794, |
| "text": "(Lin et al., 2014;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 795, |
| "end": 823, |
| "text": "Stepanov and Riccardi, 2013)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In PDTB, the Non-Explicit discourse relations -Implicit, AltLex, and EntRel -are annotated for pairs of adjacent sentences except the pairs that were already annotated as explicit discourse relations (Prasad et al., 2007) . Thus, in the Non-Explicit Pair Generation (NPG) step a list of adjacent sentence pairs is generated omitting the inter-sentential explicit relations identified in the APC step. In the Non-Explicit Relation Detection (NRD) step the candidate pairs are classified as holding a relation or not. The pairs identified as a relation are then classified into relation senses in the Relation Sense Classification (RSC) step.", |
| "cite_spans": [ |
| { |
| "start": 200, |
| "end": 221, |
| "text": "(Prasad et al., 2007)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Since the goal of Discourse Connective Detection and Argument Span Extraction tasks is to label the spans of a connective and its arguments, they are cast as token-level sequence labeling with CRFs using CRF++ (Kudo, 2013) . The Non-Explicit Relation Detection and Sense and Argument Position classification tasks are cast as supervised classification using AdaBoost algorithm (Freund and Schapire, 1997) implemented in icsiboost (Favre et al., 2007) . In Section 3 we describe the features used for token-level sequence labeling and classification tasks; and in Section 4 models for each of the subtasks in more detail.", |
| "cite_spans": [ |
| { |
| "start": 210, |
| "end": 222, |
| "text": "(Kudo, 2013)", |
| "ref_id": null |
| }, |
| { |
| "start": 377, |
| "end": 404, |
| "text": "(Freund and Schapire, 1997)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 430, |
| "end": 450, |
| "text": "(Favre et al., 2007)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Architecture", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Besides tokens, the PDTB corpus distributed to the participants contains Part-of-Speech tags, constituency and dependency parses. These resources are used to extract and generate both token-level and argument/relation-level features. Additionally, for argument/relation-level features Brown Clusters (Turian et al., 2010) are used.", |
| "cite_spans": [ |
| { |
| "start": 300, |
| "end": 321, |
| "text": "(Turian et al., 2010)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Discourse Connective Detection and Argument Span Extraction tasks of discourse parsing are cast as token-level sequence labeling with CRFs. The list of features used for the models is given in Table 1. Besides tokens and POS-tags, the rest of the features is described below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token-level Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Chunk-tag is the syntactic chunk prefixed with the information whether a token is at the beginning (B-), inside (I-) or outside (O) of the constituent (i.e. IOB format) (e.g. 'B-NP' indicates that a token is at the beginning of Noun Phrase chunk). The information is extracted from constituency parse trees using chunklink script (Buchholz, 2000) . IOB-chain is the path string of the syntactic tree nodes from the root node to the token, similar to Chunk-tag, it is prefixed with the IOB information. For example, the IOB-chain 'I-S/B-VP' indicates that a token is the first word of the verb phrase (B-VP) of the main clause (I-S).The feature is also extracted using the chunklink script (Buchholz, 2000) .",
| "cite_spans": [ |
| { |
| "start": 330, |
| "end": 346, |
| "text": "(Buchholz, 2000)", |
| "ref_id": null |
| }, |
| { |
| "start": 689, |
| "end": 705, |
| "text": "(Buchholz, 2000)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token-level Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "PS A1 A2 A1 A2 Token Y Y Y Y Y POS-tag Y Y Y Y Chunk-tag Y IOB-chain Y Y Y Y Y Dependency chain Y Y Connective Head Y Connective Label Y Y Y Argument 2 Label Y", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token-level Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Dependency chain is a feature inspired by IOBchain and is the path string of the functions of the parents of a token, starting from root of a dependency parse. For example, the dependency chain 'root/nsubj/det' indicates that a token is a determiner of the subject of a sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token-level Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Connective Head is a binary feature that indicates whether a token is in the list of 100 PDTB discourse connectives. For example, all 'and' tokens will have this feature value '1'.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token-level Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Connective Label and Argument 2 Label are the output labels of the Discourse Connective Detection and Argument 2 Span Extraction models respectively. The outputs are the IOB-tagged strings 'CONN' and 'ARG2'. Using these labels as features for Argument Span Extraction is useful for constraining the search space, since the Connective, Argument 1 and Argument 2 spans are not supposed to overlap.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token-level Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Besides the features mentioned above, we have experimented with other token-level features: (1) morphological: lemma and inflection; (2) dependency: main verb of a sentence (i.e. root of the dependency parse) as a string and binary feature;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token-level Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "(3) Connective Head as string. Even though previous work on discourse parsing (e.g. (Ghosh et al., 2011; Stepanov and Riccardi, 2013)) found these features useful in token-level sequence labeling approach to Argument Span Extraction using gold parse trees, they were excluded from the submitted models since in greedy hill climbing their contributions were negative.",
| "cite_spans": [ |
| { |
| "start": 84, |
| "end": 104, |
| "text": "(Ghosh et al., 2011;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 105, |
| "end": 133, |
| "text": "Stepanov and Riccardi, 2013)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token-level Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Using templates of CRF++ the token-level features are enriched with ngrams (2 & 3-grams) in the window of \u00b12 tokens. That is, for each token there are 12 features per feature type: 5 unigrams, 4 bigrams and 3 trigrams. All features are conditioned on the output label independently of each other. Additionally, CRFs consider the previous token's output label as a feature.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Token-level Features", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In this section we describe features used for detecting non-explicit discourse relations and their sense classification. Since in these tasks the unit of classification is a relation rather than token, these features are extracted per argument of a relation and a relation as a whole.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Previous work on the topic makes use of wide range of features ranging from first and last tokens of arguments to a Cartesian product of all tokens in both arguments, which leads to a very sparse feature set. To reduce the sparseness in (Rutherford and Xue, 2014) the authors map the tokens to Brown Clusters (Turian et al., 2010) and improve the classification into top-level senses.", |
| "cite_spans": [ |
| { |
| "start": 309, |
| "end": 330, |
| "text": "(Turian et al., 2010)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Inspired by the previous research, we have experimented with the following features that are extracted from both arguments:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "1. Bag-of-Words;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "2. Bag-of-Words prefixed with the argument ID (Arg1 or Arg2);", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "3. Cartesian product of all the tokens from both arguments;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "4. Set of unique pairs from Cartesian product of Brown Clusters of all the tokens from both arguments (inspired by (Rutherford and Xue, 2014));", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "5. First, last, and first 3 words of each argument (from ; Rutherford and Xue, 2014));", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "6. Predicate, subject (both passive and active), direct and indirect objects, extracted from dependency parses (8 features); 7. Ternary features for pairs from 6 to indicate matches (1, 0) or NULL, if one of the arguments misses the feature (extension of 'similar subjects or main predicates' feature of (Rutherford and Xue, 2014)) (16 features);", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "8. Cartesian product of Brown Clusters of 6 (16 features);", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "These features are used for Non-Explicit Discourse Relation Detection and Sense Classification tasks, which are described in the next section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument & Relation-level Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this section we describe individual discourse parsing subtasks discussing features and models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discourse Parsing Components", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Discourse Connective Detection is the first step in discourse parsing. The CRF model makes use of all the features in Table 3 (except Connective Label -its own output -and Argument 2 Label -the output of downstream component). Using just cased token features (i.e. 1, 2, 3-grams in the window of \u00b12 tokens) already has F-measure above 0.85. Adding other features gradually increases the performance on the development set to 0.9379. Other than the token itself, the feature that contributes the most to the performance is IOB-chain.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 118, |
| "end": 125, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discourse Connective Detection", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Connective Sense Classification takes the output of Discourse Connective Detection and classifies identified connectives into the hierarchy of PDTB senses. We have experimented with two approaches: (1) flat -directly classifying into full spectrum of senses including class, type and subtype (Prasad et al., 2008) ; and (2) hierarchical -first classifying into 4 top level senses (Comparison, Contingency, Expansion and Temporal) and then into the rest of the levels. For the purposes of the Shared Task partial senses (e.g. just class) were disallowed; thus, for the flat classification, instances having partial senses were removed from both training and development sets.",
| "cite_spans": [ |
| { |
| "start": 292, |
| "end": 313, |
| "text": "(Prasad et al., 2008)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Connective Sense Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The flat classification into 14 senses using just cased token strings as bag-of-words yields the best performance and has accuracy of 0.8968 on the filtered development set using gold connective spans. The 4-way classification into top-level senses on a full development set using just connective tokens has accuracy of 0.9426. Adding POStags increases accuracy to 0.9456. Due to the error propagation, going to the second level of the hierarchy drops the performance slightly below the flat classification. None of the other features listed in Table 1 has a positive effect on classification. Adding argument spans lowered the performance as well.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 545, |
| "end": 552, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Connective Sense Classification", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Argument Position Classification is an easy task, since explicit discourse connectives have a strong preference on the positions of its arguments, depending on whether they appear at the beginning or in the middle of a sentence. In the literature the task was reported as having a very high baseline (e.g. (Stepanov and Riccardi, 2013), 95% for whole PDTB). The features used for classification are cased connective token string (case here carries the information about connective's position in the sentence), POS-tags and IOB-chains. The accuracy on the development set given gold connective spans is 0.9868.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument Position Classification", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Argument Span Extraction models that make use of the Connective and Argument 2 Labels are trained on reference annotation. Even though, the performance of the upstream models (Discourse Connective Detection and Argument Position Classification) is relatively high compared to the Argument Span Extraction models, there is still error propagation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument Span Extraction", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "For the Argument Span Extraction of explicit relations the search space is limited to a single sentence; thus, all multi sentence arguments are missed. This constraint has a little effect on Argument 2 spans. However, since as a candidate for inter-sentential Argument 1 we use only immediately preceding sentence, together with this constraint we miss 12% of relations. Thus, detection of Argument 1 spans of inter-sentential relations is a hard task, additionally due to the fact that there is no other span (connective or Argument 2) to delimit it. Even though we have trained CRF models for the task, previous sentence heuristic was performing with insignificant difference. Thus, the heuristic was selected for the submitted version, and it was augmented with the removal of sentence initial and final punctuation. For Argument 2 of inter-sentential relations performance of CRF models is acceptably high (\u2248 0.80).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Argument Span Extraction", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The span of Argument 2 of intra-sentential relations is the easiest to detect, since it is syntactically attached to the connective; and performances are high (\u2248 0.89 on the development set using the features in Table 1 ). Thus, its output is used as a feature for Argument 1 extraction. Interesting fact is that POS-tags have a negative effect on the Argument 1 Span Extraction.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 212, |
| "end": 219, |
| "text": "Table 1 ", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Argument Span Extraction", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Based on the output of Argument Position Classification a set of adjacent sentence pairs is generated as candidates for non-explicit discourse relations: Implicit, AltLex, and EntRel. For training the classification models we have generated No-Relation pairs using reference annotation, excluding all the sentences involved in inter-sentential relations (some relations have multiple sentence arguments). Additionally, since arguments of nonexplicit relations are stripped of leading and trailing punctuation, the No-Relation pairs were preprocessed. The task of detecting relations proved to be hard.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Explicit Relation Detection", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Similar to Connective Sense Classification we attempted (1) flat classification into all PDTB senses + No-Relation (i.e. merging the task with Relation Sense Classification described in Section 4.6) and (2) hierarchical -first detect the presence of a relation then classify it into the hierarchy of senses. For the hierarchical detection of Non-Explicit relations we tried (1) Relation vs. No-Relation classification and (2) classification into relation types (Implicit, AltLex, EntRel) + No-Relation. The model that has the highest F-measure for actual relations turned out to be binary Relation vs. No-Relation classification (0.6988). However, since in the testing mode we don't have access to argument span information the performance is expected to drop significantly. The most robust feature combination for the task is Cartesian product of Brown Clusters of all the tokens from both arguments and Cartesian product of Brown Clusters of predicate, subject and direct and indirect objects (4 and 8 from Section 3.2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-Explicit Relation Detection", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "After a sentence pair is classified as a relation, it is further classified into the hierarchy of senses. The models are trained on all the features from Section 3.2, excluding prefixed Bag-of-Words and Cartesian product of all tokens. Relations are classified directly into 14 PDTB senses + EntRel.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Relation Sense Classification", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "The task is extremely hard, the classification accuracy is 0.3899 and the model misses infrequent senses. Table 2 lists the captured senses with their percentages in training data and F-measures on the development set. The distribution of senses has a direct effect on its F-measure. The performances reported so far are on a specific task without error propagation from the upstream tasks. In the next section we report official Shared Task evaluation results.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 106, |
| "end": 113, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Relation Sense Classification", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "The official evaluation of CoNLL 2015 Shared Task on Shallow Discourse Parsing is done on a per-discourse relation basis. A relation is considered to be predicted correctly if the parser correctly identifies (1) discourse connective span (head), (2) spans and labels of both arguments, and (3) sense of a relation. The predicted connective and arguments spans have to match the reference spans exactly. Consequently, to get a true positive for a relation the parser has to get true positive on all the subtasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Official Evaluation Metrics and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The task organizers also provided the evaluation script that reported precision, recall and F-measures for Discourse Connective Detection, joint Sense Classification scores for explicit and non-explicit relations, and joint Argument Span Extraction score for explicit and non-explicit relations. For argument spans three scores were reported: Argument 1 and Argument 2 individually and jointly. For Sense Classification the script reported performance on each of the senses and their macro-average. Later, performances for explicit and non-explicit relations were split. The participants had to evaluate their systems on 3 data sets:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Official Evaluation Metrics and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "(1) Development (WSJ Section 22), (2) Test (WSJ Section 23), and the blind test set annotated specifically for the Shared Task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Official Evaluation Metrics and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The performance of our parser on each of the Table 4 : Parser-level precision (P), recall (R), and F-measures (F1) of the submitted system on the blind test set. UniTN system is in bold.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 45, |
| "end": 52, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Official Evaluation Metrics and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "metrics (tasks) per evaluation set is reported individually and jointly for explicit and non-explicit relations in Table 3 . From the results, it is clear that non-explicit Relation Sense Classification is the hardest task. The next hardest task is intersentential Argument 1 Span Extraction. According to the organizers, the development, test and blind test sets are coming from the same domain. However, we observe a gradual decline in performance from development to test and from test to the blind test sets for each of the tasks on explicit relations. For non-explicit relations, on the other hand, performances vary and in some cases the performance on the blind test set is the highest (Argument 2 spans). The parser ranked the second on the test and the blind test sets and the third on the development set. For the comparison we also report performances of the systems ranked the first and the third in Table 4 . The global F-measure of our parser on the blind test set is 0.2184, which is 0.0219 points lower than the first ranked system and 0.0333 points higher than the next best system. Comparing the performance with all the participants, we have observed that our parser maintains higher recall across the subtasks.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 115, |
| "end": 122, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 912, |
| "end": 919, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Official Evaluation Metrics and Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this paper we have presented University of Trento parser submitted to CoNLL 2015 Shared Task on Shallow Discourse Parsing. We have described the discourse parsing architecture and models for each of the subtasks. The subtasks are categorized into span detection and classification. The span detection tasks are for explicit relations -Discourse Relation Detection and Argument Span Extraction; they are cast as token-level sequence labeling using Conditional Random Fields and argument span decisions are conditioned on relations' being intra-or inter-sentential. Classification tasks -Connective Sense Classification, Argument Position Classification, Non-Explicit Relation Detection, and Non-Explicit Relation Sense Classification -employ AdaBoost algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Participation in the CoNLL 2015 Shared Task on Shallow Discourse Parsing gave the teams a unique opportunity to compare their discourse parsing approaches on the same training and testing splits and the same automatic features. Even though the ranking of submitted systems depends on performances of all the modules, we can conclude that token-level sequence labeling for Argument Span Extraction of explicit discourse relations is a viable approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Participation additionally allowed us to identify potential points of improvement for our parser. For example, even though Discourse Connective Detection as sequence labeling has an F-measure of 0.8992 on the blind test set, it ranks 4th. Since it is the first step in the pipeline, increasing the robustness of the model is essential.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The research leading to these results has received funding from the European Union -Seventh Framework Programme (FP7/2007-2013) under grant agreement No. 610916 -SENSEI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A decision-theoretic generalization of on-line learning and an application to boosting", |
| "authors": [ |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Freund", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "E" |
| ], |
| "last": "Schapire", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Journal of Computer and System Sciences", |
| "volume": "55", |
| "issue": "1", |
| "pages": "119--139", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoav Freund and Robert E. Schapire. 1997. A decision-theoretic generalization of on-line learning and an application to boosting. Journal of Computer and System Sciences, 55(1):119-139, August.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Shallow discourse parsing with conditional random fields", |
| "authors": [ |
| { |
| "first": "Sucheta", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Johansson", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Riccardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Tonelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 5th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sucheta Ghosh, Richard Johansson, Giuseppe Ric- cardi, and Sara Tonelli. 2011. Shallow discourse parsing with conditional random fields. In Proceed- ings of the 5th International Joint Conference on Natural Language Processing (IJCNLP 2011).", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Lafferty", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [ |
| "C N" |
| ], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of 18th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "282--289", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional random fields: Prob- abilistic models for segmenting and labeling se- quence data. In Proceedings of 18th International Conference on Machine Learning, pages 282-289.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A PDTB-styled end-to-end discourse parser", |
| "authors": [ |
| { |
| "first": "Ziheng", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Min-Yen", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kan", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Natural Language Engineering", |
| "volume": "20", |
| "issue": "", |
| "pages": "151--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziheng Lin, Hwee Tou Ng, and Min-Yen Kan. 2014. A PDTB-styled end-to-end discourse parser. Natural Language Engineering, 20:151 -184.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Using syntax to disambiguate explicit discourse connectives in text", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Pitler", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the ACL-IJCNLP Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "13--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Pitler and Ani Nenkova. 2009. Using syn- tax to disambiguate explicit discourse connectives in text. In Proceedings of the ACL-IJCNLP Con- ference, pages 13-16.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Automatic sense prediction for implicit discourse relations in text", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Pitler", |
| "suffix": "" |
| }, |
| { |
| "first": "Annie", |
| "middle": [], |
| "last": "Louis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 47th Annual Meeting of the ACL and the 4th IJCNLP of the AFNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "683--691", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Pitler, Annie Louis, and Ani Nenkova. 2009. Automatic sense prediction for implicit discourse relations in text. In Proceedings of the 47th An- nual Meeting of the ACL and the 4th IJCNLP of the AFNLP, pages 683-691.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "The Penn Discourse Treebank 2.0 annotation manual", |
| "authors": [ |
| { |
| "first": "Rashmi", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Eleni", |
| "middle": [], |
| "last": "Miltsakaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Dinesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Aravind", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Livio", |
| "middle": [], |
| "last": "Robaldo", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [ |
| "L" |
| ], |
| "last": "Webber", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rashmi Prasad, Eleni Miltsakaki, Nikhil Dinesh, Alan Lee, Aravind Joshi, Livio Robaldo, and Bonnie L Webber. 2007. The Penn Discourse Treebank 2.0 annotation manual.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The Penn Discourse Treebank 2.0", |
| "authors": [ |
| { |
| "first": "Rashmi", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Dinesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Eleni", |
| "middle": [], |
| "last": "Miltsakaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Livio", |
| "middle": [], |
| "last": "Robaldo", |
| "suffix": "" |
| }, |
| { |
| "first": "Aravind", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 6th International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rashmi Prasad, Nikhil Dinesh, Alan Lee, Eleni Milt- sakaki, Livio Robaldo, Aravind Joshi, and Bon- nie Webber. 2008. The Penn Discourse Treebank 2.0. In Proceedings of the 6th International Confer- ence on Language Resources and Evaluation (LREC 2008).", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Discovering implicit discourse relations through Brown Cluster pair representation and coreference patterns", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Attapol", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Rutherford", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Attapol T. Rutherford and Nianwen Xue. 2014. Dis- covering implicit discourse relations through Brown Cluster pair representation and coreference patterns. In Proceedings of the 14th Conference of the Euro- pean Chapter of the Association for Computational Linguistics (EACL 2014).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Comparative evaluation of argument extraction algorithms in discourse relation parsing", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Evgeny", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Stepanov", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Riccardi", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 13th International Conference on Parsing Technologies (IWPT 2013)", |
| "volume": "", |
| "issue": "", |
| "pages": "36--44", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evgeny A. Stepanov and Giuseppe Riccardi. 2013. Comparative evaluation of argument extraction al- gorithms in discourse relation parsing. In Proceed- ings of the 13th International Conference on Pars- ing Technologies (IWPT 2013), pages 36-44, Nara, Japan, November.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Towards cross-domain PDTB-style discourse parsing", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Evgeny", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Stepanov", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Riccardi", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "The Fifth International Workshop on Health Text Mining and Information Analysis", |
| "volume": "", |
| "issue": "", |
| "pages": "30--37", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evgeny A. Stepanov and Giuseppe Riccardi. 2014. Towards cross-domain PDTB-style discourse pars- ing. In EACL Workshops -Proceedings of the Louhi 2014: The Fifth International Workshop on Health Text Mining and Information Analysis, pages 30-37, Gothenburg, Sweden, April. ACL.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Word representations: A simple and general method for semisupervised learning", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Turian", |
| "suffix": "" |
| }, |
| { |
| "first": "Lev", |
| "middle": [], |
| "last": "Ratinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "384--394", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph Turian, Lev Ratinov, and Yoshua Bengio. 2010. Word representations: A simple and general method for semisupervised learning. In In ACL, pages 384- 394.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Discourse structure and language technology", |
| "authors": [ |
| { |
| "first": "Bonnie", |
| "middle": [ |
| "L" |
| ], |
| "last": "Webber", |
| "suffix": "" |
| }, |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Egg", |
| "suffix": "" |
| }, |
| { |
| "first": "Valia", |
| "middle": [], |
| "last": "Kordoni", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Natural Language Engineering", |
| "volume": "", |
| "issue": "", |
| "pages": "1--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bonnie L. Webber, Markus Egg, and Valia Kordoni. 2011. Discourse structure and language technology. Natural Language Engineering, pages 1-54.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The CoNLL-2015 Shared Task on Shallow Discourse Parsing", |
| "authors": [ |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Rashmi", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Attapol", |
| "middle": [], |
| "last": "Bryant", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rutherford", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning: Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nianwen Xue, Hwee Tou Ng, Sameer Pradhan, Rashmi Prasad, Christopher Bryant, and Attapol Rutherford. 2015. The CoNLL-2015 Shared Task on Shallow Discourse Parsing. In Proceedings of the Nine- teenth Conference on Computational Natural Lan- guage Learning: Shared Task.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "content": "<table><tr><td>: Token-level features for Discourse Con-</td></tr><tr><td>nective Detection (DCD) and Argument Span Ex-</td></tr><tr><td>traction (ASE) for intra-sentential (SS) and inter-</td></tr><tr><td>sentential (PS) explicit discourse relations.</td></tr></table>", |
| "num": null, |
| "html": null, |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>: F-measures of non-explicit relation sense</td></tr><tr><td>classification per sense, ordered by frequency in</td></tr><tr><td>the training set.</td></tr></table>", |
| "num": null, |
| "html": null, |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td>Team</td><td>P</td><td>R</td><td>F1</td></tr><tr><td>lan15</td><td colspan=\"3\">0.2369 0.2432 0.2400</td></tr><tr><td colspan=\"4\">stepanov15 0.2094 0.2283 0.2184</td></tr><tr><td>li15b</td><td colspan=\"3\">0.1981 0.1737 0.1851</td></tr></table>", |
| "num": null, |
| "html": null, |
| "text": "Task-level and parser-level F-measures of the parser on the development, test, and blind test sets for explicit and non-explicit relations individually and jointly. The Sense values are macro-averages.", |
| "type_str": "table" |
| } |
| } |
| } |
| } |