| { |
| "paper_id": "K16-2005", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:11:01.683516Z" |
| }, |
| "title": "Do We Really Need All Those Rich Linguistic Features? A Neural Network-Based Approach to Implicit Sense Labeling", |
| "authors": [ |
| { |
| "first": "Niko", |
| "middle": [], |
| "last": "Schenk", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Goethe University", |
| "location": { |
| "settlement": "Frankfurt am Main", |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Chiarcos", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Goethe University", |
| "location": { |
| "settlement": "Frankfurt am Main", |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Kathrin", |
| "middle": [], |
| "last": "Donandt", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Goethe University", |
| "location": { |
| "settlement": "Frankfurt am Main", |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "R\u00f6nnqvist", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Goethe University", |
| "location": { |
| "settlement": "Frankfurt am Main", |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Evgeny", |
| "middle": [ |
| "A" |
| ], |
| "last": "Stepanov", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Riccardi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "giuseppe.riccardi@unitn.it" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We describe our contribution to the CoNLL 2016 Shared Task on shallow discourse parsing. 1 Our system extends the two best parsers from previous year's competition by integration of a novel implicit sense labeling component. It is grounded on a highly generic, language-independent feedforward neural network architecture incorporating weighted word embeddings for argument spans which obviates the need for (traditional) hand-crafted features. Despite its simplicity, our system overall outperforms all results from 2015 on 5 out of 6 evaluation sets for English and achieves an absolute improvement in F1-score of 3.2% on the PDTB test section for non-explicit sense classification.", |
| "pdf_parse": { |
| "paper_id": "K16-2005", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We describe our contribution to the CoNLL 2016 Shared Task on shallow discourse parsing. 1 Our system extends the two best parsers from previous year's competition by integration of a novel implicit sense labeling component. It is grounded on a highly generic, language-independent feedforward neural network architecture incorporating weighted word embeddings for argument spans which obviates the need for (traditional) hand-crafted features. Despite its simplicity, our system overall outperforms all results from 2015 on 5 out of 6 evaluation sets for English and achieves an absolute improvement in F1-score of 3.2% on the PDTB test section for non-explicit sense classification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Text comprehension is an essential part of Natural Language Understanding and requires capabilities beyond capturing the lexical semantics of individual words or phrases. In order to understand how meaning is established, altered and transferred across words and sentences, a model is needed to account for contextual information as a semantically coherent representation of the logical discourse structure of a text. Different formalisms and frameworks have been proposed to realize this assumption (Mann and Thompson, 1988; Lascarides and Asher, 1993; Webber, 2004) .", |
| "cite_spans": [ |
| { |
| "start": 500, |
| "end": 525, |
| "text": "(Mann and Thompson, 1988;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 526, |
| "end": 553, |
| "text": "Lascarides and Asher, 1993;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 554, |
| "end": 567, |
| "text": "Webber, 2004)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In a more applied NLP context, shallow discourse parsing (SDP) aims at automatically detecting relevant discourse units and to label the relations that hold between them. Unlike deep discourse parsing, a stringent logical formalization or the establishment of a global data structure, for instance, a tree, is not required.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "With the release of the Penn Discourse Treebank (Prasad et al., 2008, PDTB) and the Chinese Discourse Treebank (Zhou and Xue, 2012, CDTB) , annotated training data for SDP has become available and, as a consequence, the field has considerably attracted researchers from the NLP and IR community. Informally, the PDTB annotation scheme describes a discourse unit as a syntactically motivated character span in the text, augmented with relations pointing from the second argument (Arg2, prototypically, a discourse unit associated with an explicit discourse marker) to its antecedent, i.e., the discourse unit Arg1. Relations are labeled with a relation type (its sense) and the associated discourse marker (either as found in the text or as inferred by the annotator). PDTB distinguishes explicit and implicit relations depending on whether such a connector or cue phrase (e.g., because) is present, or not. 2 As an illustrative example without such a marker, consider the following two adjacent sentences from the PDTB:", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 75, |
| "text": "(Prasad et al., 2008, PDTB)", |
| "ref_id": null |
| }, |
| { |
| "start": 111, |
| "end": 137, |
| "text": "(Zhou and Xue, 2012, CDTB)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Arg1: The real culprits are computer makers such as IBM that have jumped the gun to unveil 486-based products.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Arg2: The reason this is getting so much visibility is that some started shipping and announced early availability.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this implicit relation, Arg1 and Arg2 are directly related. The discourse relation type is Expansion.Restatement-one out of roughly twenty finegrained tags marking the sense relation between any given argument pair in the PDTB.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our Contribution: We participate in the CoNLL 2016 Shared Task on SDP (Xue et al., 2016; Potthast et al., 2014) and propose a novel, neural network-based approach for implicit sense labeling. Its system architecture is modular, highly generic and mostly language-independent, by leveraging the full power of pre-trained word embeddings for the SDP sense classification task. Our parser performs well on both English and Chinese data and is highly competitive with the state-of-the-art, though does not require manual feature engineering as employed in most prior works on implicit SDP, but rather relies extensively on features learned from data.", |
| "cite_spans": [ |
| { |
| "start": 70, |
| "end": 88, |
| "text": "(Xue et al., 2016;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 89, |
| "end": 111, |
| "text": "Potthast et al., 2014)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most of the literature on automated discourse parsing has focused on specialized subtasks such as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "1. Argument identification (Ghosh et al., 2012; Kong et al., 2014) 2. Explicit sense classification 3. Implicit sense classification (Marcu and Echihabi, 2002; Lin et al., 2009; Zhou et al., 2010; Park and Cardie, 2012; Biran and McKeown, 2013; Rutherford and Xue, 2014) A minimal requirement for any full-fledged endto-end discourse parser is to integrate at least these three processes into a sequential pipeline. However, until recently, only a handful of such parsers have existed (Lin et al., 2014; Biran and McKeown, 2015; duVerle and Prendinger, 2009; Feng and Hirst, 2012) . It has been enormously difficult to evaluate the performance of these systems among themselves, and also to compare the efficiency of their individual components with other competing methods, as i.) those systems rely on different theories of discourse, e.g., PDTB or RST; and ii) different (sub)modules involve custom settings, feature-and tool-specific parameters, (esp. for the most challenging task of implicit sense labeling). Furthermore, iii) most previous works are not directly comparable in terms of overall accuracies as their underlying evaluation data suffers from inconsistent label sizes among studies (e.g., full sense inventory vs. simplified 1-or 2-level classes, cf. Huang and Chen (2011)).", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 47, |
| "text": "(Ghosh et al., 2012;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 48, |
| "end": 66, |
| "text": "Kong et al., 2014)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 133, |
| "end": 159, |
| "text": "(Marcu and Echihabi, 2002;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 160, |
| "end": 177, |
| "text": "Lin et al., 2009;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 178, |
| "end": 196, |
| "text": "Zhou et al., 2010;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 197, |
| "end": 219, |
| "text": "Park and Cardie, 2012;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 220, |
| "end": 244, |
| "text": "Biran and McKeown, 2013;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 245, |
| "end": 270, |
| "text": "Rutherford and Xue, 2014)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 485, |
| "end": 503, |
| "text": "(Lin et al., 2014;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 504, |
| "end": 528, |
| "text": "Biran and McKeown, 2015;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 529, |
| "end": 558, |
| "text": "duVerle and Prendinger, 2009;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 559, |
| "end": 580, |
| "text": "Feng and Hirst, 2012)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Fortunately, with the first edition of the shared task on SDP, Xue et al. (2015) had established a unified framework and had made an independent evaluation possible. The best performing participating systems -most notably those by Wang and Lan (2015) and Stepanov et al. (2015) -have reimplemented the well-established techniques, for example the one by Lin et al. (2014) .", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 80, |
| "text": "Xue et al. (2015)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 231, |
| "end": 250, |
| "text": "Wang and Lan (2015)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 255, |
| "end": 277, |
| "text": "Stepanov et al. (2015)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 354, |
| "end": 371, |
| "text": "Lin et al. (2014)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In last year's shared task, first implementations on deep learning have seen a surge of interest: and proposed a recurrent neural network for argument identification and a paragraph vector model for sense classification. Distributed representations for both arguments were obtained by vector concatenation of embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Approaches to SDP", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "An earlier attempt in a similar direction of representation learning (Bengio et al., 2013) has been made by Ji and Eisenstein (2014) . The authors demonstrated successfully how to discriminatively learn a latent, low-dimensional feature representation for RST-style discourse parsing, which has the benefit of capturing the underlying meaning of elementary discourse units without suffering from data sparsity of the originally high dimensional input data.", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 90, |
| "text": "(Bengio et al., 2013)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 108, |
| "end": 132, |
| "text": "Ji and Eisenstein (2014)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Approaches to SDP", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Closely related, Li et al. (2014) introduced a recursive neural network for discourse parsing which jointly models distributed representations for sentences based on words and syntactic information. The approach is motivated by Socher et al. (2013) and models the discourse unit's root embedding to represent the whole discourse unit which is being obtained from its parts by an iterative process. Their system is made up of a binary structure classifier and a multi-class relation classifier and achieves similar performance compared to Ji and Eisenstein (2014) .", |
| "cite_spans": [ |
| { |
| "start": 17, |
| "end": 33, |
| "text": "Li et al. (2014)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 228, |
| "end": 248, |
| "text": "Socher et al. (2013)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 538, |
| "end": 562, |
| "text": "Ji and Eisenstein (2014)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Approaches to SDP", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Very recently, Liu et al. (2016) and have successfully applied convolutional neural networks to model implicit relations within the PDTB-framework. Along these lines and inspired by the work in Weiss (2015), we also see great potential in the use of neural network-based techniques to SDP. Similarly, our approach trains a modular component for shallow discourse parsing which incorporates distributed word representations for argument spans by abstraction from surface-level (token) information. Crucially, our approach substitutes the traditional sparse and hand-crafted features from the literature to account for a minimalist, but at the same time, general (latent) representation of the discourse units. In the next sections, we elaborate on our novel neural network-based approach for implicit sense labeling and how it is fit into the overall system architecture of the parser.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 32, |
| "text": "Liu et al. (2016)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deep Learning Approaches to SDP", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We construct a neural network-based module for the classification of senses for both implicit and entity (EntRel) relations. 3 As a very general and highly data-driven approach to modeling discourse relations, our classifier incorporates only word embeddings and basic syntactic dependency information. Also, in order to keep the setup easily adaptable to new data and other languages, we avoid the use of very specific and costly hand-crafted features (such as sentiment polarities, word-pair features, cue phrases, modality, production rules, highly specific semantic information from external ontologies such as VerbNet, etc.), which has been the main focus in traditional approaches to SDP (Huang and Chen, 2011; Park and Cardie, 2012; Feng and Hirst, 2012). Instead, we substitute (sparse) tokens in the argument spans, with dense, distributed representations, i.e. word embeddings, as the main source of information for the sense classification component. Closely related, have explored a similar approach of constructing argument vectors by applying a set of aggregation functions on their token vectors, however, without the use of additional (syntactic) information, while embedding their vectors into a single-layer neural network only.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 126, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Neural Sense Labeler for Implicit and Entity Relations", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In our experiments, we used the pre-trained GoogleNews vectors (for English) and the Gigaword-induced vectors (for Chinese) provided by the shared task as a starting point. 4 We further trained the word vectors on the raw Wall Street Journal texts, thus tuning the embeddings toward the data at hand, with the goal of considerably im-proving their predictive power in the sense classification task. Specifically, the pre-trained vectors of size 300 were updated by the skip-gram method (Mikolov et al., 2013) 5 in multiple passes over the Newswire texts with decreasing learning rate. This procedure is supposed to improve the quality of the embeddings and also their coverage.", |
| "cite_spans": [ |
| { |
| "start": 173, |
| "end": 174, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 486, |
| "end": 508, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Neural Sense Labeler for Implicit and Entity Relations", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our new word vector model provides general vector representations for each token in the two argument spans 6 , which forms the basis for producing compositional vectors to represent the two spans. Compositional vectors that introduce a fixed-length representation of a variable-length span of tokens are practical features for feedforward neural networks. Thus, we may combine the token vectors of each span by simply averaging vectors, or -following Mitchell and Lapata (2008) -by calculating an aggregated argument vector", |
| "cite_spans": [ |
| { |
| "start": 451, |
| "end": 477, |
| "text": "Mitchell and Lapata (2008)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Neural Sense Labeler for Implicit and Entity Relations", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "v : v (j) = 1 k(j) k(j) i=1 V (j) i + k(j) i=1 V (j) i", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "A Neural Sense Labeler for Implicit and Entity Relations", |
| "sec_num": "3" |
| }, |
| { |
| "text": "for arguments j \u2208 {1, 2}, where k(j) = |t(j)| defines their lengths in the number of tokens and applies the pointwise product over the token vectors in V (j).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Neural Sense Labeler for Implicit and Entity Relations", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Both procedures produce rather simple argument representations that do not account for word order variation or any other sentence structure information, yet they serve as decent features for discourse parsing and other related tasks. By introducing pointwise multiplication of the token vectors, the elements that represent assumed independent, latent semantic dimensions are not merely lumped together across vectors, but are allowed to scale according to their mutual relevance. 7 Improving upon the compositional representation produced by Equation 1, we incorporate additional syntactic dependency information: for each token in an argument span, we calculate the depth d from the corresponding sentence's root node and weight the token vector by 1/2^d before applying the aggregating operators. 8 The bottom of Figure 1 illustrates the first step of the process, i.e. mapping tokens to their corresponding vectors based on the updated word vector model, as well as the token depth weighting. Secondly, the aggregation operators are applied, i.e., the sum (+) of the pointwise product ( / ) and average (avg) of the vectors. Finally, the compositional vectors for each of the arguments are concatenated (\u2295) and serve as input to a feedforward neural network.", |
| "cite_spans": [ |
| { |
| "start": 481, |
| "end": 482, |
| "text": "7", |
| "ref_id": null |
| }, |
| { |
| "start": 800, |
| "end": 801, |
| "text": "8", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 816, |
| "end": 824, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Neural Sense Labeler for Implicit and Entity Relations", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Given the composed argument vectors, we set up a network with one hidden layer and a softmax output layer to classify among 20 implicit senses for English and 9 for Chinese, plus an additional EntRel label. Other relations, such as AltLex, are not modeled. We train the network using Nesterov's Accelerated Gradient (Nesterov, 1983) and optimized all hyper-parameters on the development set. Best results were achieved with rectified linear activation with learnable leak rate and gain 8 Tokens that are missing in the parse tree, such as punctuation symbols, are weighted by 0.25, in our optimal setting. (lgrelu), 40-60 hidden nodes and weight decay and hidden node regularization of 0.0001. 9", |
| "cite_spans": [ |
| { |
| "start": 316, |
| "end": 332, |
| "text": "(Nesterov, 1983)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Neural Sense Labeler for Implicit and Entity Relations", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We participate in the closed track of the shared task, specifically in both full and supplementary tasks (sense-only) on English and Chinese texts. Full tasks require a participant's system to identify argument pairs and to label the sense relation that holds between them. In each supplementary task, gold arguments are provided so that the performance of sense labeling does not suffer from error propagation due to incorrectly detected argument spans.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Competition Tasks & Pipelines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We combine different existent modules to address the specific settings and classification needs of both full and supplementary tasks for both lan- 9 The learning rate was set to 0.0001. Momentum of 0.35-0.6 and 60 hidden nodes performed well for the English tasks, and momentum of 0.85 and 40 hidden nodes for Chinese (with fewer output nodes). Good results were also obtained by Parametric Rectified Linear Unit (prelu) activation, as well as the combination of larger hidden layer and stronger regularization (e.g., L1 regularization of 0.1 on 100 nodes).", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 148, |
| "text": "9", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Competition Tasks & Pipelines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "guages. The modules and their combination with our implicit neural sense classifier will be outlined in the following sections.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Competition Tasks & Pipelines", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For the full task, we exploit the high-quality argument extraction modules of the two bestperforming systems by Wang and Lan (2015, W&L) and Stepanov et al. (2015) from last year's competition (re-using their original implementations): Specifically, we initially run both systems for all explicit relations only, and keep those predicted arguments and sense labels -from either of the two systems -which maximize F 1 -score on the development set. With this simple heuristic, we hope to improve upon the best results from W&L, as, for instance, Stepanov et al. (2015) perform particularly well on all temporal relations, while W&L's tool handles the majority of other senses well.", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 136, |
| "text": "Wang and Lan (2015, W&L)", |
| "ref_id": null |
| }, |
| { |
| "start": 141, |
| "end": 163, |
| "text": "Stepanov et al. (2015)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 545, |
| "end": 567, |
| "text": "Stepanov et al. (2015)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "English Full Task Pipeline (EFTP)", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For all implicit and EntRel relations, we keep the exact argument spans obtained from the W&L system and reject all sense labels. In a second step, we re-classify all these implicit relations by our neural net-based architecture described in Section 3 given only the tokens and their dependencies in both argument spans. Finally, we merge all combined explicit and re-classified implicit relations into the final set for evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "English Full Task Pipeline (EFTP)", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We make use of the system by Stepanov et al. (2015) to label all explicit relation senses, and classify all other relations with an empty token list for connectors (i.e., implicit and EntRels) by our neural network architecture from Section 3.", |
| "cite_spans": [ |
| { |
| "start": 29, |
| "end": 51, |
| "text": "Stepanov et al. (2015)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "English Supplementary Task Pipeline (ESTP)", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Since for the Chinese full task no reusable argument extraction tools were available, we have set up a minimalist (baseline) implementation whose individual steps we sketch briefly:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Full Task Pipeline (CFTP)", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "1. Connective detection is realized by means of a sequence labeling/CRF model. 10 Features are unigram and bigram information from the tokens, their parts-of-speech, dependency head, dependency chain, whether the token is found as a connector in the training set, and its relative position within the sentence.", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 81, |
| "text": "10", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Full Task Pipeline (CFTP)", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "2. Argument extraction is based on the output of predicted connectives for both inter-and intrasentence relations. As an additional feature, we found the IOB chain for the syntactic path of a token to be useful. 11", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Full Task Pipeline (CFTP)", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "3. We heuristically post-process the CRF-labeled argument tokens in order to assign connectors to same-sentence or separate-sentence Arg1 and Arg2 spans.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Full Task Pipeline (CFTP)", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "4. The so-obtained explicit argument pairs are sense labeled by a (linear-kernel) SVM classifier 12 with the connector word as the only feature, following the minimalist setting in Chiarcos and Schenk (2015) .", |
| "cite_spans": [ |
| { |
| "start": 181, |
| "end": 207, |
| "text": "Chiarcos and Schenk (2015)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Full Task Pipeline (CFTP)", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "5. As implicit relations we consider all intersentential relations which are not already part of an explicit relation. Same-sentence relations are ignored altogether.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Chinese Full Task Pipeline (CFTP)", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "For the provided argument pairs, we label explicit relations (i.e. those containing a non-empty connector) by the SVM classifier which has been trained using only a single feature -the connector token. For all other relations, we again employ our neural network-based strategy described in Section 3. The overall architecture is exactly the same as for the English subtask; only the (hyper)parameters have been updated in accordance with the Chinese training data. Table 1 shows the performance of our full-task pipeline (EFTP) which integrates our novel feedforward neural network architecture for implicit sense labeling. The figures suggest that our minimalist approach is highly competitive and can even outperform the best results from last year's competition in terms of F 1 -scores on two out of three evaluation sets (cf. last implicit column). Overall, with the integration of the combined systems by W&L and Stepanov et al. (2015) , we can improve upon the state-of-the-art by an absolute increase in F 1 -score of 0.5% on the blind test set-which is marginal but only due to the fruitful re-classification of the already-provided (and therefore fixed) argument spans.", |
| "cite_spans": [ |
| { |
| "start": 918, |
| "end": 940, |
| "text": "Stepanov et al. (2015)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 465, |
| "end": 472, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Chinese Supplementary Task Pipeline (CSTP)", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Measured on the development set, we found that the dependency depth weighting contributes to an absolute improvement in accuracy of 1.5% for non-explicit relations. Table 1 : English full task F 1 -scores.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 165, |
| "end": 172, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "English Full Task", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Without error propagation from argument identification, and with the gold arguments provided in the evaluation sets, the performance of our implicit sense labeling component is even better; cf. Table 2: on both PDTB evaluation sets F 1 -scores increase by 2.7% and 3.16% (absolute) and by 6.32% and up to 9.17% (relative) on the development and test section, respectively. Strikingly, however, the prediction quality on the blind test set is worse than expected. We assume that this is partly due to the (slightly) heterogeneous content of the annotated Wikinews, as opposed to the original Penn Discourse Treebank data on which our system performs extraordinarily well. Table 2 : English sense-only task F 1 -scores.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 671, |
| "end": 678, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "English Supplementary Task", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "This year's edition of the shared task has been the first to address shallow discourse parsing for Chinese Newswire texts. Given no prior (directly comparable) results on Chinese SDP so far, we simply report the performance of our system on all evaluation sets in Table 3 Table 3 : Chinese full task F 1 -scores.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 264, |
| "end": 271, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 272, |
| "end": 279, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Chinese Full Task", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "A final evaluation has been concerned with the sense-only labeling of gold-provided arguments for Chinese. We want to point out that the neural network architecture for implicit relations (with 70.59% F 1 -score on the dev set, cf. Table 4 ) has beaten all our other experiments: In particular, we have conducted an SVM setup in which we employed the traditional word-pair features substituted by Brown clusters 3200 (65.12%), and special additive Arg1/Arg2 combinations of word embeddings -yielding only 62.8% which equals the majority class baseline indicating no predictive power for any given kernel type. Table 4 : Chinese sense-only task F 1 -scores.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 232, |
| "end": 239, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 610, |
| "end": 617, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Chinese Supplementary Task", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "In the context of the CoNLL 2016 Shared Task on shallow discourse parsing, we have described our participating system and its architecture. Specifically, we introduced a novel feedforward neural network-based component for implicit sense labeling whose only source of information are pretrained word embeddings and syntactic dependencies. Its highly generic and extremely simple design is the main advantage of this module. It has proven to be language-independent, easy to tune and optimize and does not require the use of handcrafted -rich -linguistic features. Still its performance is highly competitive with the state-of-the-art on implicit sense labeling and builds a solid groundwork for future extensions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The set of relation types is completed by alternative lexicalization (AltLex, discourse marker rephrased), entity relation (EntRel, i.e., anaphoric coherence), resp. the absence of any relation (NoRel).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
"text": "The reason to combine both relation types has been a design decision as EntRels are very similar to implicit relations and are also missing a connective. AltLex relations seemed too few to have any statistical impact on the performance of our experiments and have been ignored altogether.4 http://www.cs.brandeis.edu/~clp/conll16st/dataset.html",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We found window size of 8 and min term count = 3 to be optimal. Neural networks were trained using the gensim package: http://radimrehurek.com/gensim/.6 We ignore unknown tokens for which no vectors exist.7 In our experiments, Equation 1 outperformed simpler strategies of either average or multiplication alone. This also indicates that it is beneficial to not completely suppress dimensions with near-zero values for single tokens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://taku910.github.io/crfpp/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
"text": "This information was generated using the script from http://ilk.uvt.nl/team/sabine/chunklink/chunklink_2-2-2000_for_conll.pl12 https://www.csie.ntu.edu.tw/~cjlin/libsvm/",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Representation Learning: A Review and New Perspectives", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "IEEE Trans. Pattern Anal. Mach. Intell", |
| "volume": "35", |
| "issue": "8", |
| "pages": "1798--1828", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, Aaron Courville, and Pascal Vincent. 2013. Representation Learning: A Review and New Perspectives. IEEE Trans. Pattern Anal. Mach. In- tell., 35(8):1798-1828, August.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Aggregated Word Pair Features for Implicit Discourse Relation Disambiguation", |
| "authors": [ |
| { |
| "first": "Or", |
| "middle": [], |
| "last": "Biran", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [], |
| "last": "Mckeown", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, ACL", |
| "volume": "2", |
| "issue": "", |
| "pages": "69--73", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Or Biran and Kathleen McKeown. 2013. Aggregated Word Pair Features for Implicit Discourse Relation Disambiguation. In Proceedings of the 51st Annual Meeting of the Association for Computational Lin- guistics, ACL 2013, 4-9 August 2013, Sofia, Bul- garia, Volume 2: Short Papers, pages 69-73.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "PDTB Discourse Parsing as a Tagging Task: The Two Taggers Approach", |
| "authors": [ |
| { |
| "first": "Or", |
| "middle": [], |
| "last": "Biran", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [], |
| "last": "Mckeown", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "96--104", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Or Biran and Kathleen McKeown. 2015. PDTB Dis- course Parsing as a Tagging Task: The Two Taggers Approach. In Proceedings of the 16th Annual Meet- ing of the Special Interest Group on Discourse and Dialogue, pages 96-104, Prague, Czech Republic, September. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A Minimalist Approach to Shallow Discourse Parsing and Implicit Relation Recognition", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Chiarcos", |
| "suffix": "" |
| }, |
| { |
| "first": "Niko", |
| "middle": [], |
| "last": "Schenk", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 19th Conference on Computational Natural Language Learning: Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "42--49", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Chiarcos and Niko Schenk. 2015. A Mini- malist Approach to Shallow Discourse Parsing and Implicit Relation Recognition. In Proceedings of the 19th Conference on Computational Natural Lan- guage Learning: Shared Task, CoNLL 2015, Bei- jing, China, July 30-31, 2015, pages 42-49.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "A Novel Discourse Parser Based on Support Vector Machine Classification", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Helmut", |
| "middle": [], |
| "last": "Duverle", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Prendinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "2", |
| "issue": "", |
| "pages": "665--673", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David A. duVerle and Helmut Prendinger. 2009. A Novel Discourse Parser Based on Support Vector Machine Classification. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Nat- ural Language Processing of the AFNLP: Volume 2 -Volume 2, ACL '09, pages 665-673, Stroudsburg, PA, USA. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Text-level Discourse Parsing with Rich Linguistic Features", |
| "authors": [ |
| { |
| "first": "Vanessa", |
| "middle": [], |
| "last": "Wei Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Long Papers", |
| "volume": "1", |
| "issue": "", |
| "pages": "60--68", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vanessa Wei Feng and Graeme Hirst. 2012. Text-level Discourse Parsing with Rich Linguistic Features. In Proceedings of the 50th Annual Meeting of the Asso- ciation for Computational Linguistics: Long Papers -Volume 1, ACL '12, pages 60-68, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Global Features for Shallow Discourse Parsing", |
| "authors": [ |
| { |
| "first": "Sucheta", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Riccardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Johansson", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue (SIGDIAL)", |
| "volume": "", |
| "issue": "", |
| "pages": "150--159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sucheta Ghosh, Giuseppe Riccardi, and Richard Jo- hansson. 2012. Global Features for Shallow Dis- course Parsing. In Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue (SIGDIAL), pages 150-159.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Chinese Discourse Relation Recognition", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hen-Hsen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hsin-Hsi", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of 5th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1442--1446", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hen-Hsen Huang and Hsin-Hsi Chen. 2011. Chinese Discourse Relation Recognition. In Proceedings of 5th International Joint Conference on Natural Lan- guage Processing, pages 1442-1446, Chiang Mai, Thailand, November. Asian Federation of Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Representation Learning for Text-level Discourse Parsing", |
| "authors": [ |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "13--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yangfeng Ji and Jacob Eisenstein. 2014. Representa- tion Learning for Text-level Discourse Parsing. In Proceedings of the 52nd Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 13-24, Baltimore, Maryland, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A Constituent-Based Approach to Argument Labeling with Joint Inference in Discourse Parsing", |
| "authors": [ |
| { |
| "first": "Fang", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "Guodong", |
| "middle": [], |
| "last": "Tou Hwee Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "68--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fang Kong, Tou Hwee Ng, and Guodong Zhou. 2014. A Constituent-Based Approach to Argument La- beling with Joint Inference in Discourse Parsing. In Proceedings of the 2014 Conference on Em- pirical Methods in Natural Language Processing (EMNLP), pages 68-77. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Temporal Interpretation, Discourse Relations and Commonsense entailment", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Lascarides", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Asher", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Linguistics and Philosophy", |
| "volume": "16", |
| "issue": "5", |
| "pages": "437--493", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Lascarides and Nicholas Asher. 1993. Tem- poral Interpretation, Discourse Relations and Com- monsense entailment. Linguistics and Philosophy, 16(5):437-493.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Recursive Deep Models for Discourse Parsing", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Rumeng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2061--2069", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Rumeng Li, and Eduard Hovy. 2014. Re- cursive Deep Models for Discourse Parsing. In Pro- ceedings of the 2014 Conference on Empirical Meth- ods in Natural Language Processing (EMNLP), pages 2061-2069, Doha, Qatar, October. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Recognizing Implicit Discourse Relations in the Penn Discourse Treebank", |
| "authors": [ |
| { |
| "first": "Ziheng", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Min-Yen", |
| "middle": [], |
| "last": "Kan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hwee Tou", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "343--351", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziheng Lin, Min-Yen Kan, and Hwee Tou Ng. 2009. Recognizing Implicit Discourse Relations in the Penn Discourse Treebank. In Proceedings of the 2009 Conference on Empirical Methods in Nat- ural Language Processing: Volume 1 -Volume 1, EMNLP '09, pages 343-351, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A PDTB-styled end-to-end discourse parser", |
| "authors": [ |
| { |
| "first": "Ziheng", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Min-Yen", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kan", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Natural Language Engineering", |
| "volume": "20", |
| "issue": "", |
| "pages": "151--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziheng Lin, Hwee Tou Ng, and Min-Yen Kan. 2014. A PDTB-styled end-to-end discourse parser. Natural Language Engineering, 20:151-184, 4.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Implicit Discourse Relation Classification via Multi-Task Neural Networks", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifang", |
| "middle": [], |
| "last": "Sui", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yang Liu, Sujian Li, Xiaodong Zhang, and Zhifang Sui. 2016. Implicit Discourse Relation Classi- fication via Multi-Task Neural Networks. CoRR, abs/1603.02776.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Rhetorical structure theory: Toward a functional theory of text organization", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandra", |
| "middle": [ |
| "A" |
| ], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Thompson", |
| "suffix": "" |
| } |
| ], |
| "year": 1988, |
| "venue": "Text", |
| "volume": "8", |
| "issue": "3", |
| "pages": "243--281", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William C. Mann and Sandra A. Thompson. 1988. Rhetorical structure theory: Toward a functional the- ory of text organization. Text, 8(3):243-281.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "An Unsupervised Approach to Recognizing Discourse Relations", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdessamad", |
| "middle": [], |
| "last": "Echihabi", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics, ACL '02", |
| "volume": "", |
| "issue": "", |
| "pages": "368--375", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Marcu and Abdessamad Echihabi. 2002. An Unsupervised Approach to Recognizing Discourse Relations. In Proceedings of the 40th Annual Meet- ing on Association for Computational Linguistics, ACL '02, pages 368-375, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Efficient Estimation of Word Representations in Vector Space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of Workshop at International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient Estimation of Word Repre- sentations in Vector Space. In Proceedings of Work- shop at International Conference on Learning Rep- resentations.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Vector-based Models of Semantic Composition", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "236--244", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Mitchell and Mirella Lapata. 2008. Vector-based Models of Semantic Composition. In Proceedings of Association for Computational Linguistics, pages 236-244.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A method of solving a convex programming problem with convergence rate O (1/k2)", |
| "authors": [ |
| { |
| "first": "Yurii", |
| "middle": [], |
| "last": "Nesterov", |
| "suffix": "" |
| } |
| ], |
| "year": 1983, |
| "venue": "Soviet Mathematics Doklady", |
| "volume": "27", |
| "issue": "", |
| "pages": "372--376", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yurii Nesterov. 1983. A method of solving a con- vex programming problem with convergence rate O (1/k2). In Soviet Mathematics Doklady, volume 27, pages 372-376.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The DCU Discourse Parser: A Sense Classification Task", |
| "authors": [ |
| { |
| "first": "Tsuyoshi", |
| "middle": [], |
| "last": "Okita", |
| "suffix": "" |
| }, |
| { |
| "first": "Longyue", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning -Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "71--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsuyoshi Okita, Longyue Wang, and Qun Liu. 2015. The DCU Discourse Parser: A Sense Classifica- tion Task. In Proceedings of the Nineteenth Confer- ence on Computational Natural Language Learning -Shared Task, pages 71-77, Beijing, China, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Improving Implicit Discourse Relation Recognition Through Feature Set Optimization", |
| "authors": [ |
| { |
| "first": "Joonsuk", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "108--112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joonsuk Park and Claire Cardie. 2012. Improving Implicit Discourse Relation Recognition Through Feature Set Optimization. In Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue, page 108-112, Seoul, South Korea, July. Association for Computational Linguistics, Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Using Syntax to Disambiguate Explicit Discourse Connectives in Text", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Pitler", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "ACL 2009, Proceedings of the 47th Annual Meeting of the Association for Computational Linguistics and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "13--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Pitler and Ani Nenkova. 2009. Using Syntax to Disambiguate Explicit Discourse Connectives in Text. In ACL 2009, Proceedings of the 47th Annual Meeting of the Association for Computational Lin- guistics and the 4th International Joint Conference on Natural Language Processing of the AFNLP, 2-7 August 2009, Singapore, Short Papers, pages 13-16.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Automatic Sense Prediction for Implicit Discourse Relations in Text", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Pitler", |
| "suffix": "" |
| }, |
| { |
| "first": "Annie", |
| "middle": [], |
| "last": "Louis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "2", |
| "issue": "", |
| "pages": "683--691", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Pitler, Annie Louis, and Ani Nenkova. 2009. Automatic Sense Prediction for Implicit Discourse Relations in Text. In Proceedings of the Joint Con- ference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP: Volume 2 -Vol- ume 2, ACL '09, pages 683-691, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Improving the Reproducibility of PAN's Shared Tasks: Plagiarism Detection, Author Identification, and Author Profiling", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Potthast", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Gollub", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Rangel", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "Efstathios", |
| "middle": [], |
| "last": "Stamatatos", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Information Access Evaluation meets Multilinguality, Multimodality, and Visualization. 5th International Conference of the CLEF Initiative (CLEF 14)", |
| "volume": "", |
| "issue": "", |
| "pages": "268--299", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Potthast, Tim Gollub, Francisco Rangel, Paolo Rosso, Efstathios Stamatatos, and Benno Stein. 2014. Improving the Reproducibility of PAN's Shared Tasks: Plagiarism Detection, Author Iden- tification, and Author Profiling. In Evangelos Kanoulas, Mihai Lupu, Paul Clough, Mark Sander- son, Mark Hall, Allan Hanbury, and Elaine Toms, editors, Information Access Evaluation meets Mul- tilinguality, Multimodality, and Visualization. 5th International Conference of the CLEF Initiative (CLEF 14), pages 268-299, Berlin Heidelberg New York, September. Springer.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "The Penn Discourse TreeBank 2.0", |
| "authors": [ |
| { |
| "first": "Rashmi", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Dinesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Eleni", |
| "middle": [], |
| "last": "Miltsakaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Livio", |
| "middle": [], |
| "last": "Robaldo", |
| "suffix": "" |
| }, |
| { |
| "first": "Aravind", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rashmi Prasad, Nikhil Dinesh, Alan Lee, Eleni Milt- sakaki, Livio Robaldo, Aravind Joshi, and Bonnie Webber. 2008. The Penn Discourse TreeBank 2.0. In In Proceedings of LREC.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Discovering Implicit Discourse Relations Through Brown Cluster Pair Representation and Coreference Patterns", |
| "authors": [ |
| { |
| "first": "Attapol", |
| "middle": [], |
| "last": "Rutherford", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "645--654", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Attapol Rutherford and Nianwen Xue. 2014. Discov- ering Implicit Discourse Relations Through Brown Cluster Pair Representation and Coreference Pat- terns. In Proceedings of the 14th Conference of the European Chapter of the Association for Computa- tional Linguistics, pages 645-654. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Perelygin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1631--1642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Y. Ng, and Christopher Potts. 2013. Recursive Deep Mod- els for Semantic Compositionality Over a Sentiment Treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Pro- cessing, pages 1631-1642, Stroudsburg, PA, Octo- ber. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "The UniTN Discourse Parser in CoNLL 2015 Shared Task: Token-level Sequence Labeling with Argument-specific Models", |
| "authors": [ |
| { |
| "first": "Evgeny", |
| "middle": [], |
| "last": "Stepanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Riccardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Orkan Ali", |
| "middle": [], |
| "last": "Bayer", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning -Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "25--31", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evgeny Stepanov, Giuseppe Riccardi, and Orkan Ali Bayer. 2015. The UniTN Discourse Parser in CoNLL 2015 Shared Task: Token-level Sequence Labeling with Argument-specific Models. In Pro- ceedings of the Nineteenth Conference on Compu- tational Natural Language Learning -Shared Task, pages 25-31. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "A Refined End-to-End Discourse Parser", |
| "authors": [ |
| { |
| "first": "Jianxiang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Man", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning -Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "17--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianxiang Wang and Man Lan. 2015. A Refined End-to-End Discourse Parser. In Proceedings of the Nineteenth Conference on Computational Natu- ral Language Learning -Shared Task, pages 17-24. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "The DCU Discourse Parser for Connective, Argument Identification and Explicit Sense Classification", |
| "authors": [ |
| { |
| "first": "Longyue", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Hokamp", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsuyoshi", |
| "middle": [], |
| "last": "Okita", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning -Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "89--94", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Longyue Wang, Chris Hokamp, Tsuyoshi Okita, Xiao- jun Zhang, and Qun Liu. 2015. The DCU Discourse Parser for Connective, Argument Identification and Explicit Sense Classification. In Proceedings of the Nineteenth Conference on Computational Natural Language Learning -Shared Task, pages 89-94. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "D-LTAG: extending lexicalized TAG to discourse", |
| "authors": [ |
| { |
| "first": "Bonnie", |
| "middle": [ |
| "L" |
| ], |
| "last": "Webber", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Cognitive Science", |
| "volume": "28", |
| "issue": "5", |
| "pages": "751--779", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bonnie L. Webber. 2004. D-LTAG: extending lex- icalized TAG to discourse. Cognitive Science, 28(5):751-779.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Learning Representations for Text-level Discourse Parsing", |
| "authors": [ |
| { |
| "first": "Gregor", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the ACL-IJCNLP 2015 Student Research Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "16--21", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gregor Weiss. 2015. Learning Representations for Text-level Discourse Parsing. In Proceedings of the ACL-IJCNLP 2015 Student Research Workshop, pages 16-21, Beijing, China, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "The CoNLL-2015 Shared Task on Shallow Discourse Parsing", |
| "authors": [ |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Rashmi", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Attapol", |
| "middle": [], |
| "last": "Bryant", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rutherford", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Nineteenth Conference on Computational Natural Language Learning: Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nianwen Xue, Hwee Tou Ng, Sameer Pradhan, Rashmi Prasad, Christopher Bryant, and Attapol Rutherford. 2015. The CoNLL-2015 Shared Task on Shallow Discourse Parsing. In Proceedings of the Nine- teenth Conference on Computational Natural Lan- guage Learning: Shared Task, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "The CoNLL-2016 Shared Task on Shallow Discourse Parsing", |
| "authors": [ |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Hwee Tou Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Attapol", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| }, |
| { |
| "first": "Chuan", |
| "middle": [], |
| "last": "Rutherford", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongmin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Twentieth Conference on Computational Natural Language Learning -Shared Task", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nianwen Xue, Hwee Tou Ng, Sameer Pradhan, Bon- nie Webber, Attapol Rutherford, Chuan Wang, and Hongmin Wang. 2016. The CoNLL-2016 Shared Task on Shallow Discourse Parsing. In Proceedings of the Twentieth Conference on Computational Nat- ural Language Learning -Shared Task, Berlin, Ger- many, August. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Shallow Convolutional Neural Network for Implicit Discourse Relation Recognition", |
| "authors": [ |
| { |
| "first": "Biao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinsong", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Deyi", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaojie", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hong", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| }, |
| { |
| "first": "Junfeng", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2230--2235", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Biao Zhang, Jinsong Su, Deyi Xiong, Yaojie Lu, Hong Duan, and Junfeng Yao. 2015. Shallow Convolu- tional Neural Network for Implicit Discourse Re- lation Recognition. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing, EMNLP 2015, Lisbon, Portugal, September 17-21, 2015, pages 2230-2235.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "PDTB-style Discourse Annotation of Chinese Text", |
| "authors": [ |
| { |
| "first": "Yuping", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Nianwen", |
| "middle": [], |
| "last": "Xue", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "69--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuping Zhou and Nianwen Xue. 2012. PDTB-style Discourse Annotation of Chinese Text. In Proceed- ings of the 50th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 69-77, Jeju Island, Korea, July. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Predicting Discourse Connectives for Implicit Discourse Relation Recognition", |
| "authors": [ |
| { |
| "first": "Zhi-Min", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng-Yu", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| }, |
| { |
| "first": "Man", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Chew Lim", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics: Posters, COLING '10", |
| "volume": "", |
| "issue": "", |
| "pages": "1507--1514", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhi-Min Zhou, Yu Xu, Zheng-Yu Niu, Man Lan, Jian Su, and Chew Lim Tan. 2010. Predicting Discourse Connectives for Implicit Discourse Relation Recog- nition. In Proceedings of the 23rd International Conference on Computational Linguistics: Posters, COLING '10, pages 1507-1514, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "The feature construction process from argument spans (light blue) and neural architecture (dark blue) for implicit sense classification (incl. EntRel) . Dotted lines represent pointwise vector operations.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| } |
| } |
| } |
| } |