| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T01:08:46.856091Z" |
| }, |
| "title": "Examining the rhetorical capacities of neural language models", |
| "authors": [ |
| { |
| "first": "Zining", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Toronto", |
| "location": {} |
| }, |
| "email": "zining@cs.toronto.edu" |
| }, |
| { |
| "first": "Chuer", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Toronto", |
| "location": {} |
| }, |
| "email": "chuer.pan@mail.utoronto.ca" |
| }, |
| { |
| "first": "Mohamed", |
| "middle": [], |
| "last": "Abdalla", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Toronto", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Rudzicz", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Toronto", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recently, neural language models (LMs) have demonstrated impressive abilities in generating high-quality discourse. While many recent papers have analyzed the syntactic aspects encoded in LMs, to date, there has been no analysis of the inter-sentential, rhetorical knowledge. In this paper, we propose a method that quantitatively evaluates the rhetorical capacities of neural LMs. We examine the capacities of neural LMs understanding the rhetoric of discourse by evaluating their abilities to encode a set of linguistic features derived from Rhetorical Structure Theory (RST). Our experiments show that BERT-based LMs outperform other Transformer LMs, revealing the richer discourse knowledge in their intermediate layer representations. In addition, GPT-2 and XLNet apparently encode less rhetorical knowledge, and we suggest an explanation drawing from linguistic philosophy. Our method presents an avenue towards quantifying the rhetorical capacities of neural LMs.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recently, neural language models (LMs) have demonstrated impressive abilities in generating high-quality discourse. While many recent papers have analyzed the syntactic aspects encoded in LMs, to date, there has been no analysis of the inter-sentential, rhetorical knowledge. In this paper, we propose a method that quantitatively evaluates the rhetorical capacities of neural LMs. We examine the capacities of neural LMs understanding the rhetoric of discourse by evaluating their abilities to encode a set of linguistic features derived from Rhetorical Structure Theory (RST). Our experiments show that BERT-based LMs outperform other Transformer LMs, revealing the richer discourse knowledge in their intermediate layer representations. In addition, GPT-2 and XLNet apparently encode less rhetorical knowledge, and we suggest an explanation drawing from linguistic philosophy. Our method presents an avenue towards quantifying the rhetorical capacities of neural LMs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In recent years, neural LMs (especially contextualized LMs) have shown profound abilities to generate texts that could be almost indistinguishable from human writings (Radford et al., 2019) . Neural LMs could be used to generate concise summaries (Song et al., 2019) , coherent stories (See et al., 2019) , and complete documents given prompts (Keskar et al., 2019) . It is natural to question their source and extent of rhetorical knowledge: What makes neural LMs articulate, and how? While some recent works query the linguistic knowledge (Hewitt and Manning, 2019; Liu et al., 2019a; Chen et al., 2019; , this open question remain unanswered. We hypothesize that contextualized neural LMs encode rhetorical knowledge in their intermediate repre-sentations, and would like to quantify the extent they encode rhetorical knowledge.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 189, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 247, |
| "end": 266, |
| "text": "(Song et al., 2019)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 286, |
| "end": 304, |
| "text": "(See et al., 2019)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 344, |
| "end": 365, |
| "text": "(Keskar et al., 2019)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 541, |
| "end": 567, |
| "text": "(Hewitt and Manning, 2019;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 568, |
| "end": 586, |
| "text": "Liu et al., 2019a;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 587, |
| "end": 605, |
| "text": "Chen et al., 2019;", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To verify our hypothesis, we hand-craft a set of 24 rhetorical features including those used to examine rhetorical capacities of students (Mohsen and Alshahrani, 2019; Liu and Kunnan, 2016; Zhang, 2013; Powers et al., 2001) , and evaluate how well neural LMs encode these rhetorical features in the representations while encoding texts.", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 167, |
| "text": "(Mohsen and Alshahrani, 2019;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 168, |
| "end": 189, |
| "text": "Liu and Kunnan, 2016;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 190, |
| "end": 202, |
| "text": "Zhang, 2013;", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 203, |
| "end": 223, |
| "text": "Powers et al., 2001)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recent work has started to evaluate encoded features from hidden representations. Among them, probing (Alain and Bengio, 2017; Adi et al., 2017) has been a popular choice. Previous work probed morphological Bisazza and Tump, 2018) , agreement (Giulianelli et al., 2018) , and syntactic features (Hewitt and Manning, 2019; Hewitt and Liang, 2019) . Probing involves optimizing a simple projection model from representations to features. The loss of this optimization measures the difficulty to decode features from the representations.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 126, |
| "text": "(Alain and Bengio, 2017;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 127, |
| "end": 144, |
| "text": "Adi et al., 2017)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 207, |
| "end": 230, |
| "text": "Bisazza and Tump, 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 243, |
| "end": 269, |
| "text": "(Giulianelli et al., 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 295, |
| "end": 321, |
| "text": "(Hewitt and Manning, 2019;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 322, |
| "end": 345, |
| "text": "Hewitt and Liang, 2019)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we use a probe containing self attention mechanism. We first project the variablelength embeddings to a fixed-length latent representation per document. Then, we apply a simple diagnostic classifier to detect rhetorical features from this latent representation. This design of probe reduces the total number of parameters, and enable us to better understand each model's ability to encode rhetorical knowledge. We find that:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 The BERT-based LMs encode more rhetorical features, and in a more stable manner, than other models. \u2022 The semantics of non-contextualized embeddings also pertain to some rhetorical features, but less than most layers of contextualized language models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "These observations allow us to investigate the mechanisms of neural LMs to better understand the degree to which they encode linguistic knowledge. We demonstrate how discourse-level features can be queried and analyzed from neural LMs. All of our code and parsed tree data will be available at github.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Various frameworks exist for \"good discourse\" (Lawrence and Reed, 2019; Irish and Weiss, 2009; Toulmin, 1958) , but most of them are inaccessible to quantitative analysis. In this work, we use Rhetorical Structure Theory (Mann and Thompson, 1988; Mann et al., 1989) since it represents the structures of discourse using trees, allowing straightforward quantitative analysis. There are two components in an RST parse-tree:", |
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 71, |
| "text": "(Lawrence and Reed, 2019;", |
| "ref_id": null |
| }, |
| { |
| "start": 72, |
| "end": 94, |
| "text": "Irish and Weiss, 2009;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 95, |
| "end": 109, |
| "text": "Toulmin, 1958)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 221, |
| "end": 246, |
| "text": "(Mann and Thompson, 1988;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 247, |
| "end": 265, |
| "text": "Mann et al., 1989)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structural analysis of discourse", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2022 Each leaf node represents an elementary discourse unit (EDU). The role of an EDU in an article is similar to that of a word in a sentence. \u2022 Each non-leaf node denotes a relation involving its two children. Often, one of the children is more dependent on the other, and less essential to the writer's purpose. This child is referred to as \"satellite\", while the more central child is the \"nucleus\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structural analysis of discourse", |
| "sec_num": "2" |
| }, |
| { |
| "text": "I didn't know", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SN-Attribution", |
| "sec_num": null |
| }, |
| { |
| "text": "this is from C but it is very good! Figure 1 : A portion of an RST tree, selected from IMDB (Maas et al., 2011) train/pos/1 7.txt, and parsed with Feng and Hirst (2014). Nodes with rectangle borders are discourse relations, and those without borders are individual EDUs. The \"N\" and \"S\" prefix for discourse relations stand for \"nucleus\" and \"satellite\" respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 36, |
| "end": 44, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "SN-Attribution", |
| "sec_num": null |
| }, |
| { |
| "text": "Tree representations are clear, easy to understand, and allow us to compute features to numerically depict the rhetorical aspects of documents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SN-Attribution", |
| "sec_num": null |
| }, |
| { |
| "text": "Previous work used RST features to analyze the quality of discourse, to assess writing abilities (Wang et al., 2019; Zhang, 2013) , examine linguistic coherence Abdalla et al., 2017) , and to analyze arguments (Chakrabarty et al., 2019) . In this project, we extract similar RST features in the following three categories:", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 116, |
| "text": "(Wang et al., 2019;", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 117, |
| "end": 129, |
| "text": "Zhang, 2013)", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 161, |
| "end": 182, |
| "text": "Abdalla et al., 2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 210, |
| "end": 236, |
| "text": "(Chakrabarty et al., 2019)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rhetorical features", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Discourse relation occurrences (Sig) We include the number of relations detected in each document. There are 18 relations in this category 1 . Unfortunately, the relations adopted by open-source RST parsers are not unified. To allow for comparison against other parsers, we do not differentiate subtle differences between relations, therefore grouping very similar relations, following the approach in (Feng and Hirst, 2012) . (E.g., we consider both Topic-Shift and Topic-Drift to be a Topic-Change). Specifically, this approach does not differentiate between the sequence of nucleus and satellite (e.g., NS-Evaluation and SN-Evaluation are both considered as an Evaluation).", |
| "cite_spans": [ |
| { |
| "start": 402, |
| "end": 424, |
| "text": "(Feng and Hirst, 2012)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rhetorical features", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Tree property features (Tree) We compute the depth and the Yngve depth (the number of rightbranching in the tree) (Yngve, 1960) of each tree node, and include their mean and variance as characteristic features, following previous work extracting tree linguistic features (Li et al., 2019; Zhu et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 271, |
| "end": 288, |
| "text": "(Li et al., 2019;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 289, |
| "end": 306, |
| "text": "Zhu et al., 2019)", |
| "ref_id": "BIBREF60" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Rhetorical features", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We include the mean and variance of EDU lengths of each document. We hypothesize the longer EDUs indicate higher levels of redundancy in discourse, hence extracting rhetorical features require memory across longer spans.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EDU related features (EDU)", |
| "sec_num": null |
| }, |
| { |
| "text": "Overall, there are 24 features from three categories. We normalize them to zero mean and unit variance, and take these RST features for probing. The features are not independent of each other. Specifically, the features of each group tend to describe the same property from different aspects. 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EDU related features (EDU)", |
| "sec_num": null |
| }, |
| { |
| "text": "1 The 18 relations are: Attribution, Background, Cause, Comparison, Condition, Contrast, Elaboration, Enablement, Evaluation, Explanation, Joint, Manner-Means, Topic-Comment, Summary, Temporal, Topic-Change, Textualorganization, and Same-unit.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EDU related features (EDU)", |
| "sec_num": null |
| }, |
| { |
| "text": "2 For example, Sig features describe the composition of the document in a histogram. For the same document, if a relation is changed, e.g., from Contrast to Attribution, then the occurrence of both Contrast and Attribution are affected. Figure 2 : RST relation occurrences per document. RST-DT contain longer documents than IMDB on average. However, the distributions of frequencies between these two datasets are relatively consistent, with Elaboration, Joint, and Attribution the most frequent signals.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 237, |
| "end": 245, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "EDU related features (EDU)", |
| "sec_num": null |
| }, |
| { |
| "text": "Our probing method contains two weight parameters, W d and W p . First, we embed a document with L tokens using a neural LM with D dimensions to get a raw representation matrix X \u2208 R L\u00d7D . We use a projection matrix W d \u2208 R D\u00d7d to reduce the embedding dimension from D (e.g., D = 768 for BERT and 2048 for XLM) to a much smaller one, d. Then, we use self attention similar to Lin et al. (2017) to collect the information spread across the document to a condensed form:", |
| "cite_spans": [ |
| { |
| "start": 376, |
| "end": 393, |
| "text": "Lin et al. (2017)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probe", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "A = (XW d ) T (XW d ) \u2208 R d\u00d7d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probe", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We flatten A into a vector with fixed size:\u00c3 = (d 2 , 1). We use a probing matrix W p \u2208 R d 2 \u00d7m to extract RST features v \u2208 R m from attention, normalize them to zero mean and unit variance, and optimize based on the expected L 2 error:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probe", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "min W d ,Wp E||W T p\u00c3 \u2212 v|| 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probe", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Note that the reduction from D to d using W p is necessary, because it significantly lowers the number of parameters of the probing model. If there were no W d (i.e., d = 768), then W p alone would require 768 2 m parameters to probe m features. Now, we let d = 10, then W d and W p combined have D \u00d7 d + d 2 m \u2248 7680 + 100m parameters. Considering m \u2208 O(10 1 ), the total parameter size is reduced from O(10 6 ) to O(10 3 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probe", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "There is one more step before we can use this loss to measure the difficulty of probing rhetorical features. L 2 error scales linearly with the dimension of features m, so it is necessary to normalize the L 2 error by m, to ensure that the losses can be compared across linguistic feature sets. The difficulty of probing a group of m features v \u2208 R m therefore is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probe", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Difficulty = 1 m E W T p\u00c3 \u2212 v 2 3 Experiments 3.1 Data", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probe", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Most state-of-the-art rhetorical parsers are trained on either Penn Discourse Treebank (Ji and Eisenstein, 2014; Feng and Hirst, 2012) or RST-DT (Feng and Hirst, 2014; Joty et al., 2015; Surdeanu et al., 2015; Heilman and Sagae, 2015; Li et al., 2016; Wang et al., 2017; Yu et al., 2018) . Although the documents contain accurate discourse annotations, RST-DT (Carlson et al., 2001) only has 385 documents. The Penn Discourse Treebank (Prasad et al., 2008) has 2,159 documents but their annotations do not follow the RST framework. So in addition to RST-DT, we extend the analysis to a 100 times larger dataset, IMDB (Maas et al., 2011). IMDB contains 50,000 movie reviews without discourse annotations. In these reviews, the authors explain and elaborate upon their opinions towards certain movies and give ratings. We removed html tags, and attempt to parse all of them (i.e., both train and test data) using a two-pass parser from Feng and Hirst (2014). We discarded 1,977 documents that the RST parser generate ill-formatted trees 3 . Of the remaining documents, we additionally filtered out those with sequence lengths greater than 512 tokens 4 , resulting in 40,833 documents. RST features combined, while each dash-dotted line denotes one component (EDU, Sig, or Tree feature group for red, green, and blue respectively). In general, BERT-based LMs (BERT, BERT-multi, RoBERTa) encode rhetorical features in a more stable and easy-to-probe manner than the rest.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 112, |
| "text": "(Ji and Eisenstein, 2014;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 113, |
| "end": 134, |
| "text": "Feng and Hirst, 2012)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 138, |
| "end": 167, |
| "text": "RST-DT (Feng and Hirst, 2014;", |
| "ref_id": null |
| }, |
| { |
| "start": 168, |
| "end": 186, |
| "text": "Joty et al., 2015;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 187, |
| "end": 209, |
| "text": "Surdeanu et al., 2015;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 210, |
| "end": 234, |
| "text": "Heilman and Sagae, 2015;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 235, |
| "end": 251, |
| "text": "Li et al., 2016;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 252, |
| "end": 270, |
| "text": "Wang et al., 2017;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 271, |
| "end": 287, |
| "text": "Yu et al., 2018)", |
| "ref_id": "BIBREF58" |
| }, |
| { |
| "start": 360, |
| "end": 382, |
| "text": "(Carlson et al., 2001)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 435, |
| "end": 456, |
| "text": "(Prasad et al., 2008)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Probe", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "After parsing each document into an \"RST-tree\", we extracted the features mentioned in Section 2.1 from these parsed trees. Figure 2 shows the occurrence of the 18 RST relations per document, and Table 1 shows the statistics of remaining 6 features. In addition, we include several examples of parsed RST trees in Appendix.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 124, |
| "end": 132, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 196, |
| "end": 203, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Probe", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Mean \u00b1 stdev tree depth mean 3.9\u00b11.4 tree depth var 4.6\u00b14.2 tree Yngve mean 9.2\u00b18.8 tree Yngve var 100.6\u00b1164.6 edu len mean 8.6\u00b11.4 edu len var 21.8\u00b116.0 Table 1 : Statistics of the 6 non-occurrence-based RST features. The prefix \"tree \" here refers to the parsed \"RST-tree\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 154, |
| "end": 161, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature name", |
| "sec_num": null |
| }, |
| { |
| "text": "language models come with their own tokenizers. Note that RoBERTa adds two special tokens, so this threshold becomes 510 for RoBERTa.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature name", |
| "sec_num": null |
| }, |
| { |
| "text": "We considered the following popular neural LMs:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2022 BERT BASE (Devlin et al., 2019) This LM with 110M parameters is built with 12-layer Transformer encoder (Vaswani et al., 2017) with 768 hidden dimensions. It is trained with masked LM (i.e., cloze) and next sentence prediction objectives using 16GB text. \u2022 BERT-multi (Wolf et al., 2019) Same as BERT, BERT-multi is also a 12-layer Transformer encoder with 768 hidden dimensions and 110M parameters. Its difference from BERT is that, BERT-multi is trained on top 104 languages with the largest Wikipedia. \u2022 RoBERTa (Liu et al., 2019b) is an enhanced version of BERT with the same architecture, similar masked LM objectives, and 10 times larger training corpus (over 160GB). \u2022 GPT-2 (Radford et al., 2019 ) is a 12-layer Transformer decoder with 768 hidden dimensions. There are 117M parameters in total. GPT-2 is pretrained on 40GB of text. Unlike BERT, GPT-2 is a uni-directional LM. Transformer-XL with two streams of self attention and 768 hidden dimensions and 110M parameters. The XLNet we use is trained on 33GB texts using the \"permutation language modeling\" objective, with its LM factorization according to shuffled orders, but its positional encoding correspond to the original sequence order. The permutation LM objective introduces diversity and randomness to the context.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 33, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 106, |
| "end": 128, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 270, |
| "end": 289, |
| "text": "(Wolf et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 517, |
| "end": 536, |
| "text": "(Liu et al., 2019b)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 684, |
| "end": 705, |
| "text": "(Radford et al., 2019", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To make comparisons between models fair, we limit to 12-layer neural LMs. The models are pretrained by Huggingface (Wolf et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 134, |
| "text": "(Wolf et al., 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language models", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We formulated probing as an optimization problem, and implemented our solution with PyTorch (Paszke et al., 2019) and the Adam optimizer (Kingma and Ba, 2014) for 40 epochs. If the training loss stalls (i.e., does not change by \u2265 10 \u22123 ), or if the training loss rises by more than 10% from the previous epoch, we stop the optimization. All optimizations follow the same learning rate tuning schemas.", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 113, |
| "text": "(Paszke et al., 2019)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In our experiments, the representation dimension d is taken to be 10, while the LM dimensions D is 2048 for XLM and 768 for the rest.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "From Figure 3 , neural LMs encode RST features in different manners, depending on their structures. In general, for BERT-based models, features seem to distribute evenly across layers. On GPT-2 and XLNet, lower layers seem to encode slightly more EDU and Sig features than higher levels, whereas Tree features seem to be more concentrated in layers 2-6. The results on XLM are relatively noisy, possibly because the uni-language version does not benefit from the performance boost of crosslanguage modeling.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 5, |
| "end": 13, |
| "text": "Figure 3", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Where do LMs encode RST features?", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Contrasting with previous work that suggested that middle layers most contain syntactic features (Hewitt and Manning, 2019; Jawahar et al., 2019) , our results indicate a less definitive localization for discourse features, except for the first and final layers. We suggest that the reason they encode less discourse information is that the first layer focuses on connections between \"locations\", while the final layer focuses on extracting representations most relevant to the final task.", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 123, |
| "text": "(Hewitt and Manning, 2019;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 124, |
| "end": 145, |
| "text": "Jawahar et al., 2019)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Where do LMs encode RST features?", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Are RST features equally hard to probe? Figure 3 also shows the difficulty in probing features across feature sets. In BERT-based models, EDU and Tree features are comparably easier to probe, whereas the Sig feature groups is more challenging. However, GPT-2, XLNet, and XLM do not regard EDU or Tree features easier to probe than other groups. Nevertheless, the results on all features correlate more to the Sig features.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 40, |
| "end": 46, |
| "text": "Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Where do LMs encode RST features?", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "How about averaging layers? For comparison, we also used the mean of all 12 layers for each neural LM. Figure 5 shows the probing results. Except GPT-2, other LMs show similar performances when the representations of layers are averaged. In addition, the performances show that Sig features are harder to probe than Tree and EDU features, whereas the aggregation task (using all features) appears harder than each of its three component feature groups.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 103, |
| "end": 111, |
| "text": "Figure 5", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Where do LMs encode RST features?", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We perform ablation studies to illustrate the effectiveness of probing, deconstructing the language model probe step-by-step. First, we get rid of the contextualization component in language modeling by using non-contextualized word embeddings, GloVe and FastText. Then, we discard the semantic component of word embedding by mapping tokens to randomly generated vectors (RandEmbed). Finally, we remove all information pertaining to the text, leading to a random predictor for RST features, RandGuess.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deconstructing the probe", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Non-contextualized word embeddings We consider two popular word embeddings here:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deconstructing the probe", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 GloVe (Pennington et al., 2014) contains 2.2M vocabulary items and produces 300dimensional word vectors. The GloVe embedding we use is pretrained on Common Crawl.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deconstructing the probe", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 FastText (Bojanowski et al., 2017) is trained on Wikipedia 2017 + UMBC (16B tokens) including subword information, and produces 300-dimensional word vectors.", |
| "cite_spans": [ |
| { |
| "start": 11, |
| "end": 36, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deconstructing the probe", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Word embeddings map each token into a Ddimensional semantic space. Therefore, for a document of length L, the embedded matrix also has shape L \u00d7 D. The difference from the contextualized neural LMs is that, the D-dimensional vectors of every word do not depend on their contexts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deconstructing the probe", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Random embeddings In this step, we assign a non-trainable random embedding vector per token in the vocabulary. This removes the semantic information encoded by GloVe and FastText word embeddings. As shown in Figure 4 , 5: RandEmbed is worse than GloVe and FastText (except for GloVe in Sig features task). This verifies some semantic information is preserved in word embeddings.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 208, |
| "end": 216, |
| "text": "Figure 4", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deconstructing the probe", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Contextualized LMs against baseline First, the lack of context restrict the probing performance of non-contextualized baselines. They are worse than most layers in contextualized LMs (in Figure 4) , and are worse than all except GPT-2 if we average the layers (in Figure 5) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 187, |
| "end": 196, |
| "text": "Figure 4)", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 264, |
| "end": 273, |
| "text": "Figure 5)", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deconstructing the probe", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Second, it is impossible for any LM to have a \"negative\" rhetorical capacity. If the probing loss is worse than RandEmbed baseline, that means the RST probe can not detect rhetorical features of the given category encoded in the representations. This is what happens in some layers of GPT-2, XLM, and XLNet, and the mean of all layers of GPT-2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Deconstructing the probe", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Random guesser To measure the capacity of baseline embeddings, we set up a random guesser as a \"baseline-of-baseline\". The random guesser outputs the arithmetic mean of RST features plus a small Gaussian noise (with s.d. \u03c3 \u2208 {0, 0.01, 0.1, 1.0}) The output of RandGuess is completely independent of the discourse. As shown in Table 2 , the best of the four random guessers is much worse than any of the three word embedding baselines, which is expected.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 326, |
| "end": 333, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Deconstructing the probe", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "From probing experiments (Figure 3, 4, and 5) we can see that BERT-based LMs have slightly better rhetorical capacities than XLNet, and much better capacities than GPT-2. We present two hypotheses as following.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 25, |
| "end": 45, |
| "text": "(Figure 3, 4, and 5)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Why are some LMs better?", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "RST Rhetorics favor contexts from both directions BERT-based LMs use Transformer encoders, whereas GPT-2 use Transformer decoders. Their main difference is that a Transformer encoder considers contexts from both \"past\" and \"future\", while a Transformer decoder only conditions on the context from the \"past\" (Vaswani et al., 2017) . GPT-2 attends to uni-directional contexts. Apparently both the \"past\" and \"future\" context would contribute to the rhetorical features of words. Without \"future\" contexts, GPT-2 would encode less rhetorical information.", |
| "cite_spans": [ |
| { |
| "start": 308, |
| "end": 330, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF50" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Config", |
| "sec_num": null |
| }, |
| { |
| "text": "Feature Set", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Config", |
| "sec_num": null |
| }, |
| { |
| "text": "Random permutation makes encoded rhetorics harder to decode The difference between XL-Net and other LMs is the permutation in context. While permutation increases the diversity in discourse, they could also bring in new meaning to the texts. For example, the sentence in Figure 1 (\"I didn't know this is from C, but it is very good!\") has several syntactically plausable factorization sequences: .. Apparently such diversity in contexts makes the upper layers of XLNet contain harder-to-decode rhetorical features. If we average the representations of all layers, XLNet has larger variance than BERT-based LMs. We hypothesize that larger layer-wise difference is a factor of such instability for averaged representations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 271, |
| "end": 279, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Config", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 I didn't know C ... \u2022 ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Config", |
| "sec_num": null |
| }, |
| { |
| "text": "RST probing is not perfect. While we designed our comparisons to be rigorous, there are still several limitations to the RST probe, described below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "\u2022 RST signals are noisy. The RST relation classification task is less defined than established tasks like POS tagging. Humans tend to disagree with the annotators, resulting in a merely 65.8% accuracy in relation classification (i.e., the task introduced by Marcu (2000)). Regardless, state-of-the-art discourse parsers currently have performances slightly higher than 60% Ji and Eisenstein, 2014; Wang et al., 2017) . \u2022 Train / test corpus discrepancy of RST parsers.", |
| "cite_spans": [ |
| { |
| "start": 373, |
| "end": 397, |
| "text": "Ji and Eisenstein, 2014;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 398, |
| "end": 416, |
| "text": "Wang et al., 2017)", |
| "ref_id": "BIBREF53" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Most available RST parsers are trained on RST-DT consisting of Wall Street Journal articles. The results of parsers are affected by the corpus. As shown in some examples in Appendix, the IMDB movie review dataset contains less formal languages, introducing noise in segmentations and relation signals.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "To counteract noise of this type, we recommend evaluating LMs using a corpus similar to the scenario of applying the LM. \u2022 Only 12-layer LMs are involved, to compare across various layers fairly. But our approach would be applicable to 3-layer ELMo and deeper LMs as well. Appropriate statistical controls would naturally need to be applied. \u2022 Not all documents can be analyzed. First, documents longer than 512 tokens cannot be encoded into one vector in our probing model. Second, while RST provides elegant frameworks for analyzing rhetorical structures of discourse, in practice, the RST pipeline does not guarantee a successful analysis for an arbitrary document scraped online.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitations", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Recent work has considered the interpretability of contextualized representations. For example, Jain and Wallace (2019) found attention to be uncorrelated to gradient-based feature importance, while Wiegreffe and Pinter (2019) suggested such approaches allowed too much flexibility to give convincing results. Similarly, Serrano et al. (2019) considered attention representations to be noisy indicators of feature importance. Many tasks in argument mining, similar to our task of examining neural LMs, require understanding the rhetorical aspects of discourse (Lawrence and Reed, 2019) . This allows RST to be applied in relevant work. For example, RST enables understanding and analyzing argument struc-tures of monologues (Peldszus and Stede, 2016) and, when used with other discourse features, RST can improve role-labelling in online arguments (Chakrabarty et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 199, |
| "end": 226, |
| "text": "Wiegreffe and Pinter (2019)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 321, |
| "end": 342, |
| "text": "Serrano et al. (2019)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 560, |
| "end": 585, |
| "text": "(Lawrence and Reed, 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 724, |
| "end": 750, |
| "text": "(Peldszus and Stede, 2016)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 848, |
| "end": 874, |
| "text": "(Chakrabarty et al., 2019)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Probing neural LMs is an emergent diagnostic task on those models. Previous work probed morphological (Bisazza and Tump, 2018) , agreement (Giulianelli et al., 2018) , and syntactic features (Hewitt and Manning, 2019). Hewitt and Liang (2019) compared different probes, and recommended linear probes with as few parameters as possible, for the purpose of reducing overfitting. Recently, Pimentel et al. (2020) argued against this choice from an information-theoretic point of view. Voita and Titov (2020) presents an optimization goal for probes based on minimum description length. Liu et al. (2019a) proposed 16 diverse probing tasks on top of contextualized LMs including token labeling (e.g., PoS), segmentation (e.g., NER, grammatical error detection) and pairwise relations. While LMs augmented with a probing layer could reach state-of-the-art performance on many tasks, they found that LMs still lacked fine-grained linguistic knowledge. DiscoEval (Chen et al., 2019) showed that BERT outperformed traditional pretrained sentence encoders in encoding discourse coherence features, which our results echo.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 126, |
| "text": "(Bisazza and Tump, 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 139, |
| "end": 165, |
| "text": "(Giulianelli et al., 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 219, |
| "end": 242, |
| "text": "Hewitt and Liang (2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 387, |
| "end": 409, |
| "text": "Pimentel et al. (2020)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 482, |
| "end": 504, |
| "text": "Voita and Titov (2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 583, |
| "end": 601, |
| "text": "Liu et al. (2019a)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 956, |
| "end": 975, |
| "text": "(Chen et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this paper, we propose a method to quantitatively analyze the amount of rhetorical information encoded in neural language models. We compute features based on Rhetorical Structure Theory (RST) and probe the RST features from contextualized representations of neural LMs. Among six popular neural LMs, we find that contextualization helps to generally improve the rhetorical capacities of LMs, while individual models may vary in quality. In general, LMs attending to contexts from both directions (BERT-based) encode rhetorical knowledge in a more stable manner than those using unidirectional contexts (GPT-2) or permuted contexts (XLNet).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our method presents an avenue towards quantitatively describing rhetorical capacities of neural language models based on unlabeled, target-domain corpus. This method may be used for selecting suitable LMs in tasks including rhetorical acts classifications, discourse modeling, and response generation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "As a sanity check, we include experiments on RST-DT (Carlson et al., 2001 ) corpus with the same preprocessing and feature extraction procedures (i.e., perform feature extraction and embedding on the article level, and ignoring the overlength articles). As shown in Figure 6 , BERT-family and XLM outperform GPT-2 and XLNet. Also, the noncontextualized embedding baselines show worse performances than contextualized embeddings in general, with some exceptions (e.g., GPT-2 on EDU features). These are similar to the IMDB results.", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 73, |
| "text": "(Carlson et al., 2001", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 266, |
| "end": 274, |
| "text": "Figure 6", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Experiments on RST-DT", |
| "sec_num": null |
| }, |
| { |
| "text": "What are different is that the probing losses of RST-DT are lower than the IMDB experiments in general. We consider two possible explanations. First, the IMDB signals contain more noise, so that probing rhetorical features from IMDB would be naturally more difficult than probing from the RST-DT dataset. Second, it is possible that the probes overfit the much smaller RST-DT dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Experiments on RST-DT", |
| "sec_num": null |
| }, |
| { |
| "text": "We include several examples of IMDB parse trees in Appendix here, including some examples where the RST parser makes mistakes on a new domain, movie review. For clarity of illustration, these examples are among the shorter movie reviews. More parse trees can be generated by our visualization code, which is contained in our submitted scripts. myself rated a 10/10 I would highly recommend people to watch this movie . <P> Figure 8 : IMDB train/pos/11857 10.txt. There is an EDU segmentation error: the \"I\" is incorrectly assigned to the previous sentence \"This movie overall was a really good movie\". Apparently some lexical cues the EDU segmentator relies on (e.g., sentence finishes with a period sign) is not always followed in IMDB. Some of the transitional scenes were filmed in interesting ways such as time lapse photography , unusual colors , or interesting angles . <s> Also the film is funny is several parts . <s> I also liked how the evil guy was portrayed too . <s> Figure 9 : IMDB train/pos/1000 8.txt. The parser captures the key sentence of this review. All sentences following the first one act as reasons to explain how the reviewer liked the film. it is where Jim is messing up the anchor man 's voice . <s> Figure 10 : IMDB train/pos/10301 8.txt. The interjection, \"well\", is incorrectly identified as the satellite of the summary signal. This is likely caused by the discrepancy between the train (RST-DT) and test (IMDB) corpus discrepancy for the RST parser. The RST-DT dataset contains news articles, which are more formal than the online review in IMDB. The term \"well\" is therefore more likely to be identified as other senses. Figure 11 : IMDB train/pos/11825 8.txt. One might suggest that the last EDU could be moved one level higher (so that it summarizes the whole review), but this parsing is also reasonable, since the mention of kids elaborates the descriptions of the makeup and the views.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 423, |
| "end": 431, |
| "text": "Figure 8", |
| "ref_id": null |
| }, |
| { |
| "start": 980, |
| "end": 988, |
| "text": "Figure 9", |
| "ref_id": null |
| }, |
| { |
| "start": 1228, |
| "end": 1237, |
| "text": "Figure 10", |
| "ref_id": null |
| }, |
| { |
| "start": 1655, |
| "end": 1664, |
| "text": "Figure 11", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "B Examples of parse trees", |
| "sec_num": null |
| }, |
| { |
| "text": "As determined by nltk.tree.4 As determined by any one of the tokenizers, since these", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Figure 13: IMDB train/pos/11686 10.txt.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank the anonymous reviewers for feedback. Rudzicz is supported by a CIFAR Chair in artificial intelligence. Abdalla is supported by a Vanier scholarship.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgement", |
| "sec_num": null |
| }, |
| { |
| "text": " Figure 12 : IMDB train/pos/10788 10.txt. This is an example of the EDU segmentation contains mistake. The \"i wish i\" should be merged with the subsequent EDU, \"could live in big rock candy mountain\". Note that the sentence starts with two lowercase \"i\" (which should be uppercase). The non-standard usages like these are unique for less formal texts like IMDB. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1, |
| "end": 10, |
| "text": "Figure 12", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Rhetorical structure and Alzheimer's disease", |
| "authors": [ |
| { |
| "first": "Mohamed", |
| "middle": [], |
| "last": "Abdalla", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Rudzicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Aphasiology", |
| "volume": "32", |
| "issue": "1", |
| "pages": "41--60", |
| "other_ids": { |
| "DOI": [ |
| "10.1080/02687038.2017.1355439" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohamed Abdalla, Frank Rudzicz, and Graeme Hirst. 2017. Rhetorical structure and Alzheimer's disease. Aphasiology, 32(1):41-60.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Fine-grained analysis of sentence embeddings using auxiliary prediction tasks", |
| "authors": [ |
| { |
| "first": "Yossi", |
| "middle": [], |
| "last": "Adi", |
| "suffix": "" |
| }, |
| { |
| "first": "Einat", |
| "middle": [], |
| "last": "Kermany", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ofer", |
| "middle": [], |
| "last": "Lavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "In ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yossi Adi, Einat Kermany, Yonatan Belinkov, Ofer Lavi, and Yoav Goldberg. 2017. Fine-grained anal- ysis of sentence embeddings using auxiliary predic- tion tasks. In ICLR, Toulon, France.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Understanding intermediate layers using linear classifier probes", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Alain", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Alain and Yoshua Bengio. 2017. Under- standing intermediate layers using linear classifier probes. In ICLR, Toulon, France.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "What do Neural Machine Translation Models Learn about Morphology", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "861--872", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1080" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov, Nadir Durrani, Fahim Dalvi, Hassan Sajjad, and James Glass. 2017. What do Neural Ma- chine Translation Models Learn about Morphology? In ACL, pages 861-872, Vancouver, Canada. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The lazy encoder: A fine-grained analysis of the role of morphology in neural machine translation", |
| "authors": [ |
| { |
| "first": "Arianna", |
| "middle": [], |
| "last": "Bisazza", |
| "suffix": "" |
| }, |
| { |
| "first": "Clara", |
| "middle": [], |
| "last": "Tump", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2871--2876", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1313" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arianna Bisazza and Clara Tump. 2018. The lazy en- coder: A fine-grained analysis of the role of mor- phology in neural machine translation. In EMNLP, pages 2871-2876, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "TACL", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. TACL, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Building a Discourse-Tagged Corpus in the Framework of Rhetorical Structure Theory", |
| "authors": [ |
| { |
| "first": "Lynn", |
| "middle": [], |
| "last": "Carlson", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ellen" |
| ], |
| "last": "Okurowski", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "SIGDIAL Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lynn Carlson, Daniel Marcu, and Mary Ellen Okurowski. 2001. Building a Discourse-Tagged Corpus in the Framework of Rhetorical Structure Theory. In SIGDIAL Workshop.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "AMPERSAND: Argument Mining for PER-SuAsive oNline Discussions", |
| "authors": [ |
| { |
| "first": "Tuhin", |
| "middle": [], |
| "last": "Chakrabarty", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Hidey", |
| "suffix": "" |
| }, |
| { |
| "first": "Smaranda", |
| "middle": [], |
| "last": "Muresan", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathy", |
| "middle": [], |
| "last": "Mckeown", |
| "suffix": "" |
| }, |
| { |
| "first": "Alyssa", |
| "middle": [], |
| "last": "Hwang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2933--2943", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1291" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tuhin Chakrabarty, Christopher Hidey, Smaranda Muresan, Kathy McKeown, and Alyssa Hwang. 2019. AMPERSAND: Argument Mining for PER- SuAsive oNline Discussions. In EMNLP, pages 2933-2943, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Evaluation Benchmarks and Learning Criteria for Discourse-Aware Sentence Representations", |
| "authors": [ |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zewei", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "649--662", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1060" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mingda Chen, Zewei Chu, and Kevin Gimpel. 2019. Evaluation Benchmarks and Learning Criteria for Discourse-Aware Sentence Representations. In EMNLP, pages 649-662, Hond Kong, China. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Transformer-XL: Attentive Language Models beyond a Fixed-Length Context", |
| "authors": [ |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "2978--2988", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1285" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Car- bonell, Quoc Le, and Ruslan Salakhutdinov. 2019. Transformer-XL: Attentive Language Models be- yond a Fixed-Length Context. In ACL, pages 2978- 2988, Florence, Italy. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Un- derstanding. In NAACL. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Text-level Discourse Parsing with Rich Linguistic Features", |
| "authors": [ |
| { |
| "first": "Vanessa", |
| "middle": [], |
| "last": "Wei Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "60--68", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vanessa Wei Feng and Graeme Hirst. 2012. Text-level Discourse Parsing with Rich Linguistic Features. In ACL, pages 60-68, Jeju Island, Korea. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A Linear-Time Bottom-Up Discourse Parser with Constraints and Post-Editing", |
| "authors": [ |
| { |
| "first": "Vanessa", |
| "middle": [], |
| "last": "Wei Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "511--521", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P14-1048" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vanessa Wei Feng and Graeme Hirst. 2014. A Linear- Time Bottom-Up Discourse Parser with Constraints and Post-Editing. In ACL, pages 511-521, Balti- more, Maryland. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The Impact of Deep Hierarchical Discourse Structures in the Evaluation of Text Coherence", |
| "authors": [ |
| { |
| "first": "Vanessa", |
| "middle": [], |
| "last": "Wei Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziheng", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Graeme", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "940--949", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vanessa Wei Feng, Ziheng Lin, and Graeme Hirst. 2014. The Impact of Deep Hierarchical Discourse Structures in the Evaluation of Text Coherence. In COLING, pages 940-949, Dublin, Ireland. Dublin City University and Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Under the hood: Using diagnostic classifiers to investigate and improve how language models track agreement information", |
| "authors": [ |
| { |
| "first": "Mario", |
| "middle": [], |
| "last": "Giulianelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Jack", |
| "middle": [], |
| "last": "Harding", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Mohnert", |
| "suffix": "" |
| }, |
| { |
| "first": "Dieuwke", |
| "middle": [], |
| "last": "Hupkes", |
| "suffix": "" |
| }, |
| { |
| "first": "Willem", |
| "middle": [], |
| "last": "Zuidema", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "EMNLP BlackBoxNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "240--248", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5426" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mario Giulianelli, Jack Harding, Florian Mohnert, Dieuwke Hupkes, and Willem Zuidema. 2018. Un- der the hood: Using diagnostic classifiers to investi- gate and improve how language models track agree- ment information. In EMNLP BlackBoxNLP, pages 240-248, Brussels, Belgium. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Fast rhetorical structure theory discourse parsing", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Heilman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Sagae", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Heilman and Kenji Sagae. 2015. Fast rhetor- ical structure theory discourse parsing. arXiv preprint 1505.02425.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Designing and interpreting probes with control tasks", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Hewitt", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2733--2743", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1275" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Hewitt and Percy Liang. 2019. Designing and interpreting probes with control tasks. In EMNLP, pages 2733-2743, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A Structural Probe for Finding Syntax in Word Representations", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Hewitt", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Minneapolis, Minnesota. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4129--4138", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1419" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Hewitt and Christopher D Manning. 2019. A Structural Probe for Finding Syntax in Word Repre- sentations. In NAACL, pages 4129-4138, Minneapo- lis, Minnesota. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Engineering communication: from principles to practice", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Irish", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "Eliot" |
| ], |
| "last": "Weiss", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Irish and Peter Eliot Weiss. 2009. Engineering communication: from principles to practice. Oxford University Press Canada.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Attention is not Explanation", |
| "authors": [ |
| { |
| "first": "Sarthak", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Byron", |
| "middle": [ |
| "C" |
| ], |
| "last": "Wallace", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "3543--3556", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1357" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not Explanation. In NAACL, pages 3543-3556, Min- neapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "What Does BERT Learn about the Structure of Language? In ACL", |
| "authors": [ |
| { |
| "first": "Ganesh", |
| "middle": [], |
| "last": "Jawahar", |
| "suffix": "" |
| }, |
| { |
| "first": "Beno\u00eet", |
| "middle": [], |
| "last": "Sagot", |
| "suffix": "" |
| }, |
| { |
| "first": "Djam\u00e9", |
| "middle": [], |
| "last": "Seddah", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "3651--3657", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/p19-1356" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ganesh Jawahar, Beno\u00eet Sagot, and Djam\u00e9 Seddah. 2019. What Does BERT Learn about the Structure of Language? In ACL, pages 3651-3657, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Representation learning for text-level discourse parsing", |
| "authors": [ |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "13--24", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P14-1002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yangfeng Ji and Jacob Eisenstein. 2014. Represen- tation learning for text-level discourse parsing. In ACL, pages 13-24, Baltimore, Maryland. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "CODRA: A Novel Discriminative Framework for Rhetorical Analysis", |
| "authors": [ |
| { |
| "first": "Shafiq", |
| "middle": [], |
| "last": "Joty", |
| "suffix": "" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Carenini", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond T", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Computational Linguistics", |
| "volume": "41", |
| "issue": "3", |
| "pages": "385--435", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/COLI_a_00226" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shafiq Joty, Giuseppe Carenini, and Raymond T Ng. 2015. CODRA: A Novel Discriminative Framework for Rhetorical Analysis. Computational Linguistics, 41(3):385-435.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "CTRL -A Conditional Transformer Language Model for Controllable Generation", |
| "authors": [ |
| { |
| "first": "Nitish", |
| "middle": [ |
| "Shirish" |
| ], |
| "last": "Keskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "McCann", |
| "suffix": "" |
| }, |
| { |
| "first": "Lav", |
| "middle": [], |
| "last": "Varshney", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitish Shirish Keskar, Bryan McCann, Lav Varsh- ney, Caiming Xiong, and Richard Socher. 2019. CTRL -A Conditional Transformer Language Model for Controllable Generation. arXiv preprint 1909.05858.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. In ICLR, Banff, Canada.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Detecting dementia in Mandarin Chinese using transfer learning from a parallel corpus", |
| "authors": [ |
| { |
| "first": "Bai", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yi-Te", |
| "middle": [], |
| "last": "Hsu", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Rudzicz", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1991--1997", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1199" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bai Li, Yi-Te Hsu, and Frank Rudzicz. 2019. De- tecting dementia in Mandarin Chinese using trans- fer learning from a parallel corpus. In NAACL, pages 1991-1997, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Discourse Parsing with Attention-based Hierarchical Neural Networks", |
| "authors": [ |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianshi", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Baobao", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "362--371", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1035" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qi Li, Tianshi Li, and Baobao Chang. 2016. Discourse Parsing with Attention-based Hierarchical Neural Networks. In EMNLP, pages 362-371, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "A structured self-attentive sentence embedding", |
| "authors": [ |
| { |
| "first": "Zhouhan", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Minwei", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Cicero", |
| "middle": [ |
| "Nogueira" |
| ], |
| "last": "dos Santos", |
| "suffix": "" |
| }, |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhouhan Lin, Minwei Feng, Cicero Nogueira dos San- tos, Mo Yu, Bing Xiang, Bowen Zhou, and Yoshua Bengio. 2017. A structured self-attentive sentence embedding. In ICLR, Toulon, France.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Linguistic Knowledge and Transferability of Contextual Representations", |
| "authors": [ |
| { |
| "first": "Nelson", |
| "middle": [ |
| "F" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1073--1094", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/n19-1112" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nelson F. Liu, Matt Gardner, Yonatan Belinkov, Matthew E. Peters, and Noah A. Smith. 2019a. Lin- guistic Knowledge and Transferability of Contextual Representations. pages 1073-1094, Minneapolis, Minnesota. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Investigating the Application of Automated Writing Evaluation to Chinese Undergraduate English Majors: A Case Study of WriteToLearn", |
| "authors": [ |
| { |
| "first": "Sha", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Antony", |
| "middle": [], |
| "last": "Kunnan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "CALICO", |
| "volume": "33", |
| "issue": "1", |
| "pages": "71--91", |
| "other_ids": { |
| "DOI": [ |
| "10.1558/cj.v33i1.26380" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sha Liu and Antony Kunnan. 2016. Investigating the Application of Automated Writing Evaluation to Chinese Undergraduate English Majors: A Case Study of WriteToLearn. CALICO, 33(1):71-91.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019b. RoBERTa: A Robustly Optimized BERT Pretrain- ing Approach. arXiv preprint 1907.11692.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Learning Word Vectors for Sentiment Analysis", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [ |
| "L" |
| ], |
| "last": "Maas", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "E" |
| ], |
| "last": "Daly", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "T" |
| ], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "142--150", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew L Maas, Raymond E Daly, Peter T Pham, Dan Huang, Andrew Y Ng, and Christopher Potts. 2011. Learning Word Vectors for Sentiment Analysis. In ACL, pages 142-150, Portland, Oregon, USA. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Rhetorical structure theory and text analysis", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [ |
| "C" |
| ], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [ |
| "M", |
| "I", |
| "M" |
| ], |
| "last": "Matthiessen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandra", |
| "middle": [ |
| "A" |
| ], |
| "last": "Thompson", |
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William C Mann, Christian M I M Matthiessen, and Sandra A Thompson. 1989. Rhetorical structure the- ory and text analysis. ISI Research Report.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Rhetorical Structure Theory: Toward a functional theory of text organization. Interdisciplinary Journal for the Study of Discourse", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [ |
| "C" |
| ], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandra", |
| "middle": [ |
| "A" |
| ], |
| "last": "Thompson", |
| "suffix": "" |
| } |
| ], |
| "year": 1988, |
| "venue": "", |
| "volume": "8", |
| "issue": "", |
| "pages": "243--281", |
| "other_ids": { |
| "DOI": [ |
| "10.1515/text.1.1988.8.3.243" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "William C. Mann and Sandra A. Thompson. 1988. Rhetorical Structure Theory: Toward a functional theory of text organization. Interdisciplinary Jour- nal for the Study of Discourse, 8(3):243-281.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "The Theory and Practice of Discourse Parsing and Summarization", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Marcu. 2000. The Theory and Practice of Discourse Parsing and Summarization. MIT Press, Cambridge, MA, USA.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "The Effectiveness of Using a Hybrid Mode of Automated Writing Evaluation System on EFL Students' Writing", |
| "authors": [ |
| { |
| "first": "Mohammed", |
| "middle": [], |
| "last": "Ali Mohsen", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdulaziz", |
| "middle": [], |
| "last": "Alshahrani", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Teaching English with Technology", |
| "volume": "19", |
| "issue": "1", |
| "pages": "118--131", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammed Ali Mohsen and Abdulaziz Alshahrani. 2019. The Effectiveness of Using a Hybrid Mode of Automated Writing Evaluation System on EFL Stu- dents' Writing. Teaching English with Technology, 19(1):118-131.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Paszke", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Massa", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lerer", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Chanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Killeen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zeming", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Gimelshein", |
| "suffix": "" |
| }, |
| { |
| "first": "Luca", |
| "middle": [], |
| "last": "Antiga", |
| "suffix": "" |
| }, |
| { |
| "first": "Alban", |
| "middle": [], |
| "last": "Desmaison", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "K\u00f6pf", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zach", |
| "middle": [], |
| "last": "Devito", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Raison", |
| "suffix": "" |
| }, |
| { |
| "first": "Alykhan", |
| "middle": [], |
| "last": "Tejani", |
| "suffix": "" |
| }, |
| { |
| "first": "Sasank", |
| "middle": [], |
| "last": "Chilamkurthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Benoit", |
| "middle": [], |
| "last": "Steiner", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NeurIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas K\u00f6pf, Edward Yang, Zach DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Jun- jie Bai, and Soumith Chintala. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. In NeurIPS.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Rhetorical structure and argumentation structure in monologue text", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Peldszus", |
| "suffix": "" |
| }, |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Stede", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "103--112", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-2812" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Peldszus and Manfred Stede. 2016. Rhetori- cal structure and argumentation structure in mono- logue text. In ArgMining Workshop, pages 103- 112, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "GloVe: Global Vectors for Word Representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. GloVe: Global Vectors for Word Representation. In EMNLP, pages 1532-1543, Doha, Qatar.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Information-Theoretic Probing for Linguistic Structure", |
| "authors": [ |
| { |
| "first": "Tiago", |
| "middle": [], |
| "last": "Pimentel", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Valvoda", |
| "suffix": "" |
| }, |
| { |
| "first": "Rowan", |
| "middle": [], |
| "last": "Hall Maudslay", |
| "suffix": "" |
| }, |
| { |
| "first": "Ran", |
| "middle": [], |
| "last": "Zmigrod", |
| "suffix": "" |
| }, |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tiago Pimentel, Josef Valvoda, Rowan Hall Maudslay, Ran Zmigrod, Adina Williams, and Ryan Cotterell. 2020. Information-Theoretic Probing for Linguis- tic Structure. Association of Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Stumping E-Rater: Challenging the Validity of Automated Essay Scoring", |
| "authors": [ |
| { |
| "first": "Donald", |
| "middle": [ |
| "E" |
| ], |
| "last": "Powers", |
| "suffix": "" |
| }, |
| { |
| "first": "Jill", |
| "middle": [ |
| "C" |
| ], |
| "last": "Burstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Chodorow", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "E" |
| ], |
| "last": "Fowles", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Kukich", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Educational Testing Service", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Donald E Powers, Jill C Burstein, Martin Chodorow, Mary E Fowles, and Karen Kukich. 2001. Stumping E-Rater: Challenging the Validity of Automated Es- say Scoring. Technical report, Educational Testing Service, Princeton, New Jersey.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "The Penn Discourse TreeBank 2.0", |
| "authors": [ |
| { |
| "first": "Rashmi", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Dinesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Eleni", |
| "middle": [], |
| "last": "Miltsakaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Livio", |
| "middle": [], |
| "last": "Robaldo", |
| "suffix": "" |
| }, |
| { |
| "first": "Aravind", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rashmi Prasad, Nikhil Dinesh, Alan Lee, Eleni Milt- sakaki, Livio Robaldo, Aravind Joshi, and Bonnie Webber. 2008. The Penn Discourse TreeBank 2.0. In LREC, Marrakech, Morocco. European Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI Blog", |
| "volume": "", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8).", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Do Massively Pretrained Language Models Make Better Storytellers? In CoNLL", |
| "authors": [ |
| { |
| "first": "Abigail", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "Aneesh", |
| "middle": [], |
| "last": "Pappu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rohun", |
| "middle": [], |
| "last": "Saxena", |
| "suffix": "" |
| }, |
| { |
| "first": "Akhila", |
| "middle": [], |
| "last": "Yerukola", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "843--861", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K19-1079" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abigail See, Aneesh Pappu, Rohun Saxena, Akhila Yerukola, and Christopher D Manning. 2019. Do Massively Pretrained Language Models Make Bet- ter Storytellers? In CoNLL, pages 843-861, Hong Kong, China. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Is Attention Interpretable? In ACL", |
| "authors": [ |
| { |
| "first": "Sofia", |
| "middle": [], |
| "last": "Serrano", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [ |
| "G" |
| ], |
| "last": "Allen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "2931--2951", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sofia Serrano, Noah A Smith, and Paul G Allen. 2019. Is Attention Interpretable? In ACL, pages 2931-2951, Minneapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "MASS: Masked Sequence to Sequence Pre-training for Language Generation", |
| "authors": [ |
| { |
| "first": "Kaitao", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, and Tie- Yan Liu. 2019. MASS: Masked Sequence to Se- quence Pre-training for Language Generation. In ICML, Long Beach, California.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Two Practical Rhetorical Structure Theory Parsers", |
| "authors": [ |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Hicks", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [ |
| "A" |
| ], |
| "last": "Valenzuela-Esc\u00e1rcega", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/N15-3001" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mihai Surdeanu, Thomas Hicks, and Marco A Valenzuela-Esc\u00e1rcega. 2015. Two Practical Rhetor- ical Structure Theory Parsers. In NAACL, Denver, Colorado.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "The Uses of Argument", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Stephen Toulmin", |
| "suffix": "" |
| } |
| ], |
| "year": 1958, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Toulmin. 1958. The Uses of Argument. Cam- bridge University Press.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Attention Is All You Need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Lukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "NeurIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1017/S0140525X16001837" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention Is All You Need. In NeurIPS, Long Beach, California.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Using Rhetorical Structure Theory to Assess Discourse Coherence for Non-native Spontaneous Speech", |
| "authors": [ |
| { |
| "first": "Xinhao", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Binod", |
| "middle": [], |
| "last": "Gyawali", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "V" |
| ], |
| "last": "Bruno", |
| "suffix": "" |
| }, |
| { |
| "first": "Hillary", |
| "middle": [ |
| "R" |
| ], |
| "last": "Molloy", |
| "suffix": "" |
| }, |
| { |
| "first": "Keelan", |
| "middle": [], |
| "last": "Evanini", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Zechner", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "DisRPT", |
| "volume": "", |
| "issue": "", |
| "pages": "153--162", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-2719" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xinhao Wang, Binod Gyawali, James V Bruno, Hillary R Molloy, Keelan Evanini, and Klaus Zech- ner. 2019. Using Rhetorical Structure Theory to Assess Discourse Coherence for Non-native Spon- taneous Speech. In DisRPT, pages 153-162, Min- neapolis, Minnesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "A Two-Stage Parsing Method for Text-Level Discourse Analysis", |
| "authors": [ |
| { |
| "first": "Yizhong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Houfeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Vancouver, Canada. Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "184--188", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-2029" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yizhong Wang, Sujian Li, and Houfeng Wang. 2017. A Two-Stage Parsing Method for Text-Level Dis- course Analysis. In ACL, pages 184-188, Vancou- ver, Canada. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Attention is not not Explanation", |
| "authors": [ |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Wiegreffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuval", |
| "middle": [], |
| "last": "Pinter", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "11--20", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarah Wiegreffe and Yuval Pinter. 2019. Attention is not not Explanation. In EMNLP, pages 11-20, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Morgan Funtowicz, and Jamie Brew. 2019. HuggingFace's Transformers: State-of-the-art Natural Language Processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9mi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Brew", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R'emi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. HuggingFace's Trans- formers: State-of-the-art Natural Language Process- ing. arXiv preprint 1910.03771.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc V. Le. 2019. XLNet: Generalized Autoregressive Pretraining for Language Understanding.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "A model and an hypothesis for language structure", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [ |
| "H" |
| ], |
| "last": "Yngve", |
| "suffix": "" |
| } |
| ], |
| "year": 1960, |
| "venue": "Proceedings of the American philosophical society", |
| "volume": "104", |
| "issue": "", |
| "pages": "444--466", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor H Yngve. 1960. A model and an hypothesis for language structure. Proceedings of the American philosophical society, 104(5):444-466.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Transition-based neural RST parsing with implicit syntax features", |
| "authors": [ |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Meishan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Guohong", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "559--570", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nan Yu, Meishan Zhang, and Guohong Fu. 2018. Transition-based neural RST parsing with implicit syntax features. In COLING, pages 559-570, Santa Fe, New Mexico, USA. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Contrasting Automated and Human Scoring of Essays", |
| "authors": [ |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Educational Testing Service", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mo Zhang. 2013. Contrasting Automated and Human Scoring of Essays. Technical report, Educational Testing Service.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Detecting cognitive impairments by agreeing on interpretations on linguistic features", |
| "authors": [ |
| { |
| "first": "Zining", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jekaterina", |
| "middle": [], |
| "last": "Novikova", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Rudzicz", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1431--1441", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1146" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zining Zhu, Jekaterina Novikova, and Frank Rudzicz. 2019. Detecting cognitive impairments by agreeing on interpretations on linguistic features. In NAACL, pages 1431-1441, Minnespolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "text": "Loss vs layer plot of six neural LMs on four RST feature sets on IMDB. The solid lines represent all", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "text": "Probing loss, compared to those from non-contextualized baselines, for four feature groups, on IMDB. BERT-based neural LMs stably outperform the word embedding baselines in almost all layers.\u2022 XLM (Lample and Conneau, 2019) is 12-layer Transformer with 2048-hidden dimensions.We use the English model trained with masked language model (MLM) objective. Different from BERT (taking sentence pairs as input), XLM takes continuous streams of tokens as input. \u2022 XLNet) is a 12-layer", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "text": "Probing performances of averaging 12 layers for 6 neural LMs on 4 tasks in IMDB, compared to the three non-contextual baselines. All LMs except GPT-2 outperform non-contextual LM baselines. Plots for RST-DT (Figure 6in Appendix) reveal similar patterns.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "text": "Probing performances of averaging 12 layers for 6 neural LMs on 4 tasks in RST-DT, compared to the three non-contextual baselines.", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "content": "<table><tr><td>: Comparison between RST probing losses of</td></tr><tr><td>non-contextual word embeddings (FastText, GloVe),</td></tr><tr><td>random embedding (RandEmbed), and a trivial guessor</td></tr><tr><td>(RandGuess).</td></tr></table>", |
| "text": "", |
| "num": null, |
| "html": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "this is C ... \u2022 I know it is very good ... \u2022 I didn't know this is good ... \u2022 ... didn't this C good .", |
| "num": null, |
| "html": null |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "content": "<table><tr><td>Elaboration[N][S] This movie is very good . <s> Contrast[N][N] The screenplay is enchanting . <s> Elaboration[N][S] But Meryl Streep is most impressive . <s> Joint[N][N] Her performance is excellent . <s> She brings me to go into the heart of her role . <P> Attribution[S][N] Elaboration[N][S] that Al Pacino over acted Elaboration[N][S] but I mean common obviously for a movie role like this Attribution[S][N] --a cuban drug lord you need a bit of over acting in this role with that cuban accent . <s> Figure 7: Elaboration[N][S] A lot of people are saying Attribution[S][N] This movie overall was a really good movie I Attribution[S][N]</td></tr></table>", |
| "text": "IMDB train/pos/10348 8.txt. The <s> and <P> are appended automatically by the parser, marking the end of sentences and paragraphs respectively.", |
| "num": null, |
| "html": null |
| } |
| } |
| } |
| } |