| { |
| "paper_id": "C18-1029", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:06:17.866738Z" |
| }, |
| "title": "Topic or Style? Exploring the Most Useful Features for Authorship Attribution", |
| "authors": [ |
| { |
| "first": "Yunita", |
| "middle": [], |
| "last": "Sari", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Sheffield", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "y.sari@sheffield.ac.uk" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Sheffield", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "mark.stevenson@sheffield.ac.uk" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Sheffield", |
| "location": { |
| "country": "UK" |
| } |
| }, |
| "email": "a.vlachos@sheffield.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Approaches to authorship attribution, the task of identifying the author of a document, are based on analysis of individuals' writing style and/or preferred topics. Although the problem has been widely explored, no previous studies have analysed the relationship between dataset characteristics and effectiveness of different types of features. This study carries out an analysis of four widely used datasets to explore how different types of features affect authorship attribution accuracy under varying conditions. The results of the analysis are applied to authorship attribution models based on both discrete and continuous representations. We apply the conclusions from our analysis to an extension of an existing approach to authorship attribution and outperform the prior state-of-the-art on two out of the four datasets used.", |
| "pdf_parse": { |
| "paper_id": "C18-1029", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Approaches to authorship attribution, the task of identifying the author of a document, are based on analysis of individuals' writing style and/or preferred topics. Although the problem has been widely explored, no previous studies have analysed the relationship between dataset characteristics and effectiveness of different types of features. This study carries out an analysis of four widely used datasets to explore how different types of features affect authorship attribution accuracy under varying conditions. The results of the analysis are applied to authorship attribution models based on both discrete and continuous representations. We apply the conclusions from our analysis to an extension of an existing approach to authorship attribution and outperform the prior state-of-the-art on two out of the four datasets used.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Authorship attribution plays an important role in many applications, including plagiarism detection and forensic investigation. Approaches to this problem attempt to identify a document's author through analysis of individual's writing style and/or topics they tend to write about. The problem has been extensively studied and a wide range of features has been explored (Stamatatos, 2013; Schwartz et al., 2013; Seroussi et al., 2013; H\u00fcrlimann et al., 2015) . However, there has been a lack of analysis of the behavior of features across multiple datasets or using a range of classifiers. Consequently, it is difficult to determine which types of features will be most useful for a particular authorship attribution dataset.", |
| "cite_spans": [ |
| { |
| "start": 370, |
| "end": 388, |
| "text": "(Stamatatos, 2013;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 389, |
| "end": 411, |
| "text": "Schwartz et al., 2013;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 412, |
| "end": 434, |
| "text": "Seroussi et al., 2013;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 435, |
| "end": 458, |
| "text": "H\u00fcrlimann et al., 2015)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Authorship attribution is a unique task which is closely related to both the representation of individuals' writing style and text categorization. In some cases, where there is a clear topical distinction between the documents written by different authors, content-related features such as those used in text categorization may be effective. However, style-based features are more likely to be effective for datasets containing a more homogeneous set of topics. Previous work on feature exploration for authorship attribution, focused on the overall effectiveness of features without considering the characteristics of the datasets to which they were applied, e.g. (Grieve, 2007; Guthrie, 2008; Stamatatos, 2009; Brennan et al., 2012; Sapkota et al., 2015) . A wide range of features have been applied to the authorship attribution problem and many previous studies concluded that using character n-grams is often effective, e.g. (Peng et al., 2003; Koppel et al., 2011; Schwartz et al., 2013; Sapkota et al., 2015; Sari et al., 2017; Shrestha et al., 2017) . Thus, character n-grams have become the go-to features for this task to capture both an author's topical preferences and writing style.", |
| "cite_spans": [ |
| { |
| "start": 665, |
| "end": 679, |
| "text": "(Grieve, 2007;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 680, |
| "end": 694, |
| "text": "Guthrie, 2008;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 695, |
| "end": 712, |
| "text": "Stamatatos, 2009;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 713, |
| "end": 734, |
| "text": "Brennan et al., 2012;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 735, |
| "end": 756, |
| "text": "Sapkota et al., 2015)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 930, |
| "end": 949, |
| "text": "(Peng et al., 2003;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 950, |
| "end": 970, |
| "text": "Koppel et al., 2011;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 971, |
| "end": 993, |
| "text": "Schwartz et al., 2013;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 994, |
| "end": 1015, |
| "text": "Sapkota et al., 2015;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1016, |
| "end": 1034, |
| "text": "Sari et al., 2017;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1035, |
| "end": 1057, |
| "text": "Shrestha et al., 2017)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This study explores how the characteristics of a dataset affect the usefulness of different types of features for the authorship attribution task. Experiments are carried out using four datasets that have previously been widely used for this task. Three types of features are considered: style, content and hybrid (a mixture of the previous two types). In contrast to previous work, this study finds that character n-grams do not perform equally well in all datasets. The analysis holds for authorship attribution models using discrete and continuous representations. Using topic modeling and feature analysis, the most effective features can be successfully predicted for three of the four datasets. The results of this analysis are applied via a novel extension of a recently proposed neural approach (Sari et al., 2017) and improved state-of-the-art performance are obtained for two of the four datasets.", |
| "cite_spans": [ |
| { |
| "start": 803, |
| "end": 822, |
| "text": "(Sari et al., 2017)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Authorship attribution features are often referred to stylometric features since the main goal of the task is often thought to be modeling the authors' writing style. Grieve (2007) conducted experiments which involved thirty-nine different types of textual measurements commonly used in attribution studies. His experiments, which were performed using the Chi-squared test on The Telegraph Columnist corpus, concluded that the combination of word and punctuation mark profiles are effective features for representing authors. Similar to Grieve, Guthrie (2008) carried out an exploration of 166 features used for authorship attribution including commonly used stylistic features and several others intended to capture emotional tone. He reported that fifteen features, including punctuation marks, pronouns, fog index and average sentence length to be the most useful. Stamatatos (2009) divided authorship attribution features into five groups: lexical, character, syntactic, semantic and application-specific features. Compared to others, lexical and character features are commonly used in authorship attribution work as they provide rich information about the author's topical preferences and writing style. In addition, both types of features can be extracted in many languages and datasets with little effort.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 180, |
| "text": "Grieve (2007)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 545, |
| "end": 559, |
| "text": "Guthrie (2008)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 868, |
| "end": 885, |
| "text": "Stamatatos (2009)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Simple lexical features (e.g. word frequencies, word n-grams, function words, hapax legomena, word/sentence length) have been widely used since early attribution work (Mendenhall, 1887) . Function words have been proved to be effective features and several studies have reported their usefulness, e.g. (Mosteller and Wallace, 1964; Argamon and Levitan, 2005; Koppel et al., 2005; Juola and Baayen, 2005; Zhao and Zobel, 2005) . Bag-of-words approaches have also been reported as being useful for authorship attribution (Koppel et al., 2011) . These approaches are also commonly applied for sentiment analysis and topic classification tasks (Zhang et al., 2015; Heap et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 185, |
| "text": "(Mendenhall, 1887)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 302, |
| "end": 331, |
| "text": "(Mosteller and Wallace, 1964;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 332, |
| "end": 358, |
| "text": "Argamon and Levitan, 2005;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 359, |
| "end": 379, |
| "text": "Koppel et al., 2005;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 380, |
| "end": 403, |
| "text": "Juola and Baayen, 2005;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 404, |
| "end": 425, |
| "text": "Zhao and Zobel, 2005)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 519, |
| "end": 540, |
| "text": "(Koppel et al., 2011)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 640, |
| "end": 660, |
| "text": "(Zhang et al., 2015;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 661, |
| "end": 679, |
| "text": "Heap et al., 2017)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The usefulness of character n-grams has been highlighted in several studies including (Peng et al., 2003; Stamatatos, 2013; Schwartz et al., 2013; Sapkota et al., 2015) . Koppel et al. (2011) argued that this effectiveness comes from their ability to capture both content and stylistic information. Similar conclusions were reported by Sapkota et al. (2015) . They analysed subgroups of character n-grams in both single and cross-domain settings. They concluded that affixes and punctuation n-grams make a significant contribution towards the effectiveness of character n-grams.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 105, |
| "text": "(Peng et al., 2003;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 106, |
| "end": 123, |
| "text": "Stamatatos, 2013;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 124, |
| "end": 146, |
| "text": "Schwartz et al., 2013;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 147, |
| "end": 168, |
| "text": "Sapkota et al., 2015)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 171, |
| "end": 191, |
| "text": "Koppel et al. (2011)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 336, |
| "end": 357, |
| "text": "Sapkota et al. (2015)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our study differs from previous work in that we perform dataset analysis using topic modeling followed by feature ablation experiments. Thus, we are able to determine the level that each type of feature affects authorship attribution accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Experiments 1 are performed using four authorship attribution datasets: Judgment (Seroussi et al., 2011) , CCAT10, CCAT50 (Stamatatos, 2008) , and IMDb62 (Seroussi et al., 2010) . These datasets were chosen because they are all commonly used in previous literature and represent a range of characteristics in terms of the number of authors, topic/genre and document length (see Table 1 ). Judgment consists of legal judgments from three Australian High Court judges while both CCAT datasets are subsets of Reuters Corpus Volume 1 (RCV1) (Rose et al., 2002) . The IMDb62 dataset was collected from movie reviews and message board posts of the Internet Movie database. Train/test partitions are provided for both CCAT datasets by the respective authors. For Judgment and IMDb62 we follow previous work (Seroussi et al., 2013 ) by using 10-fold cross validation in our experiments. We do not make use of datasets from recent authorship attribution shared task events, e.g. PAN (Juola, 2012) , due to their relatively small size and fact that they provide a very small number of documents per author.", |
| "cite_spans": [ |
| { |
| "start": 81, |
| "end": 104, |
| "text": "(Seroussi et al., 2011)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 122, |
| "end": 140, |
| "text": "(Stamatatos, 2008)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 154, |
| "end": 177, |
| "text": "(Seroussi et al., 2010)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 537, |
| "end": 556, |
| "text": "(Rose et al., 2002)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 800, |
| "end": 822, |
| "text": "(Seroussi et al., 2013", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 974, |
| "end": 987, |
| "text": "(Juola, 2012)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 378, |
| "end": 385, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The aim of this analysis is to quantify the topical similarity between authors in each of the datasets considered. The motivation for this is that certain datasets may have clear topical preferences between authors which cause authorship attribution to be biased towards topic classification. Therefore, topic modeling can help assess the topical (dis-)similarity among authors. We use Latent Dirichlet Allocation (LDA) (Blei et al., 2003) , a generative probabilistic model for text collections. Documents are represented as mixtures over latent topics, where each topic is characterized by a distribution over words. Assuming a trained topic model over an authorship attribution dataset D, if C \u03b1 is the set of documents written by author \u03b1 and \u03a6 i is the topic distribution for the i-th document in C \u03b1 , then we estimate the topic distribution for a particular author, \u03b8 \u03b1 , as follows:", |
| "cite_spans": [ |
| { |
| "start": 420, |
| "end": 439, |
| "text": "(Blei et al., 2003)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b8 \u03b1 = |C\u03b1| i=1 \u03a6 i |C \u03b1 |", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Dataset Analysis", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Following this, the difference between two author's topic probability distributions \u03b8 \u03b1 and \u03b8 \u03b2 is calculated using the Jensen-Shannon Divergence (JSD) (Cover and Thomas, 2006) :", |
| "cite_spans": [ |
| { |
| "start": 163, |
| "end": 176, |
| "text": "Thomas, 2006)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "JSD(\u03b8 \u03b1 , \u03b8 \u03b2 ) = 1 2 D KL (\u03b8 \u03b1 ||M ) + 1 2 D KL (\u03b8 \u03b2 ||M )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Dataset Analysis", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where M = 1 2 (\u03b8 \u03b1 + \u03b8 \u03b2 ) and D KL is Kullback-Leibler divergence. Table 2 shows the average of JSD for all author pairs in each of the datasets having trained a topic model with varying numbers of topics. High JSD scores indicate more topical dis-similarity between authors in the dataset. 2 The CCAT datasets, which contain on-line news, have higher scores compared to Judgment and IMDb62. The scores for CCAT50 and CCAT10 are similar, despite the fact that the first dataset contains five times the number of authors of the second. The consistency of this comparison across different numbers of topics indicates that this method of assessing content similarity between authors is robust with respect to tuning this parameter. Judgment has the lowest score across the four datasets indicating that the authors discuss the most similar topics. Finally, scores for the IMDb62 dataset obtained were higher than those for Judgment but lower than both CCAT's scores. Differences in scores for IMDb62 are due to the authors' preferences, as some comment on the story while others comment on the characters of the movie. Overall, we observe that the genre of the datasets influences the topical dis-similarity between authors.", |
| "cite_spans": [ |
| { |
| "start": 292, |
| "end": 293, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 68, |
| "end": 75, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Confusion matrices were created to further analyse the differences between authors. These matrices were generated after running LDA with 20 topics for 1000 iterations. Similar patterns were observed using different numbers of topics. Darker color indicates higher JSD score between two authors. In the CCAT50 dataset (Figure 1a) , one author (number 11, indicated by an arrow) has very different topic preferences compared to the others. Articles written by author 11 mainly discuss topics related to gold, exploration, Canada, Indonesia which are rarely picked by the other authors. A similar pattern is found in IMDb62 (see Figure 1b) , where reviews by author 16 (also indicated by an arrow) are dominated by positive comments about movies unlike other authors who tended to write negative reviews or discuss the story and/or the characters. However, unlike the aforementioned datasets, authors in Judgment wrote about relatively similar topics. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 317, |
| "end": 328, |
| "text": "(Figure 1a)", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 626, |
| "end": 636, |
| "text": "Figure 1b)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset Analysis", |
| "sec_num": "4" |
| }, |
| { |
| "text": "An ablation study was carried out to determine the contribution of different features for each dataset. Following previous studies (Abbasi and Chen, 2008; Stamatatos, 2009) , feature groups are divided into three types (see Table 3 ):", |
| "cite_spans": [ |
| { |
| "start": 131, |
| "end": 154, |
| "text": "(Abbasi and Chen, 2008;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 155, |
| "end": 172, |
| "text": "Stamatatos, 2009)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 224, |
| "end": 231, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 Style: Style-based features, such as usage of function words, digits and punctuation, capture an author's writing style. We used pre-defined sets of 174 function words and 12 punctuation marks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 Content: Content-based features, e.g. bags of n-grams, represent the author's topical preferences. All function words are removed when extracting these features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 Hybrid: The final feature type, hybrid, are character n-grams which are intended to capture both writing style and topical preferences (Koppel et al., 2011; Sapkota et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 158, |
| "text": "(Koppel et al., 2011;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 159, |
| "end": 180, |
| "text": "Sapkota et al., 2015)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Feature Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Both character and word n-grams are limited to bi-and tri-grams. As the purpose of these ablation experiments is not to outperform previous work, only the 100 most common n-grams are used for each feature type. Authorship attribution experiments were carried out using two classifiers: a single hidden layer Feedforward Neural Network model (FNN) and Logistic Regression (LR). The FNN hyper-parameters including the number of neurons and dropout rates were tuned on the development set for each of the datasets. For Judgment, CCAT10 and CCAT50, we set the number of epochs to 250 and 100 for IMDb62. For all datasets, early stopping was used on the development sets and the models were optimized with the Adam update rule (Kingma and Ba, 2015). Since none of the datasets have a standard development set, we randomly selected 10% of the training data for this purpose. For LR, we found that using the default parameters from Scikit-Learn (Pedregosa et al., 2011) resulted in comparable performances to the FNN. Accuracy was used as the evaluation metric to measure authorship attribution performance. Table 3 : Authorship attribution feature sets.", |
| "cite_spans": [ |
| { |
| "start": 938, |
| "end": 962, |
| "text": "(Pedregosa et al., 2011)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1101, |
| "end": 1108, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Results are presented in Table 4 . The (\u2212) symbol indicates that the respective feature type is excluded.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 25, |
| "end": 32, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Ablation Results", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The results confirm our topic model-based analysis (see Section 4). Style-based features are more effective for datasets in which authors discuss similar topics, e.g. Judgment and IMDb62. As expected, content-based features are generally more effective when there is more dis-similarity between the topics discussed by the authors in the dataset, e.g. CCAT10 and CCAT50, but are of limited usefulness when the topics are similar (particularly for the Judgment dataset). The hybrid features appear to behave similarly to the content-based features since they are most useful when the topic dis-similarity between authors is high. To examine the results further, we generated confusion matrices for the Logistic Regression (LR) classifier applied on CCAT10 dataset (Figure 2 ). The effect of removing content-based features is shown in Figure 2b where the prediction accuracy for authors Alexander Smith and Mure Dickie drops from 96% and 80% (see Figure 2a ) to 84% and 64% respectively. Content-based features are essential in this particular genre (newswire) dataset, since each author usually has different topical interests. For example, among the ten authors in the dataset, Alexander Smith mostly discussed topic related to investment and finance, while China was dominantly written about by Mure Dickie, Benjamin Kang and Jane Macartney.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 763, |
| "end": 772, |
| "text": "(Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 834, |
| "end": 843, |
| "text": "Figure 2b", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 946, |
| "end": 955, |
| "text": "Figure 2a", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature Ablation Results", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In addition, writing style between authors in this genre can be very similar. Thus, applying style-based or hybrid features alone may not be effective. Additional feature exploration was carried out to analyse what types of features are more important to the classifier overall. We performed an analysis using LIME (Ribeiro et al., 2016) , a model agnostic framework for intepretability. LIME provides explanations about how a classifier made a prediction by identifying important input features. We selected a document from each of the datasets and analysed what kind of features were used in its prediction. Figures 3, 4 , 5 and 6 present the predictions of LR trained on 1000 word unigrams in Judgment, CCAT10, CCAT50, and IMDb62 respectively. In these experiment function words are not removed. For each of the documents presented, LR made correct author predictions with probability close to 100%. The darker shade indicates more important words in the attribution prediction. In the two CCAT datasets the classifier put more weight on content-based words such as Thomson, Canada and Toronto. In contrast, function words e.g. at, had, and, was appear to be more salient in Judgment and IMDb62. We also observed a document in the IMDb62 dataset where the classifier assigns similar prediction probabilities to two authors as presented in Figure 7 . The classifier put the same weight to function words and and to which represent two different classes of authors, 26 and not 26 (the LR classifier uses a one-versus-all scheme). The correct decision of the classifier is more likely helped by the presence of some less significant features such as is, becomes, There, usual and could. ", |
| "cite_spans": [ |
| { |
| "start": 315, |
| "end": 337, |
| "text": "(Ribeiro et al., 2016)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 610, |
| "end": 622, |
| "text": "Figures 3, 4", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 1342, |
| "end": 1350, |
| "text": "Figure 7", |
| "ref_id": "FIGREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": null |
| }, |
| { |
| "text": "These findings are further validated by applying them to a continuous n-grams-based authorship attribution model recently proposed by Sari et al. (2017) . They represented a document as a bag of n-grams features and learned the continuous representation of each feature jointly with the classifier in a shallow feed-forward neural network (Joulin et al., 2017) . Sari et al. conducted experiments with three different feature choices: characters, words and their combination. The character-based model outperformed the state-of-the-art on the CCAT50 and IMDb62 datasets, while producing comparable results on the remaining two.", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 152, |
| "text": "Sari et al. (2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 339, |
| "end": 360, |
| "text": "(Joulin et al., 2017)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extending a Neural Model", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We extend their character-based model by incorporating each feature type (style, content and hybrid) as an auxiliary feature represented in discrete form. Auxiliary features provide additional information related to the dataset characteristics. Given x aux as a normalized auxiliary features frequency vector, V is the weight applied to the features and f is the activation function (ReLu), the hidden layer h performs the following computation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extending a Neural Model", |
| "sec_num": "6" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h = f (V x aux )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Extending a Neural Model", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The probability distribution over the label for a document then can be described as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extending a Neural Model", |
| "sec_num": "6" |
| }, |
| { |
| "text": "p(y|x) = sof tmax(W out [Ax, h]) (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extending a Neural Model", |
| "sec_num": "6" |
| }, |
| { |
| "text": "where x is the frequency vector of features for the document, A is the embedding matrix, W out is the weight matrix of the output layer and [Ax, h] is the concatenation of Ax and h. All the character n-gram embeddings and hidden layer in the model were initialized using Glorot uniform initialization (Glorot and Bengio, 2010) . We used the best hyper-parameters values for each of the datasets which have been tuned in the development set via a small grid search over all combinations of embedding size and dropout rate (specifically dropout in the concatenation layer). The hidden size of hidden auxiliary layer was set to 2. For the rest of the hyper-parameters, we used the values from the baseline model (Sari et al., 2017) . For Judgment, CCAT10 and CCAT50, we set the number of epochs to 250 and 100 for IMDb62. For all datasets, early stopping was used on the development sets and the models were optimized with the Adam update rule (Kingma and Ba, 2015). Table 5 presents the results of the experiment and compares them against previously reported ones on the same data sets. In the bottom portion of the table it can be seen that for each of the four data sets there is at least one feature type which leads to improved results when it is incorporated into the model. Our results demonstrate that better performance can be achieved by taking the data characteristic into account on choosing authorship attribution features. Moreover, the results provide evidence that character n-grams which have been known as typical go-to features do not perform equally well in all types of datasets. For three datasets (CCAT10, CCAT50 and IMDb62) the best result is obtained using the feature type identified as being most useful in Section 5. However, we find that using the style features does not improve results on the Judgment dataset as we had expected. \nThe relatively poor performance of the style features may be due to the baseline model (the continuous character n-grams) which effectively captured all the author's writing style. Thus the addition of auxiliary style features did not lead to any improvement.", |
| "cite_spans": [ |
| { |
| "start": 301, |
| "end": 326, |
| "text": "(Glorot and Bengio, 2010)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 709, |
| "end": 728, |
| "text": "(Sari et al., 2017)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 964, |
| "end": 971, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Extending a Neural Model", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The results reported here for the CCAT50 and IMDb62 datasets outperform the previously best reported results (Sari et al., 2017) and the model reported here therefore represents a new state-of-the-art performance. The improvements for IMDb62 are statistically significant (p < 0.05, paired t-test).", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 128, |
| "text": "(Sari et al., 2017)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "This paper describes experiments on the relationship between the effectiveness of different types of features for authorship attribution and characteristics of datasets. We find that the most effective features for datasets can be predicted by applying topic modeling and feature analysis. Content-based features tend to be suitable for datasets with high topical diversity such as the one constructed from on-line news. Datasets with less topical variance, e.g. legal judgments and movie reviews, benefit more from stylebased features. The effectiveness of our proposed analysis is further validated by the performance of our proposed neural model which achieved the state-of-the-art results on two datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Judgment CCAT10 CCAT50 IMDb62", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "Previous work SVM with affix+punctuation 3-grams (Sapkota et al., 2015 ", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 70, |
| "text": "(Sapkota et al., 2015", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "Code to reproduce the experiments is available from https://github.com/yunitata/coling2018", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We do not assume linear scaling.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the anonymous reviewers for their insightful comments. The first author would like to acknowledge Indonesia Endowment Fund for Education (LPDP) for support in the form of a doctoral studentship.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Writeprints: A Stylometric Approach to Identity-level Identification and Similarity Detection in Cyberspace", |
| "authors": [ |
| { |
| "first": "Ahmed", |
| "middle": [], |
| "last": "Abbasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hsinchun", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "ACM Transactions on Information Systems", |
| "volume": "", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ahmed Abbasi and Hsinchun Chen. 2008. Writeprints: A Stylometric Approach to Identity-level Identification and Similarity Detection in Cyberspace. ACM Transactions on Information Systems (TOIS), 26(2).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Measuring the Usefulness of Function Words for Authorship Attribution", |
| "authors": [ |
| { |
| "first": "Shlomo", |
| "middle": [], |
| "last": "Argamon", |
| "suffix": "" |
| }, |
| { |
| "first": "Shlomo", |
| "middle": [], |
| "last": "Levitan", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 2005 ACH/ALLC Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "4--7", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shlomo Argamon and Shlomo Levitan. 2005. Measuring the Usefulness of Function Words for Authorship Attribution. In Proceedings of the 2005 ACH/ALLC Conference, pages 4-7.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Circumventing Authorship Recognition to Preserve Privacy and Anonymity", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Brennan", |
| "suffix": "" |
| }, |
| { |
| "first": "Sadia", |
| "middle": [], |
| "last": "Afroz", |
| "suffix": "" |
| }, |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Greenstadt", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "ACM Transactions on Information and System Security", |
| "volume": "15", |
| "issue": "3", |
| "pages": "1--22", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Brennan, Sadia Afroz, and Rachel Greenstadt. 2012. Circumventing Authorship Recognition to Preserve Privacy and Anonymity. ACM Transactions on Information and System Security, 15(3):1-22, November.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Elements of Information Theory", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Joy", |
| "middle": [ |
| "A" |
| ], |
| "last": "Cover", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Series in Telecommunications and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas M. Cover and Joy A. Thomas. 2006. Elements of Information Theory (Wiley Series in Telecommunications and Signal Processing). Wiley-Interscience.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Local Histograms of Character N-grams for Authorship Attribution", |
| "authors": [ |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Hugo Jair Escalante", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuel", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Montes-Y G\u00f3mez", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "288--298", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hugo Jair Escalante, Thamar Solorio, and Manuel Montes-y G\u00f3mez. 2011. Local Histograms of Character N-grams for Authorship Attribution. In Proceedings of the 49th Annual Meeting of the Association for Com- putational Linguistics: Human Language Technologies -Volume 1, HLT '11, pages 288-298, Stroudsburg, PA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Understanding The Difficulty of Training Deep Feedforward Neural Networks", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the International Conference on Artificial Intelligence and Statistics, AISTATS 2010", |
| "volume": "", |
| "issue": "", |
| "pages": "249--256", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understanding The Difficulty of Training Deep Feedforward Neural Networks. In Proceedings of the International Conference on Artificial Intelligence and Statistics, AISTATS 2010, pages 249-256, Chia Laguna Resort, Sardinia, Italy. Society for Artificial Intelligence and Statistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Quantitative Authorship Attribution: An Evaluation of Techniques", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jack", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Grieve", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Literary and Linguistic Computing", |
| "volume": "22", |
| "issue": "3", |
| "pages": "251--270", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jack. Grieve. 2007. Quantitative Authorship Attribution: An Evaluation of Techniques. Literary and Linguistic Computing, 22(3):251-270, May.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Unsupervised Detection of Anomalous Text", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Guthrie", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Guthrie. 2008. Unsupervised Detection of Anomalous Text. Ph.D. thesis, University of Sheffield.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Word vector enrichment of low frequency words in the bag-of-words model for short text multi-class classification problems", |
| "authors": [ |
| { |
| "first": "Bradford", |
| "middle": [], |
| "last": "Heap", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Bain", |
| "suffix": "" |
| }, |
| { |
| "first": "Wayne", |
| "middle": [], |
| "last": "Wobcke", |
| "suffix": "" |
| }, |
| { |
| "first": "Alfred", |
| "middle": [], |
| "last": "Krzywicki", |
| "suffix": "" |
| }, |
| { |
| "first": "Susanne", |
| "middle": [], |
| "last": "Schmeidl", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bradford Heap, Michael Bain, Wayne Wobcke, Alfred Krzywicki, and Susanne Schmeidl. 2017. Word vector enrichment of low frequency words in the bag-of-words model for short text multi-class classification problems. CoRR, abs/1709.05778.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "GLAD: Groningen Lightweight Authorship Detection-Notebook for PAN at CLEF", |
| "authors": [ |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "H\u00fcrlimann", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Weck", |
| "suffix": "" |
| }, |
| { |
| "first": "Esther", |
| "middle": [], |
| "last": "Van Den", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Berg", |
| "suffix": "" |
| }, |
| { |
| "first": "Malvina", |
| "middle": [], |
| "last": "Simon\u0161uster", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nissim", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "CLEF 2015 Evaluation Labs and Workshop -Working Notes Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manuela H\u00fcrlimann, Benno Weck, Esther van den Berg, Simon\u0160uster, and Malvina Nissim. 2015. GLAD: Groningen Lightweight Authorship Detection-Notebook for PAN at CLEF 2015. In Linda Cappellato, Nicola Ferro, Gareth Jones, and Eric San Juan, editors, CLEF 2015 Evaluation Labs and Workshop -Working Notes Papers, 8-11 September, Toulouse, France. CEUR-WS.org, September.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Bag of tricks for efficient text classification", |
| "authors": [ |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "427--431", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov. 2017. Bag of tricks for efficient text classi- fication. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 427-431, Valencia, Spain, April. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A Controlled-corpus Experiment in Authorship Identification by Crossentropy. Literary and Linguistic Computing", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Juola", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Baayen", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Juola and RH Baayen. 2005. A Controlled-corpus Experiment in Authorship Identification by Cross- entropy. Literary and Linguistic Computing, pages 1-10.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "An Overview of the Traditional Authorship Attribution Subtask", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Juola", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "CLEF 2012 Evaluation Labs and Workshop -Working Notes Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Juola. 2012. An Overview of the Traditional Authorship Attribution Subtask. In Pamela Forner, Jussi Karlgren, and Christa Womser-Hacker, editors, CLEF 2012 Evaluation Labs and Workshop -Working Notes Papers, 17-20 September, Rome, Italy, September.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Adam: A Method for Stochastic Optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceeding of the 3 rd International Conference for Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. In Proceeding of the 3 rd International Conference for Learning Representations, ICLR 2015, San Diego, CA, May.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Automatically Determining An Anonymous Author's Native Language", |
| "authors": [ |
| { |
| "first": "Moshe", |
| "middle": [], |
| "last": "Koppel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Schler", |
| "suffix": "" |
| }, |
| { |
| "first": "Kfir", |
| "middle": [], |
| "last": "Zigdon", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Intelligence and Security Informatics", |
| "volume": "", |
| "issue": "", |
| "pages": "209--217", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moshe Koppel, Jonathan Schler, and Kfir Zigdon. 2005. Automatically Determining An Anonymous Author's Native Language. Intelligence and Security Informatics, pages 209-217.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Authorship Attribution in The Wild. Language Resources and Evaluation", |
| "authors": [ |
| { |
| "first": "Moshe", |
| "middle": [], |
| "last": "Koppel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Schler", |
| "suffix": "" |
| }, |
| { |
| "first": "Shlomo", |
| "middle": [], |
| "last": "Argamon", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "45", |
| "issue": "", |
| "pages": "83--94", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moshe Koppel, Jonathan Schler, and Shlomo Argamon. 2011. Authorship Attribution in The Wild. Language Resources and Evaluation, 45(1):83-94, March.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The Characteristic Curves of Composition", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "C" |
| ], |
| "last": "Mendenhall", |
| "suffix": "" |
| } |
| ], |
| "year": 1887, |
| "venue": "Science", |
| "volume": "IX", |
| "issue": "", |
| "pages": "37--49", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T.C. Mendenhall. 1887. The Characteristic Curves of Composition. Science, IX:37-49.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Inference and Disputed Authorship: The Federalist", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Mosteller", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wallace", |
| "suffix": "" |
| } |
| ], |
| "year": 1964, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F Mosteller and D. L Wallace. 1964. Inference and Disputed Authorship: The Federalist. Addison Wesley.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Scikitlearn: Machine learning in Python", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Pedregosa", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Varoquaux", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Gramfort", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Thirion", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Grisel", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Blondel", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Prettenhofer", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Dubourg", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Vanderplas", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Passos", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Cournapeau", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Brucher", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Perrot", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Duchesnay", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2825--2830", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. 2011. Scikit- learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Language Independent Authorship Attribution using Character Level Language Models", |
| "authors": [ |
| { |
| "first": "Fuchun", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Dale", |
| "middle": [], |
| "last": "Schuurmanst", |
| "suffix": "" |
| }, |
| { |
| "first": "Vlado", |
| "middle": [], |
| "last": "Kesel", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the tenth conference on European chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fuchun Peng, Dale Schuurmanst, Vlado Kesel, and Shaojun Wan. 2003. Language Independent Authorship Attribution using Character Level Language Models. In Proceedings of the tenth conference on European chapter of the Association for Computational Linguistics, Budapest, Hungary.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Tensor Space Models for Authorship Identification", |
| "authors": [ |
| { |
| "first": "Spyridon", |
| "middle": [], |
| "last": "Plakias", |
| "suffix": "" |
| }, |
| { |
| "first": "Efstathios", |
| "middle": [], |
| "last": "Stamatatos", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 5th Hellenic Conference on Artificial Intelligence: Theories, Models and Applications, SETN '08", |
| "volume": "", |
| "issue": "", |
| "pages": "239--249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Spyridon Plakias and Efstathios Stamatatos. 2008. Tensor Space Models for Authorship Identification. In Pro- ceedings of the 5th Hellenic Conference on Artificial Intelligence: Theories, Models and Applications, SETN '08, pages 239-249, Berlin, Heidelberg. Springer-Verlag.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Why Should I Trust You?\": Explaining the Predictions of Any Classifier", |
| "authors": [ |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Carlos", |
| "middle": [], |
| "last": "Guestrin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "97--101", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marco Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. \"Why Should I Trust You?\": Explaining the Predictions of Any Classifier. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, pages 97-101, San Diego, California, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The Reuters Corpus -from Yesterday's News to Tomorrow's Language Resources", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Rose", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Whitehead", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Third International Conference on Language Resources and Evaluation (LREC-02)", |
| "volume": "", |
| "issue": "", |
| "pages": "827--832", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "T. Rose, M. Stevenson, and M. Whitehead. 2002. The Reuters Corpus -from Yesterday's News to Tomorrow's Language Resources. In Proceedings of the Third International Conference on Language Resources and Eval- uation (LREC-02), pages 827-832, Las Palmas, Canary Islands.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Not All Character N-grams Are Created Equal: A Study in Authorship Attribution", |
| "authors": [ |
| { |
| "first": "Upendra", |
| "middle": [], |
| "last": "Sapkota", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuel", |
| "middle": [], |
| "last": "Montes", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "93--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Upendra Sapkota, Steven Bethard, Manuel Montes, and Thamar Solorio. 2015. Not All Character N-grams Are Created Equal: A Study in Authorship Attribution. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 93-102, Denver, Colorado, May-June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Continuous N-gram Representations for Authorship Attribution", |
| "authors": [ |
| { |
| "first": "Yunita", |
| "middle": [], |
| "last": "Sari", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "267--273", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yunita Sari, Andreas Vlachos, and Mark Stevenson. 2017. Continuous N-gram Representations for Authorship At- tribution. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 267-273, Valencia, Spain, April. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Authorship Attribution of Micro-Messages", |
| "authors": [ |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Tsur", |
| "suffix": "" |
| }, |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Rappoport", |
| "suffix": "" |
| }, |
| { |
| "first": "Moshe", |
| "middle": [], |
| "last": "Koppel", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceeding of Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1880--1891", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roy Schwartz, Oren Tsur, Ari Rappoport, and Moshe Koppel. 2013. Authorship Attribution of Micro-Messages. In Proceeding of Conference on Empirical Methods in Natural Language Processing, pages 1880-1891, Seattle, USA.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Collaborative Inference of Sentiments from Texts", |
| "authors": [ |
| { |
| "first": "Yanir", |
| "middle": [], |
| "last": "Seroussi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingrid", |
| "middle": [], |
| "last": "Zukerman", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabian", |
| "middle": [], |
| "last": "Bohnert", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "195--206", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yanir Seroussi, Ingrid Zukerman, and Fabian Bohnert, 2010. Collaborative Inference of Sentiments from Texts, pages 195-206. Springer Berlin Heidelberg, Berlin, Heidelberg.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Ghosts from the High Courts past: Evidence from computational linguistics for Dixon ghosting for McTiernan and Rich", |
| "authors": [ |
| { |
| "first": "Yanir", |
| "middle": [], |
| "last": "Seroussi", |
| "suffix": "" |
| }, |
| { |
| "first": "Russell", |
| "middle": [], |
| "last": "Smyth", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingrid", |
| "middle": [], |
| "last": "Zukerman", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "34", |
| "issue": "", |
| "pages": "984--1005", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yanir Seroussi, Russell Smyth, and Ingrid Zukerman. 2011. Ghosts from the High Courts past: Evidence from computational linguistics for Dixon ghosting for McTiernan and Rich. University of New South Wales Law Journal, 34(3):984-1005.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Authorship Attribution with Topic Models", |
| "authors": [ |
| { |
| "first": "Yanir", |
| "middle": [], |
| "last": "Seroussi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingrid", |
| "middle": [], |
| "last": "Zukerman", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabian", |
| "middle": [], |
| "last": "Bohnert", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Journal Computational Linguistics", |
| "volume": "40", |
| "issue": "2", |
| "pages": "269--310", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yanir Seroussi, Ingrid Zukerman, and Fabian Bohnert. 2013. Authorship Attribution with Topic Models. Journal Computational Linguistics, 40(2):269-310.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Convolutional Neural Networks for Authorship Attribution of Short Texts", |
| "authors": [ |
| { |
| "first": "Prasha", |
| "middle": [], |
| "last": "Shrestha", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Sierra", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Gonzalez", |
| "suffix": "" |
| }, |
| { |
| "first": "Manuel", |
| "middle": [], |
| "last": "Montes", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "Thamar", |
| "middle": [], |
| "last": "Solorio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter", |
| "volume": "2", |
| "issue": "", |
| "pages": "669--674", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Prasha Shrestha, Sebastian Sierra, Fabio Gonzalez, Manuel Montes, Paolo Rosso, and Thamar Solorio. 2017. Convolutional Neural Networks for Authorship Attribution of Short Texts. In Proceedings of the 15th Confer- ence of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 669-674, Valencia, Spain, April. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Author identification: Using Text Sampling to Handle The Class Imbalance Problem. Information Processing and Management", |
| "authors": [ |
| { |
| "first": "Efstathios", |
| "middle": [], |
| "last": "Stamatatos", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "44", |
| "issue": "", |
| "pages": "790--799", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Efstathios Stamatatos. 2008. Author identification: Using Text Sampling to Handle The Class Imbalance Problem. Information Processing and Management, 44(2):790-799.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "A Survey of Modern Authorship Attribution Methods", |
| "authors": [ |
| { |
| "first": "Efstathios", |
| "middle": [], |
| "last": "Stamatatos", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Journal of the American Society for Information Science and Technology", |
| "volume": "60", |
| "issue": "3", |
| "pages": "538--556", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Efstathios Stamatatos. 2009. A Survey of Modern Authorship Attribution Methods. Journal of the American Society for Information Science and Technology, 60(3):538-556, March.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "On The Robustness of Authorship Attribution based on Character n-gram Features", |
| "authors": [ |
| { |
| "first": "Efstathios", |
| "middle": [], |
| "last": "Stamatatos", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Journal of Law and Policy", |
| "volume": "21", |
| "issue": "2", |
| "pages": "421--439", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Efstathios Stamatatos. 2013. On The Robustness of Authorship Attribution based on Character n-gram Features. Journal of Law and Policy, 21(2):421-439.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Character-level Convolutional Networks for Text Classification", |
| "authors": [ |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Junbo", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 28th International Conference on Neural Information Processing Systems", |
| "volume": "1", |
| "issue": "", |
| "pages": "649--657", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level Convolutional Networks for Text Classification. In Proceedings of the 28th International Conference on Neural Information Processing Systems -Volume 1, NIPS'15, pages 649-657, Montreal, Canada. MIT Press.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Effective and Scalable Authorship Attribution Using Function Words. Information Retrieval Technology", |
| "authors": [ |
| { |
| "first": "Ying", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Zobel", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "174--189", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ying Zhao and Justin Zobel. 2005. Effective and Scalable Authorship Attribution Using Function Words. Information Retrieval Technology, pages 174-189.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Author topic distribution (20 topics).", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Confusion Matrices of LR classifier with different feature types on CCAT10.", |
| "num": null |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Important word unigrams features in Judgment.", |
| "num": null |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Important word unigrams features in CCAT10.", |
| "num": null |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Important word unigrams features in CCAT50.", |
| "num": null |
| }, |
| "FIGREF5": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Important word unigrams features in IMDb62.", |
| "num": null |
| }, |
| "FIGREF6": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Explanation of individual predictions of Logistic Regression classifier on an IMDb62 document using LIME. The bar chart represents the weight given to the most relevant words which are also highlighted in the text.", |
| "num": null |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "text": "", |
| "html": null |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "text": "", |
| "html": null |
| }, |
| "TABREF6": { |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "text": "Feature ablation results.", |
| "html": null |
| }, |
| "TABREF8": { |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null, |
| "text": "Results with comparison against baseline and previous work.", |
| "html": null |
| } |
| } |
| } |
| } |