| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:02:01.985523Z" |
| }, |
| "title": "Identifying Authors Based on Stylometric measures of Vietnamese texts", |
| "authors": [ |
| { |
| "first": "Ho", |
| "middle": [], |
| "last": "Ngoc", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ho Chi Minh City University of Education VNUHCM-University of Science", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Lam", |
| "middle": [], |
| "last": "Vo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ho Chi Minh City University of Education VNUHCM-University of Science", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Diep", |
| "middle": [], |
| "last": "Nhu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ho Chi Minh City University of Education VNUHCM-University of Science", |
| "location": {} |
| }, |
| "email": "vodiepnhu@gmail.com" |
| }, |
| { |
| "first": "Dien", |
| "middle": [], |
| "last": "Dinh", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Tuyet", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nhung", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Author identification has many applications in investigating or resolving authorship disputes. Research on author identification has been conducted in many high resource languages, such as English, Chinese, Spanish, etc. However, for Vietnamese, studies are limited because of the lack of relevant language resources. This paper represents the topic of author identification with the application of stylometric methods: Mendenhall's characteristic curve, Kilgariff's squared method (Kilgariff's Chi-Squared), the Delta method of John Burrows. The study applied three different methods based on a corpus extracted from Vietnamese online newspapers, categorized by each author and achieved results from 50% to 100% depending on the method and number of linguistic features.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Author identification has many applications in investigating or resolving authorship disputes. Research on author identification has been conducted in many high resource languages, such as English, Chinese, Spanish, etc. However, for Vietnamese, studies are limited because of the lack of relevant language resources. This paper represents the topic of author identification with the application of stylometric methods: Mendenhall's characteristic curve, Kilgariff's squared method (Kilgariff's Chi-Squared), the Delta method of John Burrows. The study applied three different methods based on a corpus extracted from Vietnamese online newspapers, categorized by each author and achieved results from 50% to 100% depending on the method and number of linguistic features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "International integration, along with the exponential growth of the Internet, has led to an increase in plagiarism, imitation of celebrities' writing style, and copyright disputes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Due to the enormous amount of information, looking for the style and characteristics of written works in order to identify the author's style is a huge challenge. Globally, there have been numerous studies which find out models to identify the author's style in many languages. However, there are very few studies in natural language processing applying writing style in Vietnamese to attribute authorship.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Stylometry, beginning with attempts to settle authorship disputes, was first developed by Augustus De Morgan in 1851 based on word length. By the late 1880s, Thomas C. Mendenhall had analyzed the word length distribution for works written by Bacon, Marlowe, and Shakespeare to determine the true author of plays supposedly written by Shakespeare. In 1932, George Kingsley Zipf discovered the connection between ranking and the frequency of words, later stated in Zipf's law. In 1944, George Yule created a way to measure frequency of words, used to analyze vocabulary richness, namely Yule's characteristic. In the early 1960s, most research papers refer to Mosteller and Wallace's works on the Federalist Papers, which was considered as a basis of using computation in stylometry. In the next several decades, with the increasing number of digital texts, as well as the growth of the Internet, machine learning techniques, and neural networks, accessing information led to the development of natural language processing tools. Semantics continued to grow in the 21st century, and due to the overwhelming amount of information, copying texts also became more popular, leading to the growth of stylometry which is used in plagiarism detection, author identitication, author profiling, etc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we use a corpus of Vietnamese online texts to attribute authorship using the following measures: Mendenhall's characteristic curves, Kilgariff's Chi-Squared, John Burrows's Delta measure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Adversarial Stylometry: When translated, a piece of writing has its style imitated, and going through many translators makes its characteristics less distinct. These changes make detecting the original style more difficult.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Detecting stylistic similarities includes the following tasks:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Stylochronometry: In time, an author may change his/her writing style due to changes in vocabulary, lifestyle, environment, age, etc. Studies have sharp distinction because they depend on a language in a specific time period and on a particular author. Author Profiling: extracting the characteristics of a text to gain information about an author such as gender, age, region, time of writing. Authorship Verification: Based on characteristics readily available in the training data, determining whether two texts were written by the same author. Authorship Attribution: an individual or group of authors has characteristic styles that are developed subconsciously. Based on these distinctions, we will identify the true author(s) of texts in a corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In authorship identification using corpus-based approach, we use the NLTK Python package to process the corpus in order to execute the methods of author attribution. Due to limitations in the number of the texts per author, we will choose only 10 authors whose texts contain appropriate number of sentences and words and closely similar in size. Depending on methods and characteristic numbers, our results vary between 50% and 100%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimentation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We use a corpus of Vietnamese online texts, including 1304 texts extracted from several Vietnamese online newspapers (largely from VnExpress), Facebook, and blogs. These texts are written by 10 authors, who give their own opinion or share their own experiences on social issues. The corpus was pre-processed to eliminate links, images, captions, and tokenized semiautomatically. The process of tokenization was carried out with CLC toolkit, an automatic tool developed by Computational Linguistics Center (VNUHCM-University of Science). Then we manually checked the whole corpus and correct the mistakes. The number of texts and tokens of each author are displayed in Table 1 ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 668, |
| "end": 675, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Mendenhall once wrote that an author's \"stylistic signature\" could be found by measuring the frequency with which he or she used words of different lengths. These characteristic curves give results quickly and visually, allowing the researcher to draw a conclusion on the author's style. Applying this method, our group worked on our dataset of works by ten chosen authors. To standardize the size of the text while applying this method, we made the token number in works from each author's bibliography 58,088 token (punctuations removed). On each author's bibliography, we sequentially did the following: calculating the length of each token, calculating the frequency of calculated length in the bibliography, and visualize the data. Besides the visualized data, we use Caroll's index R to measure each author's lexical diversity to have an overview of style: = V: vocabulary size (number of word types) N: text size (number of word tokens) Figure 1 . Equation for lexical diversity Measure 2: Kilgariff's Chi-squared In the dataset whose authors are known, namely Known: let denote the file of i th candidate author Ki (i = 1, 2, \u2026 10)", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 944, |
| "end": 952, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Let denote the unknown author's file U. Calculate Chi-squared for each of the ten candidate authors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "1. First, build a joint corpus J, including Ki and U, and identify the 500 most frequent words in it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "2. Calculate the proportion of the joint corpus made up of the candidate author's tokens (AuShare).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "AuShare = len(token Ki)/len(token Jcorpus) 3. Look at the 500 most common words in the candidate author's corpus and compare the number of times they can be observed to what would be expected if the author's file and the disputed file were both random samples from the same distribution.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "4. Calculate how often we really see each of the 500 most common words, cw[x] (x = 1, 2, \u2026 500), in Ki and U respectively with:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "-Kcw_ob: observed number of cw in Ki -Kcw_ob: observed number of cw in U 5. Calculate how should we see each cw in Ki and U respectively with:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "-Kcw_ex: expected number of cw in Ki -Ucw_ex: expected number of cw in U 6. Calculate a chi-squared distance of Ki and U:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "2 = 2 + 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Respectively calculate chi-squared of Ki and U: Figure 2 . Equations for the chi-squared statistic of Ki and U. The smaller the chi-squared value, the more similar the two corpora. Therefore, we will calculate a chi-squared for the difference between each file of the candidate author dataset Known and disputed file U; the smaller value will indicate which of Known is the most similar to U.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 48, |
| "end": 56, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "2 = \u2211 ( _ \u2212 _ ) 2 _ 2 = \u2211 ( _ \u2212 _ ) 2 _", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Measure 3: John Burrows' Delta measure The Delta measure, proposed by John F. Burrows as a tool to solve the problem of copyright, measures the difference between two sets of text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "1. Combine all files in Known into a single corpus and get n frequency distribution words (test in n=20, n=30 respectively) 2. Calculating n[y] (y =1, 2, \u2026, n) presence for each subcorpus Ki.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "3. Calculating n[y] means ( y) and standard deviations ( y).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "4. Calculating z-scores:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "= \u2212", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Ci: the observed frequency i: means i: standard deviation Figure 3 . z-scores calculate the z-score in the test set. 5. Calculating features and z-scores for our test file 6. Calculating Delta Find Delta point to compare the test set with each author.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 58, |
| "end": 66, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2206 = \u2211 | ( ) \u2212 ( ) |", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Zc(i): z-score for feature i in subcorpus 'c' Zt(i): z-score for feature i in the test set Figure 4 . Delta measure", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 91, |
| "end": 99, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "The results are shown in Figure 5 . We observe that each author has the following features: Author59's longest word contains 17 characters, while that of Author203 and Author1050 only has 14.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 25, |
| "end": 33, |
| "text": "Figure 5", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Every author uses words having between 2 and 4 characters the most. The most prevalent word has 3 characters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Author1035 and Author1262 yield different results from the other authors. Each of them uses 3letter words the most, followed by 4-letter words instead of 2-letter words like the other eight authors. The authors' lexical diversity: When examined with the same 58,088 tokens, the author having the highest lexical diversity (Caroll index R) is Author1035 with 0.146 whereas the one with the lowest diversity is Author83 with 0.092. The results are shown in Table 2 . Measure 2: Kilgariff's Chi-squared Grieve (2007) assumed that the lower the chisquared measure between two texts, the more likely that they were written by the same author.", |
| "cite_spans": [ |
| { |
| "start": 500, |
| "end": 513, |
| "text": "Grieve (2007)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 455, |
| "end": 462, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Therefore, the known text giving the smallest Chisquared value would be written by the author most likely to have written the unknown text. Table 3 below shows the Chi-squared results when we tested the text sample for each of the authors.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 140, |
| "end": 147, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Measure 3: John Burrows' Delta Table 4 and Table 5 display the results of Delta measure when we tested on each of the authors' text (according to the headings). Examined by rows, the smaller the Delta value is, the closer to the author's style the test work is.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 31, |
| "end": 38, |
| "text": "Table 4", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 43, |
| "end": 50, |
| "text": "Table 5", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "After we tested on 30 signatures, the result yields 40%, matching the prediction on 4 out of 10 authors: Author59, Author97, Author1262, Author1289. The results are shown in Table 4 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 174, |
| "end": 181, |
| "text": "Table 4", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "The 30 signatures include: 'l\u00e0 ', 'kh\u00f4ng', 'v\u00e0', 'c\u1ee7a', 'c\u00f3', 'm\u1ed9t', 'ng\u01b0\u1eddi', 't\u00f4i', 'nh\u1eefng', 'cho', '\u0111\u01b0\u1ee3c', 'c\u00e1c', 'th\u00ec', 'trong', 'v\u1edbi', '\u0111\u00f3', '\u0111\u00e3', 'c\u0169ng', '\u0111\u1ec3', 'ph\u1ea3i', 'm\u00e0', '\u1edf', 'nh\u01b0', 'khi', 'n\u00e0y', 'm\u00ecnh', '\u0111\u1ebfn', 'v\u1ec1', 's\u1ebd', '\u0111i'. After we tested on 20 signatures, the result yields 50%, matching the prediction on 5 out of 10 authors: Author83, Author203, Author1035, Author1262, Author1289. The results are shown in Table 5 . The 20 signatures include: 'l\u00e0 ', 'kh\u00f4ng', 'v\u00e0', 'c\u1ee7a', 'c\u00f3', 'm\u1ed9t', 'ng\u01b0\u1eddi', 't\u00f4i', 'nh\u1eefng', 'cho', '\u0111\u01b0\u1ee3c', 'c\u00e1c', 'th\u00ec', 'trong', 'v\u1edbi', '\u0111\u00f3', '\u0111\u00e3', 'c\u0169ng', '\u0111\u1ec3', 'ph\u1ea3i'. ", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 237, |
| "text": "', 'kh\u00f4ng', 'v\u00e0', 'c\u1ee7a', 'c\u00f3', 'm\u1ed9t', 'ng\u01b0\u1eddi', 't\u00f4i', 'nh\u1eefng', 'cho', '\u0111\u01b0\u1ee3c', 'c\u00e1c', 'th\u00ec', 'trong', 'v\u1edbi', '\u0111\u00f3', '\u0111\u00e3', 'c\u0169ng', '\u0111\u1ec3', 'ph\u1ea3i', 'm\u00e0', '\u1edf', 'nh\u01b0', 'khi', 'n\u00e0y', 'm\u00ecnh', '\u0111\u1ebfn', 'v\u1ec1', 's\u1ebd', '\u0111i'.", |
| "ref_id": null |
| }, |
| { |
| "start": 466, |
| "end": 607, |
| "text": "', 'kh\u00f4ng', 'v\u00e0', 'c\u1ee7a', 'c\u00f3', 'm\u1ed9t', 'ng\u01b0\u1eddi', 't\u00f4i', 'nh\u1eefng', 'cho', '\u0111\u01b0\u1ee3c', 'c\u00e1c', 'th\u00ec', 'trong', 'v\u1edbi', '\u0111\u00f3', '\u0111\u00e3', 'c\u0169ng', '\u0111\u1ec3', 'ph\u1ea3i'.", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 425, |
| "end": 432, |
| "text": "Table 5", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Measure 1: Mendenhall's characteristic curves", |
| "sec_num": null |
| }, |
| { |
| "text": "Among the three measures mentioned above, Delta measure does not yield good results as we expected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In Chi-square statistic, we convert everything to lowercase so that we won't count word tokens that begin with a capital letter because they appear at the beginning of a sentence and lowercased tokens of the same word as two different words. Sometimes this may cause a few errors, for example when a proper noun and a common noun are written the same way except for capitalization, but usually it increases accuracy. In addition, Chi-squared is a coarse method. For one thing, words that appear very frequently tend to carry a disproportionate amount of weight in the final calculation. Sometimes this is fine; other times, subtle differences in style represented by the ways in which authors use more unusual words will go unnoticed. (Laram\u00e9e, 2018).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The algorithm based on taking the number of the most common words (words with highest frequency) in the corpus as a feature. In the VnExpress corpus, we get texts from the \"Perspective\" section, which offers a wide variety of topics, such as finance, society, lifestyle, health, etc. Not all authors write about the same topics, and relativity among topics leads to an inconsistency of topics in the corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Even though we have processed on the sets with the same token number of each author, the disparity in topics may be the reason why the chosen features are biased towards certain authors, rather than representing the whole corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Research in authorship identification in Vietnamese text is uncommon despite its high applicability in many fields. In fact, researchers face difficulties in finding a corpus with sufficient size and information about authors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In this paper, we have presented three different measures of authorship identification; these are the basic methods of determining an author's style such as lexical diversity, number of characters in a word, and word frequency (to find the most frequent words). The Chi-squared measure yields 100% accuracy; whereas Burrows' Delta measure yields 40% accuracy with 30 features, and 50% accuracy with 20 features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In future research, we will the examining on a corpus with a wide variety of topics to increase lexical variety. At the same time, we will prepare a richer annotated corpus so as to work on authorship identification using machine learning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "4.1" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Bots and Gender Identification Based on Stylometry of Tweet Minimal Structure and n-grams Model", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [ |
| "I Valencia" |
| ], |
| "last": "Valencia", |
| "suffix": "" |
| }, |
| { |
| "first": "Helena", |
| "middle": [ |
| "Gomez" |
| ], |
| "last": "Adorno", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Stephens Rhodes", |
| "suffix": "" |
| }, |
| { |
| "first": "& Gibran Fuentes", |
| "middle": [], |
| "last": "Pineda", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex I. Valencia Valencia, Helena Gomez Adorno, Christopher Stephens Rhodes & Gibran Fuentes Pineda. 2019. Bots and Gender Identification Based on Stylometry of Tweet Minimal Structure and n-grams Model. Notebook for PAN at CLEF.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Cross-Domain Authorship Attribution Combining Instance-Based and Profile-Based Features. Notebook for PAN at CLEF", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Bacciu", |
| "suffix": "" |
| }, |
| { |
| "first": "La", |
| "middle": [], |
| "last": "Massimo", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugenio", |
| "middle": [ |
| "Nerio" |
| ], |
| "last": "Morgia", |
| "suffix": "" |
| }, |
| { |
| "first": "&", |
| "middle": [], |
| "last": "Nemmi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Valerio Neri", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Bacciu, Massimo La Morgia, Eugenio Nerio Nemmi & Valerio Neri. 2019. Cross- Domain Authorship Attribution Combining Instance-Based and Profile-Based Features. Notebook for PAN at CLEF.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Computational Stylometry and Machine Learning for Gender and Age Detection in Cyberbullying Texts", |
| "authors": [ |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Pascucci", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincenzo Masucci & Johanna", |
| "middle": [], |
| "last": "Monti", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antonio Pascucci, Vincenzo Masucci & Johanna Monti. 2019. Computational Stylometry and Machine Learning for Gender and Age Detection in Cyberbullying Texts. IEEE.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Stylochronometry: Timeline Prediction in Stylometric Analysis", |
| "authors": [ |
| { |
| "first": "Carmen", |
| "middle": [], |
| "last": "Klaussner", |
| "suffix": "" |
| }, |
| { |
| "first": "& Carl", |
| "middle": [], |
| "last": "Vogel", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carmen Klaussner & Carl Vogel. 2015. Stylochronometry: Timeline Prediction in Stylometric Analysis. Springer International Publishing, Switzerland.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Frequency in Language Memory, Attention and Learning", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Divjak", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Divjak, D. 2019. Frequency in Language Memory, Attention and Learning. Cambridge University Press.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Rolling stylometry", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Eder", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eder, M. 2015. Rolling stylometry. Oxford University Press on behalf of EADH.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Testing Burrows's Delta. Literary and Linguistic Computing", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "L" |
| ], |
| "last": "Hoover", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "19", |
| "issue": "", |
| "pages": "453--475", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hoover, D.L. 2004. Testing Burrows's Delta. Literary and Linguistic Computing. 19(4):453- 475.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Intrinsic Plagiarism Detection using Ngram Classes", |
| "authors": [ |
| { |
| "first": "Imene", |
| "middle": [], |
| "last": "Bensalem", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Salim Chikhi", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Imene Bensalem, Paolo Rosso & Salim Chikhi. 2014. Intrinsic Plagiarism Detection using N- gram Classes. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee & Kristina Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805v2" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee & Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. Cornell University, arXiv: 1810.04805v2.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Anonymous Author Similarity Identification", |
| "authors": [], |
| "year": 2014, |
| "venue": "IEEE Symposium on Security and Privacy", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyung-Ah Sohn, Alemu Molla Kebede & Kaleab Getaneh Tefrie. 2014. Anonymous Author Similarity Identification. IEEE Symposium on Security and Privacy.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Stylometry Detection Using Deep Learning", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Surendran", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [ |
| "P" |
| ], |
| "last": "Harilal", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Hrudya", |
| "suffix": "" |
| }, |
| { |
| "first": "&", |
| "middle": [ |
| "N K" |
| ], |
| "last": "Prabaharan Poornachandran", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Suchetha", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Surendran, O. P. Harilal, P. Hrudya, Prabaharan Poornachandran & N. K. Suchetha. 2017. Stylometry Detection Using Deep Learning. Springer Nature Singapore Pte Ltd.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Attributing authorship: An introduction", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Love", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Love, H. 2002. Attributing authorship: An introduction. Cambridge University Press, pp. 133.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Extension of Zipf's Law to Word and Character N-grams for English and Chinese", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [ |
| "I" |
| ], |
| "last": "Le Quan Ha", |
| "suffix": "" |
| }, |
| { |
| "first": "Ji", |
| "middle": [], |
| "last": "Sicilia-Garcia", |
| "suffix": "" |
| }, |
| { |
| "first": "&", |
| "middle": [ |
| "F J" |
| ], |
| "last": "Ming", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "The Association for Computational Linguistics and Chinese Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "77--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Le Quan Ha, E. I. Sicilia-Garcia, Ji Ming & F. J. Smith. 2003. Extension of Zipf's Law to Word and Character N-grams for English and Chinese. The Association for Computational Linguistics and Chinese Language Processing, 77-102.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "English-Vietnamese Cross-Language Paraphrase Identification Method", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le Thanh Nguyen & Dinh Dien", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Le Thanh Nguyen & Dinh Dien. 2019. English-Vietnamese Cross-Language Paraphrase Identification Method. Springer.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Vietnamese plagiarism detection method", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le Thanh Nguyen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Nguyen Xuan Toan & Dinh Dien", |
| "volume": "", |
| "issue": "", |
| "pages": "44--51", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3011077.3011109" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Le Thanh Nguyen, Nguyen Xuan Toan & Dinh Dien. 2016. Vietnamese plagiarism detection method. University of Florida, ACM, 44-51. https://doi.org/10.1145/3011077.3011109.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Deanonymizing Authors of Electronic Texts: A Survey on Electronic Text Stylometry", |
| "authors": [ |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "Khonji", |
| "suffix": "" |
| }, |
| { |
| "first": "& Youssef", |
| "middle": [], |
| "last": "Iraqi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mahmoud Khonji & Youssef Iraqi. 2017. De-anonymizing Authors of Electronic Texts: A Survey on Electronic Text Stylometry. Preprints.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The Characteristic Curves of Composition", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "C" |
| ], |
| "last": "Mendenhall", |
| "suffix": "" |
| } |
| ], |
| "year": 1887, |
| "venue": "Science", |
| "volume": "9", |
| "issue": "214", |
| "pages": "237--249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mendenhall, T. C. 1887. The Characteristic Curves of Composition. Science, 9(214): 237-249.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Doppelg\u00e4nger Finder: Taking Stylometry To The Underground. IEEE Symposium on Security and Privacy", |
| "authors": [ |
| { |
| "first": "Sadia", |
| "middle": [], |
| "last": "Afroz", |
| "suffix": "" |
| }, |
| { |
| "first": "Aylin", |
| "middle": [], |
| "last": "Caliskan-Islam", |
| "suffix": "" |
| }, |
| { |
| "first": "Ariel", |
| "middle": [], |
| "last": "Stolerman", |
| "suffix": "" |
| }, |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Greenstadt & Damon", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mccoy", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sadia Afroz, Aylin Caliskan-Islam, Ariel Stolerman, Rachel Greenstadt & Damon McCoy. 2014. Doppelg\u00e4nger Finder: Taking Stylometry To The Underground. IEEE Symposium on Security and Privacy.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "text": "The Mendenhall's characteristic curves", |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "num": null, |
| "content": "<table/>", |
| "text": "Information of the corpus and test set", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "num": null, |
| "content": "<table><tr><td>Table 3. Chi-squared results</td></tr></table>", |
| "text": "Vocabulary Richness", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "num": null, |
| "content": "<table/>", |
| "text": "Experimental results of 30 most frequent lexemes", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "num": null, |
| "content": "<table/>", |
| "text": "Experimental results of 20 most frequent lexemes", |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |