| { |
| "paper_id": "C12-1011", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:25:50.932146Z" |
| }, |
| "title": "Text Reuse Detection Using a Composition of Text Similarity Measures", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "B\u00e4r", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Ubiquitous Knowledge Processing Lab (UKP-TUDA)", |
| "institution": "Technische Universit\u00e4t Darmstadt", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Torsten", |
| "middle": [], |
| "last": "Zesch", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Ubiquitous Knowledge Processing Lab (UKP-TUDA)", |
| "institution": "Technische Universit\u00e4t Darmstadt", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Ubiquitous Knowledge Processing Lab (UKP-TUDA)", |
| "institution": "Technische Universit\u00e4t Darmstadt", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Detecting text reuse is a fundamental requirement for a variety of tasks and applications, ranging from journalistic text reuse to plagiarism detection. Text reuse is traditionally detected by computing similarity between a source text and a possibly reused text. However, existing text similarity measures exhibit a major limitation: They compute similarity only on features which can be derived from the content of the given texts, thereby inherently implying that any other text characteristics are negligible. In this paper, we overcome this traditional limitation and compute similarity along three characteristic dimensions inherent to texts: content, structure, and style. We explore and discuss possible combinations of measures along these dimensions, and our results demonstrate that the composition consistently outperforms previous approaches on three standard evaluation datasets, and that text reuse detection greatly benefits from incorporating a diverse feature set that reflects a wide variety of text characteristics.", |
| "pdf_parse": { |
| "paper_id": "C12-1011", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Detecting text reuse is a fundamental requirement for a variety of tasks and applications, ranging from journalistic text reuse to plagiarism detection. Text reuse is traditionally detected by computing similarity between a source text and a possibly reused text. However, existing text similarity measures exhibit a major limitation: They compute similarity only on features which can be derived from the content of the given texts, thereby inherently implying that any other text characteristics are negligible. In this paper, we overcome this traditional limitation and compute similarity along three characteristic dimensions inherent to texts: content, structure, and style. We explore and discuss possible combinations of measures along these dimensions, and our results demonstrate that the composition consistently outperforms previous approaches on three standard evaluation datasets, and that text reuse detection greatly benefits from incorporating a diverse feature set that reflects a wide variety of text characteristics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Text reuse is a common phenomenon and arises, for example, on the Web from mirroring texts on different sites or reusing texts in public blogs. In other text collections such as content authoring systems of communities or enterprises, text reuse arises from keeping multiple versions, copies containing customizations or reformulations, or the use of template texts (Broder et al., 1997) .", |
| "cite_spans": [ |
| { |
| "start": 366, |
| "end": 387, |
| "text": "(Broder et al., 1997)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Problems with text reuse particularly arise in settings where systems are extensively used in a collaborative manner. For example, wikis are web-based, collaborative content authoring systems which offer fast and simple means for adding and editing content (Leuf and Cunningham, 2001) . At any time, users can modify content already present in the wiki, augment existing texts with new facts, ideas, or thoughts, or create new texts from scratch. However, when users contribute to wikis, they need to avoid content duplication. This requires comprehensive knowledge of what content is already present in the wiki, and what is not. As wikis are traditionally growing fast, this is hardly feasible, though. To remedy this issue, we aim at supporting authors of collaborative text collections by means of automatic text reuse detection. We envision a semi-supervised system that informs a content author of potentially pre-existing instances of text reuse, and then lets the author decide how to proceed, e.g. to merge both texts.", |
| "cite_spans": [ |
| { |
| "start": 257, |
| "end": 284, |
| "text": "(Leuf and Cunningham, 2001)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Detecting text reuse has been studied in a variety of tasks and applications, e.g. the detection of journalistic text reuse (Clough et al., 2002) , the identification of rewrite sources for ancient literary texts (Lee, 2007) , or the analysis of text reuse in blogs and web pages (Abdel-Hamid et al., 2009) . Another common instance of text reuse is plagiarism, with the additional constraint that the reuse needs to be unacknowledged. Near-duplicate detection is also a broad field of related work where the detection of text reuse is crucial, e.g. in the context of web search and crawling (Hoad and Zobel, 2003; Henzinger, 2006; Manku et al., 2007) . Prior work, however, mainly utilizes fingerprinting and hashing techniques (Charikar, 2002) for text comparison rather than methods from natural language processing.", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 145, |
| "text": "(Clough et al., 2002)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 213, |
| "end": 224, |
| "text": "(Lee, 2007)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 280, |
| "end": 306, |
| "text": "(Abdel-Hamid et al., 2009)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 592, |
| "end": 614, |
| "text": "(Hoad and Zobel, 2003;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 615, |
| "end": 631, |
| "text": "Henzinger, 2006;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 632, |
| "end": 651, |
| "text": "Manku et al., 2007)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 729, |
| "end": 745, |
| "text": "(Charikar, 2002)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A common approach to text reuse detection is to compute similarity between a source text and a possibly reused text. A multitude of text similarity measures have been proposed for computing similarity based on surface-level and/or semantic features (Mihalcea et al., 2006; Landauer et al., 1998; Gabrilovich and Markovitch, 2007) . However, existing similarity measures typically exhibit a major limitation: They compute similarity only on features which can be derived from the content of the given texts. By following this approach, they inherently imply that the similarity computation process does not need to take any other text characteristics into account.", |
| "cite_spans": [ |
| { |
| "start": 249, |
| "end": 272, |
| "text": "(Mihalcea et al., 2006;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 273, |
| "end": 295, |
| "text": "Landauer et al., 1998;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 296, |
| "end": 329, |
| "text": "Gabrilovich and Markovitch, 2007)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In contrast, we propose that text reuse detection indeed benefits from also assessing similarity along other text characteristics (dimensions, henceforth). We follow empirical evidence by B\u00e4r et al. (2011) and focus on three characteristic similarity dimensions inherent to texts: content, structure, and style. Figure 1 shows an example of text reuse taken from the Wikipedia Rewrite Corpus (see Section 3.1) where parts of a given source text have been reused either verbatim or by using similar words or phrases. As the example illustrates, the process of creating reused text includes a revision step in which the editor has a certain degree of freedom on how to reuse the source text. This kind of similarity is detectable by content-centric text similarity measures. However, the editor has further split the source text into two individual sentences and changed the order of the reused parts. For detecting the degree of similarity of such a revision, text similarity measures for structural similarity are necessary. Additionally, the given texts exhibit a certain degree of similarity with respect to stylistic features, e.g. vocabulary richness. 1 In (Clough and Stevenson, 2011) . Various parts of the source text have been reused, either verbatim (underlined) or using similar words or phrases (wavy underlined). However, the editor has split the source text into two individual sentences and changed the order of the reused parts.", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 205, |
| "text": "B\u00e4r et al. (2011)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1161, |
| "end": 1189, |
| "text": "(Clough and Stevenson, 2011)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 312, |
| "end": 320, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "order to use such features as indicators of text reuse, we propose to further include measures of stylistic similarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we thus overcome the traditional limitation of text similarity measures to content features. In contrast, we adopt ideas of seminal studies by cognitive scientists (Tversky, 1977; Goodman, 1972; G\u00e4rdenfors, 2000) and discuss the role of three similarity dimensions for the task of text reuse detection: content, structure, and style, as proposed in our previous work (B\u00e4r et al., 2011) . In Section 2, we report on a multitude of text similarity measures from these dimensions that we used for our experiments. In Section 3, we demonstrate empirically that text reuse can be best detected if measures are combined across dimensions, so that a wide variety of text characteristics are taken into consideration. Our approach consistently outperforms previous work on three standard evaluation datasets, and demonstrates the advantage of integrating text characteristics other than content into the similarity computation process.", |
| "cite_spans": [ |
| { |
| "start": 179, |
| "end": 194, |
| "text": "(Tversky, 1977;", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 195, |
| "end": 209, |
| "text": "Goodman, 1972;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 210, |
| "end": 227, |
| "text": "G\u00e4rdenfors, 2000)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 382, |
| "end": 400, |
| "text": "(B\u00e4r et al., 2011)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we report on a variety of similarity measures which we used to compute similarity along characteristic dimensions inherent to texts. 2 We classify them into measures for content similarity, structural similarity, and stylistic similarity, as proposed by B\u00e4r et al. (2011) .", |
| "cite_spans": [ |
| { |
| "start": 150, |
| "end": 151, |
| "text": "2", |
| "ref_id": null |
| }, |
| { |
| "start": 271, |
| "end": 288, |
| "text": "B\u00e4r et al. (2011)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text Similarity Measures", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Probably the easiest way to reuse text is verbatim copying. It can be detected by using string measures which operate on substring sequences. The longest common substring measure (Gusfield, 1997) compares the length of the longest contiguous sequence of characters between two texts, normalized by the text lengths. However, the editorial process in journalistic text reuse or the attempt to obfuscate copying in plagiarism may shorten the longest common substring considerably, e.g. when words are inserted or deleted, or parts of reused text appear in a different order. The longest common subsequence measure (Allison and Dix, 1986) drops the contiguity requirement and allows to detect text reuse in case of word insertions/deletions. Greedy String Tiling (Wise, 1996) further allows to deal with reordered parts of reused text as it determines a set of shared contiguous substrings between two given documents, each substring thereby being a match of maximal length. A multitude of other string similarity measures have been proposed which view texts as sequences of characters and compute their degree of distance according to a given metric. We used the following measures in our experiments: Jaro (1989) , Jaro-Winkler (Winkler, 1990) , Monge and Elkan (1997) , and Levenshtein (1966) .", |
| "cite_spans": [ |
| { |
| "start": 179, |
| "end": 195, |
| "text": "(Gusfield, 1997)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 612, |
| "end": 635, |
| "text": "(Allison and Dix, 1986)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 760, |
| "end": 772, |
| "text": "(Wise, 1996)", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 1200, |
| "end": 1211, |
| "text": "Jaro (1989)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1214, |
| "end": 1242, |
| "text": "Jaro-Winkler (Winkler, 1990)", |
| "ref_id": null |
| }, |
| { |
| "start": 1245, |
| "end": 1267, |
| "text": "Monge and Elkan (1997)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 1270, |
| "end": 1292, |
| "text": "and Levenshtein (1966)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Similarity", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Starting from the observation that not all words in a document are of equal importance, we further employed a similarity measure which weights all words by a tfidf scheme (Salton and McGill, 1983) and computes text similarity as the cosine between two document vectors.", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 196, |
| "text": "(Salton and McGill, 1983)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Similarity", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Comparing word n-grams (Lyon et al., 2001 ) is a popular means for comparing lexical patterns between two texts. The more similar the patterns, the more likely is it that text reuse has occurred. After compiling two sets of n-grams, we compared them using the Jaccard coefficient, following Lyon et al. (2001) , as well as using the containment measure (Broder, 1997) . We tested n-gram sizes for n = 1, 2, . . . , 15, and will use the original system name Ferret (Lyon et al., 2004) to refer to the variant with n = 3 using the Jaccard coefficient, henceforth.", |
| "cite_spans": [ |
| { |
| "start": 23, |
| "end": 41, |
| "text": "(Lyon et al., 2001", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 291, |
| "end": 309, |
| "text": "Lyon et al. (2001)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 353, |
| "end": 367, |
| "text": "(Broder, 1997)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 464, |
| "end": 483, |
| "text": "(Lyon et al., 2004)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Similarity", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Following the idea of comparing lexical patterns, we also used a measure which has not yet been considered for assessing content similarity: character n-gram profiles (Keselj et al., 2003) . 3 We follow the implementation by Barr\u00f3n-Cede\u00f1o et al. (2010) and discard all characters (case insensitive) which are not in the alphabet \u03a3 = {a, . . . , z, 0, . . . , 9}, then generate all n-grams on character level, weight them by a tfidf scheme, and finally compare the feature vectors of both the rewritten and the source text using the cosine measure. While in the original implementation only n = 3 was used, we generalize the measure to n = 2, 3, . . . , 15.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 188, |
| "text": "(Keselj et al., 2003)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 191, |
| "end": 192, |
| "text": "3", |
| "ref_id": null |
| }, |
| { |
| "start": 225, |
| "end": 252, |
| "text": "Barr\u00f3n-Cede\u00f1o et al. (2010)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Similarity", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In cases where the editor replaced content words by synonyms, string measures typically fail due to the vocabulary gap. We thus used similarity measures which are capable of measuring semantic similarity between words. We used the following word similarity measures with WordNet (Fellbaum, 1998) : Jiang and Conrath (1997) , Lin (1998) , and Resnik (1995) . In order to scale these pairwise word similarity scores to the document level, we follow the aggregation strategy by Mihalcea et al. (2006) : First, a directional similarity score sim d (T i , T j ) is computed from a text T i to a second text T j (Eq. 1). Therefore, for each word w i in T i , its best-matching counterpart in T j is sought (ma xSim(w i , T j )). The similarity scores of all these matches are summed up and weighted according to their inverse document frequency idf (Sp\u00e4rck Jones, 1972) , then normalized. The final document-level similarity figure is the average of applying this strategy in both directions, from T i to T j and vice-versa (Eq. 2).", |
| "cite_spans": [ |
| { |
| "start": 279, |
| "end": 295, |
| "text": "(Fellbaum, 1998)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 298, |
| "end": 322, |
| "text": "Jiang and Conrath (1997)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 325, |
| "end": 335, |
| "text": "Lin (1998)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 338, |
| "end": 355, |
| "text": "and Resnik (1995)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 475, |
| "end": 497, |
| "text": "Mihalcea et al. (2006)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 851, |
| "end": 863, |
| "text": "Jones, 1972)", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Similarity", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "sim_d(T_i, T_j) = \\frac{\\sum_{w_i \\in T_i} maxSim(w_i, T_j) \\cdot idf(w_i)}{\\sum_{w_i \\in T_i} idf(w_i)} \\quad (1) \\qquad sim(T_i, T_j) = \\frac{1}{2} \\left( sim_d(T_i, T_j) + sim_d(T_j, T_i) \\right) \\quad (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Similarity", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "We also tested text expansion mechanisms with the semantic word similarity measures described above: We used the Moses SMT system (Koehn et al., 2007) , trained on Europarl (Koehn, 2005) , to translate the original English texts via a bridge language (Dutch) back to English. Thereby, the idea was that in the translation process additional lexemes are introduced which alleviate potential lexical gaps. We computed pairwise word similarity with the measures described above and aggregated according to Mihalcea et al. (2006) .", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 150, |
| "text": "(Koehn et al., 2007)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 173, |
| "end": 186, |
| "text": "(Koehn, 2005)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 503, |
| "end": 525, |
| "text": "Mihalcea et al. (2006)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Similarity", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Furthermore, we used the statistical technique Latent Semantic Analysis (LSA) (Landauer et al., 1998 ) for comparing texts. The construction of the semantic space was done using the evaluation corpora (see Section 3). We also used the vector space model Explicit Semantic Analysis (ESA) (Gabrilovich and Markovitch, 2007) . Besides WordNet, we used two additional lexical-semantic resources for the construction of the ESA vector space: Wikipedia 4 and Wiktionary 5 .", |
| "cite_spans": [ |
| { |
| "start": 78, |
| "end": 100, |
| "text": "(Landauer et al., 1998", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 287, |
| "end": 321, |
| "text": "(Gabrilovich and Markovitch, 2007)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Content Similarity", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "As discussed above, we presume that content similarity alone is not a reliable indicator of text reuse. Two independently written texts about the same topic are likely to make use of a common vocabulary to a certain extent. We thus propose to also use measures of structural similarity which compute similarity based on structural aspects inherent to the compared texts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structural Similarity", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Stopword n-grams (Stamatatos, 2011) are based on the idea that text reuse often preserves syntactic similarities while exchanging content words. Thus, the measure removes all content words while preserving only stopwords. All n-grams of both texts are then compared using the containment measure (Broder, 1997) . We tested n-gram sizes for n = 2, 3, . . . , 15.", |
| "cite_spans": [ |
| { |
| "start": 17, |
| "end": 35, |
| "text": "(Stamatatos, 2011)", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 296, |
| "end": 310, |
| "text": "(Broder, 1997)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structural Similarity", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "For the same reason, we also included part-of-speech n-grams in our feature set. Disregarding the actual words that appear in two given texts, computing n-grams along part-of-speech tags allows to detect syntactic similarities between these texts. Again, we tested n-gram sizes for n = 2, 3, . . . , 15, and compared the two sets using the containment measure (Broder, 1997) .", |
| "cite_spans": [ |
| { |
| "start": 360, |
| "end": 374, |
| "text": "(Broder, 1997)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structural Similarity", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We also employed two similarity measures between pairs of words (Hatzivassiloglou et al., 1999) . The word pair order measure assumes that a similar syntactical structure in reused texts may cause two words to occur in the same order in both texts (with any number of words in between). The complementary word pair distance measure counts the number of words which lie between those of a given pair. For each measure, we computed feature vectors for both texts along all shared word pairs and compared the vectors using Pearson's correlation.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 95, |
| "text": "(Hatzivassiloglou et al., 1999)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Structural Similarity", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Measures of stylistic similarity adopt ideas from authorship attribution (Mosteller and Wallace, 1964) or use statistical properties of texts to compute text similarity. The type-token ratio (TTR) (Templin, 1957) , for example, compares the vocabulary richness of two texts. However, it suffers from sensitivity to variations in text length and the assumption of textual homogeneity (McCarthy and Jarvis, 2010): As a text gets longer, the increase of tokens is linear, while the increase of types steadily slows down. In consequence, lexical repetition causes the TTR value to vary, while it does not necessarily entail that a reader perceives changes in the vocabulary usage. Secondly, textual homogeneity is the assumption of the existence of a single lexical diversity level across a whole text, which may be violated by different rhetorical strategies. Sequential TTR (McCarthy and Jarvis, 2010) alleviates these shortcomings. It iteratively computes a TTR score for a dynamically growing text segment until a point of saturation -i.e. a fixed TTR score of .72 -is reached, then starts anew from that position in the text for a new segment. The final lexical diversity score is computed as the number of tokens divided by the number of segments.", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 102, |
| "text": "(Mosteller and Wallace, 1964)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 197, |
| "end": 212, |
| "text": "(Templin, 1957)", |
| "ref_id": "BIBREF53" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stylistic Similarity", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Inspired by Yule (1939) who discussed sentence length as a characteristic of style, we also used two simple measures, sentence length and token length, in our system. These measures compute the average number of tokens per sentence and the average number of characters per token. Additionally, we compared the average sentence and token lengths between the reused text and the original source. We refer to these measures as sentence ratio and token ratio, respectively.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 23, |
| "text": "Yule (1939)", |
| "ref_id": "BIBREF57" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stylistic Similarity", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Finally, we compare texts by their function word frequencies (Dinu and Popescu, 2009) which have shown to be good style indicators in authorship attribution studies. Following the original work, this measure uses a set of 70 function words identified by Mosteller and Wallace (1964) and computes feature vectors of their frequencies for each possibly reused document and the source text. The comparison of the vectors is then performed using Pearson's correlation.", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 85, |
| "text": "(Dinu and Popescu, 2009)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 254, |
| "end": 282, |
| "text": "Mosteller and Wallace (1964)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Stylistic Similarity", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We utilized three datasets for the evaluation of our system which originate in the fields of plagiarism detection, journalistic text reuse detection, and paraphrase recognition: the Wikipedia Rewrite Corpus (Clough and Stevenson, 2011) , the METER Corpus (Gaizauskas et al., 2001) , and the Webis Crowd Paraphrase Corpus (Burrows et al., 2012) , described below.", |
| "cite_spans": [ |
| { |
| "start": 207, |
| "end": 235, |
| "text": "(Clough and Stevenson, 2011)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 255, |
| "end": 280, |
| "text": "(Gaizauskas et al., 2001)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 321, |
| "end": 343, |
| "text": "(Burrows et al., 2012)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments & Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We carried out the same evaluation procedure for each of the three datasets: First, we computed text similarity scores between all pairs of possibly reused texts and their original sources using all the measures introduced in Section 2. We then used these scores as features for two machine learning classifiers in order to combine them across the three dimensions content, structure, and style. We experimented with two classifiers from the WEKA toolkit (Hall et al., 2009) : a Naive Bayes classifier and a C4.5 decision tree classifier (J48 implementation).", |
| "cite_spans": [ |
| { |
| "start": 455, |
| "end": 474, |
| "text": "(Hall et al., 2009)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments & Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In a 10-fold cross-validation setup, we ran three sets of experiments as follows: (i) First, we tested only the text similarity scores of one single measure at a time as single feature for the classifiers, in order to determine the individually best-performing measures per similarity We then combined the measures per dimension by using multiple text similarity scores as feature set, in order to determine the performance of multiple measures within a single dimension. (iii) Finally, we combined the measures across dimensions to determine the best overall configuration. We compare our results with two baselines: the majority class baseline and the word trigram similarity measure Ferret (Lyon et al., 2004 ) (see Section 2.1).", |
| "cite_spans": [ |
| { |
| "start": 693, |
| "end": 711, |
| "text": "(Lyon et al., 2004", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments & Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Additionally, we report the best results from the literature for comparison.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments & Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Evaluation was carried out in terms of accuracy and F 1 score. By accuracy, we refer to the number of correctly predicted texts divided by the total number of texts. As the class distributions in both datasets are skewed, we report the overall F 1 score as the arithmetic mean across the F 1 scores of all classes in order to account for the class imbalance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments & Results", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Dataset The dataset contains 100 pairs of short texts (193 words on average). For each of 5 questions about topics of computer science (e.g. \"What is dynamic programming?\"), a reference answer (source text, henceforth) has been manually created by copying portions of text from a suitable Wikipedia article. Text reuse now occurs between a source text and an answer given by one of 19 participants. The participants were asked to provide short answers, each of which should comply to one of 4 rewrite levels and hence reuse the source text to a varying extent. According to the degree of rewrite, the dataset is 4-way classified as cut & paste (38 texts; simple copy of text portions from the Wikipedia article), light revision (19; synonym substitutions and changes of grammatical structure allowed), heavy revision (19; rephrasing of Wikipedia excerpts using different words and structure), and no plagiarism (19; answer written independently from the Wikipedia article). An example of a heavy revision was given in Figure 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1018, |
| "end": 1026, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Wikipedia Rewrite Corpus", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We summarize the results on this dataset in Table 2 . 8 In the best configuration, when combining similarity measures across dimensions, our system achieves a performance of Clough and Stevenson (2011) by 5.4% points in terms ofF 1 score compared to their reported numbers, and by 15.3% points compared to our re-implementation of this system 7 . Their system uses a Naive Bayes classifier with only a very small feature set: word n-gram containment (n = 1, 2, . . . , 5) and longest common subsequence. For comparison, we re-implemented their system and also applied it to the two datasets in the remainder of this paper. We report our findings in Sections 3.2 and 3.3.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 55, |
| "text": "8", |
| "ref_id": null |
| }, |
| { |
| "start": 174, |
| "end": 201, |
| "text": "Clough and Stevenson (2011)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 44, |
| "end": 51, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "In Table 1 , we further report the detailed results for a selected set of individual text similarity measures, listed by similarity dimension. 9 Due to space limitations, we only report a selected set of best-performing measures per dimension and compare them with the baselines: While the majority class baseline performs very poor on this dataset (F 1 = .143), the Ferret baseline achievesF 1 = .517. Some content similarity measures such as word 2-grams containment show a reasonable performance (F 1 = .683), while structural measures cannot exceedF 1 = .554, and stylistic measures perform only slightly better than the majority class baseline (F 1 = .296).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "In Table 3 , we report the best results for the combinations of text similarity measures within and across dimensions. When we combine the measures within their respective dimensions, content outperforms structural and stylistic similarity. However, all combinations of measures across dimensions in addition to content similarity improve the results. The best performance is achieved by combining the three similarity measures longest common subsequence, stopword 10-grams, and character 5-gram profiles from the two dimensions content and structure. This supports our hypothesis that the similarity computation process indeed profits from dimensions other than content. The effects of dimension combination held true regardless of the classifier used, even though the decision tree classifier performed consistently better than Naive Bayes.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "Error Analysis We present the confusion matrix for our best configuration in Table 2 . In total, 15 texts out of 95 have been classified with the wrong label. While all texts except a single one in the class no plagiarism have been classified correctly, 67% of errors (10 texts) are due to misclassifications in the light and heavy revision classes. We assume that these errors are due to questionable gold standard annotations as the annotation guidelines for these two classes are highly similar (Clough and Stevenson, 2011) . For the light revision class, the annotators \"could alter the text in some basic ways\", thereby \"altering the grammatical structure (i.e. paraphrasing).\" Likewise, for the heavy revision class, the annotation manual expected the annotators to \"rephrase the text to generate an answer with the same meaning as the source text, but expressed using different words and structure.\"", |
| "cite_spans": [ |
| { |
| "start": 498, |
| "end": 526, |
| "text": "(Clough and Stevenson, 2011)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 77, |
| "end": 84, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "As each text of this dataset was written by only a single person for a given rewrite category, we decided to conduct an annotation study, in which we were mostly interested in the inter-rater agreement of the subjects. We asked 3 participants to rate the degree of text reuse and provided them with the original annotation guidelines. We used a generalization of Scott's (1955) \u03c0-measure for calculating a chance-corrected inter-rater agreement for multiple raters, which is known as Fleiss' (1971) \u03ba and Carletta's (1996) K. 10 In summary, the results 11 of our study support our hypothesis that the annotators mostly disagree for the light and heavy revision classes, with fair 12 agreements of \u03ba = .34 and \u03ba = .28, respectively. For the cut & paste and no plagiarism classes, we observe moderate 12 agreements, \u03ba = .53 and \u03ba = .56, respectively.", |
| "cite_spans": [ |
| { |
| "start": 363, |
| "end": 377, |
| "text": "Scott's (1955)", |
| "ref_id": "BIBREF49" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "Based on these insights, we decided to fold the light and heavy revision classes into a single class potential plagiarism. This approach was also briefly discussed by Clough and Stevenson (2011) , though not carried out in their work. We report the corresponding results and the confusion matrix in Table 4 . As the classification task gets easier by the reduction to three classes, the results for the Ferret baseline improve, fromF 1 = .517 toF 1 = .745. The re-implementation of the system by Clough and Stevenson (2011) achievesF 1 = .788. Our system again outperforms all other systems withF 1 = .859.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 194, |
| "text": "Clough and Stevenson (2011)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 496, |
| "end": 523, |
| "text": "Clough and Stevenson (2011)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 299, |
| "end": 306, |
| "text": "Table 4", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "In our envisioned semi-supervised application scenario, potentially reused texts are presented to users in an informative manner. Here, fine-grained distinctions are not necessary, and we decided to go even one step further and fold all potential cases of text reuse. This variant of the dataset results in a binary classification of plagiarized/non-plagiarized texts. We present 10 An exhaustive discussion of inter-rater agreement measures is given by Artstein and Poesio (2008 Table 6 : Results and confusion matrix for the best classification on the METER Corpus the results and the corresponding confusion matrix in Table 5 . In this simplified setting, even the Ferret baseline achieves an excellent performance ofF 1 = .935. Our approach still slightly outperforms (F 1 = .967) the re-implementation of the system by Clough and Stevenson (2011) .", |
| "cite_spans": [ |
| { |
| "start": 380, |
| "end": 382, |
| "text": "10", |
| "ref_id": null |
| }, |
| { |
| "start": 454, |
| "end": 479, |
| "text": "Artstein and Poesio (2008", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 824, |
| "end": 851, |
| "text": "Clough and Stevenson (2011)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 480, |
| "end": 487, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 621, |
| "end": 628, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "An interesting observation across all three variants of the dataset is that the same three texts always constitute severe error instances where e.g. a cut & paste text is falsely labeled as no plagiarism, which is more severe than mislabeling a light revision as a heavy revision. Two of the three cases account for the texts which describe the PageRank algorithm. One of these instances was falsely labeled as cut & paste while it is non-plagiarized, and the other one vice-versa. We attribute the misclassifications to the model built up in the classifier's training phase.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "In the envisioned semi-supervised setting, the remaining less severe error instances, where e.g. a light revision was classified as a heavy revision, can be reviewed by a user of the system. We suppose it is even hard for users to draw a strict line between possibly reused and non-reused texts, as this heavily depends on external effects such as user intentions and the task at hand.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "Dataset The dataset contains news sources from the UK Press Association (PA) and newspaper articles from 9 British newspapers that reused the PA source texts to generate their own texts. The complete dataset contains 1,716 texts from two domains: law & court and show business. All newspaper articles have been annotated whether they are wholly derived from the PA sources (i.e. the PA text has been used exclusively as text reuse source), partially derived (the PA text has been used in addition to other sources), or non-derived (the PA text has not been used at all).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "METER Corpus", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Several newspaper texts, though, have more than a single PA source in the original dataset where it is unclear which (if not all) of the source stories have been used to generate the rewritten story. However, for text reuse detection it is important to have aligned pairs of reused texts and source texts. Therefore, we followed S\u00e1nchez-Vega et al. (2010) and selected a subset of texts where only a single source story is present in the dataset. This leaves 253 pairs of short texts (205 words on average). We further followed S\u00e1nchez-Vega et al. (2010) and folded the annotations to a binary classification of 181 reused (wholly/partially derived) and 72 non-reused instances in order to carry out a comparable evaluation study.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "METER Corpus", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We summarize the results on this dataset in Table 6 . In the best configuration, our system achieves an overall performance ofF 1 = .768. It outperforms the best reference system by S\u00e1nchez-Vega et al. (2010) by 6.3% points in terms ofF 1 score. Their system uses a Naive Bayes classifier with two custom features which compare texts based on the length and frequency of common word sequences and the relevance of individual words. As in Section 3.1, we further report the detailed results for a selected set of individual text similarity measures Table 7 : Results of the best combinations of text similarity measures within and across dimensions on the METER Corpus in Table 1 . From these figures, we learn that many text similarity measures cannot exceed the simple majority class baseline (F 1 = .417) when applied individually.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 44, |
| "end": 51, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 548, |
| "end": 555, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 671, |
| "end": 678, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "In Table 7 , we show that the performance of text reuse detection always improves over individual measures (cf. Table 1 ) when we combine the measures within their respective dimensions. An exception is the combination of structural similarity measures, which only performs on the same level as the best individual measure part-of-speech 3-grams containment. Combinations of content similarity measures show a better performance than combinations of structural or stylistic measures. Our system achieves its best performance on this dataset when text similarity measures are combined across all three dimensions content, structure, and style. The best configuration resulted from using a Naive Bayes classifier with the following measures: Greedy String Tiling, stopword 12-grams, and Sequential TTR. As for the previous dataset, the effects of dimension combination held true regardless of the classifier used.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 112, |
| "end": 119, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "The influence of the stylistic similarity measures is particularly interesting to note. In contrast to the Wikipedia Rewrite Corpus, including these measures in the composition improves the results on this dataset: Our classifier is able to detect similarity even for reused texts by expert journalists. This is due to the fact that a journalistic text which reuses the original press agency source most likely also shows stylistic similarity in terms of e.g. vocabulary richness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "Error Analysis We present the confusion matrix for our best configuration in Table 6 . In total, 50 texts out of 253 have been classified incorrectly: 30 instances of text reuse have not been identified by the classifier, and 20 non-reused texts have been mistakenly labeled as such. However, the original annotations have been carried out by only a single annotator (Gaizauskas et al., 2001 ) which may have resulted in subjective judgments. Thus, as for the previous dataset in Section 3.1, we conducted an annotation study with three annotators to gain further insights into the data. The results 11 show that for 61% of all texts the annotators fully agree. The chance-corrected Fleiss' (1971) agreement \u03ba = .47 is moderate 12 .", |
| "cite_spans": [ |
| { |
| "start": 367, |
| "end": 391, |
| "text": "(Gaizauskas et al., 2001", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 77, |
| "end": 84, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "For the 30 instances of text reuse which have not been identified by the classifier, it is particularly interesting to note that many errors are due to the fact that a lower overall text similarity between the possibly reused text and the original source does not necessarily entail the label no reuse. The newspaper article about the English singer-songwriter Liam Gallagher, for example, is originally labeled as text reuse. However, our classifier falsely assigned the label no reuse. It turns out, though, that the reused text is about four times as long as the original press agency source, with lots of new facts being introduced there. Consequently, only a low similarity score can be computed between the additional material in the newspaper article and the original source, and the overall similarity score decreases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "We conclude that applications will benefit from an improved classifier which better deals with theses instances. For example, similarity features could be computed per section, not per document, which would allow to also identify potential instances of text reuse for only partially matching texts. The currently achieved performance (see Table 6 ) of text reuse detection, though, is sufficient for our envisioned semi-supervised application scenario where content authors are provided only with suggestions of potential instances of text reuse and then are free to decide how to proceed, e.g. to merge both texts. The final decision probably also depends on external factors such as user intentions and the task at hand.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 339, |
| "end": 346, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "Dataset The dataset was originally introduced as part of the PAN 2010 international plagiarism detection competition (Potthast et al., 2010) . It contains 7,859 pairs of original texts along with their paraphrases (28 to 954 words in length) with 4,067 (52%) positive and 3,792 (48%) negative samples. The original texts are book excerpts from Project Gutenberg 14 , and the corresponding paraphrases were acquired in a crowdsourcing process using Amazon Mechanical Turk (Callison-Burch and Dredze, 2010). In the manual filtering process 15 of all acquired paraphrases, Burrows et al. (2012) hereby follow the paraphrase definition by Boonthum (2004) , where a good paraphrase exhibits patterns such as synonym use, changes between active and passive voice, or changing word forms and parts of speech, and a bad paraphrase is rather e.g. a (near-)duplicate or an automated one-for-one word substitution. This definition implies that a more sophisticated interpretation of text similarity scores needs to be learned, where e.g. (near-)duplicates with very high similarity scores are in fact negative samples.", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 140, |
| "text": "(Potthast et al., 2010)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 570, |
| "end": 591, |
| "text": "Burrows et al. (2012)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 635, |
| "end": 650, |
| "text": "Boonthum (2004)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Webis Crowd Paraphrase Corpus", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We summarize the results on this dataset in Table 8 . Even though the Ferret baseline is a strong competitor (F 1 = .789), our approach achieves the best results on this dataset with F 1 = .852. The results reported by Burrows et al. (2012) are slightly worse (F 1 = .837). Their best score was achieved by using a k-nearest neighbor classifier with a feature set of 10 similarity measures. They exclusively used similarity measures that operate on the texts' string sequences and thus capture the content dimension of text similarity only, e.g. Levenshtein (1966) distance and a word n-gram similarity measure. As in the previous sections, we report the detailed results for a selected set of individual text similarity measures in As for the previous datasets, our hypothesis holds true that the combination of similarity dimensions improves the results: When we combine the similarity features within each of the respective dimensions, the performance numbers increase (see Table 9 as compared to Table 1 ). The combination of content similarity measures is stronger than the combination of structural and stylistic similarity measures, and performs on the same level as the original results reported by Burrows et al. (2012) . This is to be expected, as their system uses a feature set which also addresses the content dimension exclusively.", |
| "cite_spans": [ |
| { |
| "start": 219, |
| "end": 240, |
| "text": "Burrows et al. (2012)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 546, |
| "end": 564, |
| "text": "Levenshtein (1966)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 1207, |
| "end": 1228, |
| "text": "Burrows et al. (2012)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 44, |
| "end": 51, |
| "text": "Table 8", |
| "ref_id": "TABREF12" |
| }, |
| { |
| "start": 977, |
| "end": 984, |
| "text": "Table 9", |
| "ref_id": "TABREF14" |
| }, |
| { |
| "start": 1000, |
| "end": 1007, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "When we combine measures across dimensions, the results improve even further. An exception is the combination of content and structural measures, which performs slightly worse than content measures alone due to the lower performance of structural measures on this dataset. The best configuration of our system resulted from combining all three dimensions content, structure, and style in a single classification model using the decision tree classifier, resulting in F 1 = .852. The final feature set contains 16 text similarity features which are listed in Table 10 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 558, |
| "end": 566, |
| "text": "Table 10", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "Error Analysis We present the confusion matrix for our best classification in Table 8 . In total, 1,172 (15%) out of 7,859 text pairs have been classified incorrectly. Out of these, our classifier mistakenly labeled 759 instances of negative samples as true paraphrases, while 413 cases of true paraphrases were not recognized. However, in our opinion the 759 false positives are less severe errors in our envisioned semi-supervised application setting, as user intentions and the current task at hand may highly influence a user's decision to consider texts as reused or not.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 78, |
| "end": 85, |
| "text": "Table 8", |
| "ref_id": "TABREF12" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "In general, we attribute the errors to the particular properties of this dataset, which differ from those of the Wikipedia Rewrite Corpus and the METER Corpus (see Sections 3.1 and 3.2). For those two datasets, the more similar two texts are, the higher their degree of text reuse. For the Webis Crowd Paraphrase Corpus, however, a different interpretation needs to be learned by the classifier: Here, (near-)duplicates and texts with automated word-by-word substitutions, which will receive high similarity scores by any of our content similarity measures, are in fact annotated as bad paraphrases, i.e. negative samples. Unrelated texts, empty samples, or texts alike also belong to the class of negative samples. In consequence, positive samples are only those in the medium similarity range. We assume that the more elaborate definition of positive and negative cases makes it more difficult to learn a proper model for the given data. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": null |
| }, |
| { |
| "text": "The motivation for this work stemmed from the hypothesis that content features alone are not a reliable indicator for text reuse detection. As illustrated in Figure 1 , a reused text may also contain modifications such as split sentences, changed order of reused parts, or stylistic variance. We thus devised an architecture which composes diverse text similarity measures in a supervised classification model. In this model, we overcome the traditional limitation of text similarity measures to content features and compute similarity along three characteristic dimensions inherent to texts: content, structure, and style.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 166, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We evaluated our classification model on three standard datasets where text reuse is prevalent and which originate in the fields of plagiarism detection, journalistic text reuse detection, and paraphrase recognition: the Wikipedia Rewrite Corpus (Clough and Stevenson, 2011) , the METER Corpus (Gaizauskas et al., 2001) , and the Webis Crowd Paraphrase Corpus (Burrows et al., 2012) . Based on the evaluation results, we discussed the influence of each of the similarity dimensions, and demonstrated empirically that text reuse can be best detected if measures are combined across dimensions, so that a wide variety of text features are taken into consideration. The composition consistently outperforms previous approaches across all datasets.", |
| "cite_spans": [ |
| { |
| "start": 246, |
| "end": 274, |
| "text": "(Clough and Stevenson, 2011)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 294, |
| "end": 319, |
| "text": "(Gaizauskas et al., 2001)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 360, |
| "end": 382, |
| "text": "(Burrows et al., 2012)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "As we showed, similarity computation works best if the similarity dimensions are chosen well with respect to the type of text reuse at hand. For the Wikipedia Rewrite Corpus, for example, the stylistic similarity features perform only poorly, which is why the composition of all three dimensions performs slightly worse than than the combination of only content and structural features. For the other two datasets, however, stylistic similarity is a strong dimension within the composition, and consequently the best performance is reached when combining all three dimensions. Based on these insights, we conclude that for novel datasets it is essential to address the dimensions explicitly in the annotation process, so that text reuse detection approaches can be evaluated precisely against particular characteristics of different kinds of data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For future work, we expect that considering a dimensional representation of text similarity features will also benefit any other task where text similarity computation is fundamental and which is yet limited to content features, e.g. paraphrase recognition or automatic essay grading. For the latter, we see great potential for improvements by including, for example, measures for grammar analysis, lexical complexity, or measures assessing text organization with respect to the discourse elements. However, each task exhibits particular characteristics which influence the choice of a suitable set of similarity dimensions. As discussed above, a particular dimension may or may not contribute to an overall improvement based on the nature of the data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions and Future Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The type-token ratio(Templin, 1957) of the texts is .79 and .71, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In addition, we release an open-source framework which contains implementations of all discussed measures in order to stimulate the development of novel measures: http://code.google.com/p/dkpro-similarity-asl", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Traditionally, character n-gram profiles have rather been shown successful for authorship attribution. However, the similarity scores of word n-grams and those of character n-gram profiles are highly correlated: Assuming 5 characters per word on average for English texts(Shannon, 1951), we set n = 3 for word n-grams and n = 15 for character n-grams, and computed Pearson's correlation r between the corresponding similarity scores. We obtained r = .93 and r = .86 on the datasets introduced in Sections 3.1 and 3.2, respectively, and thus conclude that this measure captures content similarity rather than stylistic similarity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "www.wikipedia.org 5 www.wiktionary.org", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Chong et al. (2010) reportF 1 = .698 in their original work. This figure, however, reflects the weighted arithmetic mean over all four classes of the dataset where one class is twice as prominent as each of the others. As dicussed in Section 3, we report allF 1 scores as the unweighted arithmetic mean in order to account for the class imbalance.7 While we were able to reproduce the results of the Ferret baseline as reported byChong et al. (2010), our reimplementation of the system by Clough and Stevenson (2011) (Naive Bayes classifier, same feature set) resulted in a much lower overall performance. We observed the largest difference for the longest common subsequence measure, even though we used a standard implementation(Allison and Dix, 1986) and normalized as described byClough and Stevenson (2011).8 Figures in italics are taken from the literature, while we (re-)implemented the remaining systems. This applies to all result tables in this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Table 1also lists the detailed results for the METER Corpus and the Webis Crowd Paraphrase Corpus. We will discuss the numbers in the corresponding Sections 3.2 and 3.3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Strength of agreement for \u03ba values according toLandis and Koch (1977) 13 We report the results for our re-implementation of the system byClough and Stevenson (2011). In their original work, they did not evaluate on this dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "www.gutenberg.org15 Burrows et al. (2012) do not report any inter-annotator agreements for the filtering process, as the task was split across two annotators and each text pair was labeled by only a single annotator.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work has been supported by the Volkswagen Foundation as part of the Lichtenberg-Professorship Program under grant No. I/82806, and by the Klaus Tschira Foundation under project No. 00.133.2008. We thank Chris Biemann for his inspirations, as well as Carolin Deeg, Andriy Nadolskyy, and Artem Vovk for their participation in the annotation studies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Detecting the Origin of Text Segments Efficiently", |
| "authors": [ |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Abdel-Hamid", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Behzadi", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Christoph", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Henzinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 18th International Conference on World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "61--70", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abdel-Hamid, O., Behzadi, B., Christoph, S., and Henzinger, M. (2009). Detecting the Origin of Text Segments Efficiently. In Proceedings of the 18th International Conference on World Wide Web, pages 61-70, Madrid, Spain.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A bit-string longest-common-subsequence algorithm", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Allison", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [ |
| "I" |
| ], |
| "last": "Dix", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Information Processing Letters", |
| "volume": "23", |
| "issue": "", |
| "pages": "305--310", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Allison, L. and Dix, T. I. (1986). A bit-string longest-common-subsequence algorithm. Informa- tion Processing Letters, 23:305-310.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Inter-Coder Agreement for Computational Linguistics", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Artstein", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Computational Linguistics", |
| "volume": "34", |
| "issue": "4", |
| "pages": "555--596", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Artstein, R. and Poesio, M. (2008). Inter-Coder Agreement for Computational Linguistics. Computational Linguistics, 34(4):555-596.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A Reflective View on Text Similarity", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "B\u00e4r", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Zesch", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the International Conference on Recent Advances in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "515--520", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B\u00e4r, D., Zesch, T., and Gurevych, I. (2011). A Reflective View on Text Similarity. In Proceedings of the International Conference on Recent Advances in Natural Language Processing, pages 515-520, Hissar, Bulgaria.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Plagiarism Detection across Distant Language Pairs", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Barr\u00f3n-Cede\u00f1o", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 23rd International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "37--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barr\u00f3n-Cede\u00f1o, A., Rosso, P., Agirre, E., and Labaka, G. (2010). Plagiarism Detection across Distant Language Pairs. In Proceedings of the 23rd International Conference on Computational Linguistics, pages 37-45, Beijing, China.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "iSTART: Paraphrase Recognition", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Boonthum", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 42nd Meeting of the Association for Computational Linguistics: Student Research Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "31--36", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Boonthum, C. (2004). iSTART: Paraphrase Recognition. In Proceedings of the 42nd Meeting of the Association for Computational Linguistics: Student Research Workshop, pages 31-36, Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "On the resemblance and containment of documents", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "Z" |
| ], |
| "last": "Broder", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proceedings of Compression and Complexity of Sequences", |
| "volume": "", |
| "issue": "", |
| "pages": "21--29", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Broder, A. Z. (1997). On the resemblance and containment of documents. Proceedings of Compression and Complexity of Sequences, pages 21-29.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Syntactic clustering of the Web", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "Z" |
| ], |
| "last": "Broder", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "C" |
| ], |
| "last": "Glassman", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "S" |
| ], |
| "last": "Manasse", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Zweig", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proceedings of the 6th International World Wide Web Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "1157--1166", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Broder, A. Z., Glassman, S. C., Manasse, M. S., and Zweig, G. (1997). Syntactic clustering of the Web. In Proceedings of the 6th International World Wide Web Conference, pages 1157-1166, Santa Clara, CA, USA.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Paraphrase Acquisition via Crowdsourcing and Machine Learning", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Burrows", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Potthast", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Transactions on Intelligent Systems and Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "1--22", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Burrows, S., Potthast, M., and Stein, B. (2012). Paraphrase Acquisition via Crowdsourcing and Machine Learning. Transactions on Intelligent Systems and Technology, V(January):1-22.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Creating Speech and Language Data With Amazon's Mechanical Turk", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL HLT Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk", |
| "volume": "", |
| "issue": "", |
| "pages": "1--12", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Callison-Burch, C. and Dredze, M. (2010). Creating Speech and Language Data With Amazon's Mechanical Turk. In Proceedings of the NAACL HLT Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk, pages 1-12, Los Angeles, CA, USA.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Assessing Agreement on Classification Tasks: The Kappa Statistic", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Carletta", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Computational Linguistics", |
| "volume": "22", |
| "issue": "2", |
| "pages": "249--254", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carletta, J. (1996). Assessing Agreement on Classification Tasks: The Kappa Statistic. Compu- tational Linguistics, 22(2):249-254.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Similarity Estimation Techniques from Rounding Algorithms", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "S" |
| ], |
| "last": "Charikar", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 34th Annual Symposium on Theory of Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "380--388", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charikar, M. S. (2002). Similarity Estimation Techniques from Rounding Algorithms. In Proceedings of the 34th Annual Symposium on Theory of Computing, pages 380-388, Montreal, Canada.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Using Natural Language Processing for Automatic Detection of Plagiarism", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Chong", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Specia", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mitkov", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 4th International Plagiarism Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chong, M., Specia, L., and Mitkov, R. (2010). Using Natural Language Processing for Automatic Detection of Plagiarism. In Proceedings of the 4th International Plagiarism Conference, Newcastle upon Tyne, UK.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "METER: MEasuring TExt Reuse", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Clough", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "S" |
| ], |
| "last": "Piao", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wilks", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "152--159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Clough, P., Gaizauskas, R., Piao, S. S., and Wilks, Y. (2002). METER: MEasuring TExt Reuse. In Proceedings of 40th Annual Meeting of the Association for Computational Linguistics, pages 152-159, Philadelphia, PA, USA.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Developing a Corpus of Plagiarised Short Answers. Language Resources and Evaluation: Special Issue on Plagiarism and Authorship Analysis", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Clough", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stevenson", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "45", |
| "issue": "", |
| "pages": "5--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Clough, P. and Stevenson, M. (2011). Developing a Corpus of Plagiarised Short Answers. Lan- guage Resources and Evaluation: Special Issue on Plagiarism and Authorship Analysis, 45(1):5- 24.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Ordinal measures in authorship identification", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [ |
| "P" |
| ], |
| "last": "Dinu", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Popescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 3rd PAN Workshop. Uncovering Plagiarism, Authorship and Social Software Misuse", |
| "volume": "", |
| "issue": "", |
| "pages": "62--66", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dinu, L. P. and Popescu, M. (2009). Ordinal measures in authorship identification. In Proceedings of the 3rd PAN Workshop. Uncovering Plagiarism, Authorship and Social Software Misuse, pages 62-66, San Sebastian, Spain.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "WordNet: An Electronic Lexical Database", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Fellbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fellbaum, C. (1998). WordNet: An Electronic Lexical Database. MIT Press.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Measuring nominal scale agreement among many raters", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "L" |
| ], |
| "last": "Fleiss", |
| "suffix": "" |
| } |
| ], |
| "year": 1971, |
| "venue": "Psychological Bulletin", |
| "volume": "76", |
| "issue": "5", |
| "pages": "378--382", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fleiss, J. L. (1971). Measuring nominal scale agreement among many raters. Psychological Bulletin, 76(5):378-382.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Computing Semantic Relatedness using Wikipediabased Explicit Semantic Analysis", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Gabrilovich", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Markovitch", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 20th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "1606--1611", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gabrilovich, E. and Markovitch, S. (2007). Computing Semantic Relatedness using Wikipedia- based Explicit Semantic Analysis. In Proceedings of the 20th International Joint Conference on Artificial Intelligence, pages 1606-1611, Hyderabad, India.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "The METER Corpus: A corpus for analysing journalistic text reuse", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Foster", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Wilks", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Arundel", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Clough", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Piao", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the Corpus Linguistics 2001 Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "214--223", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gaizauskas, R., Foster, J., Wilks, Y., Arundel, J., Clough, P., and Piao, S. (2001). The METER Corpus: A corpus for analysing journalistic text reuse. In Proceedings of the Corpus Linguistics 2001 Conference, pages 214-223.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Conceptual Spaces: The Geometry of Thought", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "G\u00e4rdenfors", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G\u00e4rdenfors, P. (2000). Conceptual Spaces: The Geometry of Thought. MIT Press.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Seven strictures on similarity", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 1972, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "437--446", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Goodman, N. (1972). Seven strictures on similarity. In Goodman, N., editor, Problems and projects, pages 437-446. Bobbs-Merrill.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Algorithms on Strings, Trees and Sequences: Computer Science and Computational Biology", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Gusfield", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gusfield, D. (1997). Algorithms on Strings, Trees and Sequences: Computer Science and Computational Biology. Cambridge University Press.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The WEKA Data Mining Software: An Update. SIGKDD Explorations", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Holmes", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Pfahringer", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Reutemann", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [ |
| "H" |
| ], |
| "last": "Witten", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "11", |
| "issue": "", |
| "pages": "10--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hall, M., Frank, E., Holmes, G., Pfahringer, B., Reutemann, P., and Witten, I. H. (2009). The WEKA Data Mining Software: An Update. SIGKDD Explorations, 11(1):10-18.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Detecting text similarity over short passages: Exploring linguistic feature combinations via machine learning", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Hatzivassiloglou", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "L" |
| ], |
| "last": "Klavans", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Eskin", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of the Joint SIGDAT Conference on Empirical Methods in Natural Language Processing and Very Large Corpora", |
| "volume": "", |
| "issue": "", |
| "pages": "203--212", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hatzivassiloglou, V., Klavans, J. L., and Eskin, E. (1999). Detecting text similarity over short passages: Exploring linguistic feature combinations via machine learning. In Proceedings of the Joint SIGDAT Conference on Empirical Methods in Natural Language Processing and Very Large Corpora, pages 203-212, College Park, MD, USA.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Finding Near-Duplicate Web Pages: A Large-Scale Evaluation of Algorithms", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Henzinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 29th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "284--291", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Henzinger, M. (2006). Finding Near-Duplicate Web Pages: A Large-Scale Evaluation of Algorithms. In Proceedings of the 29th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 284-291, Seattle, WA, USA.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Methods for identifying versioned and plagiarized documents", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "C" |
| ], |
| "last": "Hoad", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Zobel", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of the American Society of Information Science and Technology", |
| "volume": "54", |
| "issue": "3", |
| "pages": "203--215", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hoad, T. C. and Zobel, J. (2003). Methods for identifying versioned and plagiarized documents. Journal of the American Society of Information Science and Technology, 54(3):203-215.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Advances in record linkage methodology as applied to the 1985 census of Tampa Florida", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "A" |
| ], |
| "last": "Jaro", |
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "Journal of the American Statistical Association", |
| "volume": "84", |
| "issue": "406", |
| "pages": "414--420", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jaro, M. A. (1989). Advances in record linkage methodology as applied to the 1985 census of Tampa Florida. Journal of the American Statistical Association, 84(406):414-420.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Semantic similarity based on corpus statistics and lexical taxonomy", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "J" |
| ], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "W" |
| ], |
| "last": "Conrath", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proceedings of the 10th International Conference on Research in Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiang, J. J. and Conrath, D. W. (1997). Semantic similarity based on corpus statistics and lexical taxonomy. In Proceedings of the 10th International Conference on Research in Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "N-gram-based author profiles for authorship attribution", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Keselj", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Cercone", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the Conference of the Pacific Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "255--264", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Keselj, V., Peng, F., Cercone, N., and Thomas, C. (2003). N-gram-based author profiles for authorship attribution. In Proceedings of the Conference of the Pacific Association for Computational Linguistics, pages 255-264.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Europarl: A Parallel Corpus for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 10th Machine Translation Summit", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koehn, P. (2005). Europarl: A Parallel Corpus for Statistical Machine Translation. In Proceedings of the 10th Machine Translation Summit, pages 79-86, Phuket Island, Thailand.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Moses: Open Source Toolkit for Statistical Machine Translation", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Federico", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Bertoldi", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Cowan", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Moran", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Zens", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Bojar", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Constantin", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Herbst", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Sessions", |
| "volume": "", |
| "issue": "", |
| "pages": "177--180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koehn, P., Hoang, H., Birch, A., Callison-Burch, C., Federico, M., Bertoldi, N., Cowan, B., Shen, W., Moran, C., Zens, R., Dyer, C., Bojar, O., Constantin, A., and Herbst, E. (2007). Moses: Open Source Toolkit for Statistical Machine Translation. In Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics Companion Volume Proceedings of the Demo and Poster Sessions, pages 177-180, Prague, Czech Republic.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "An introduction to latent semantic analysis", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "K" |
| ], |
| "last": "Landauer", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Laham", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Discourse Processes", |
| "volume": "25", |
| "issue": "", |
| "pages": "259--284", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Landauer, T. K., Foltz, P. W., and Laham, D. (1998). An introduction to latent semantic analysis. Discourse Processes, 25(2):259-284.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "The measurement of observer agreement for categorical data", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Landis", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "G" |
| ], |
| "last": "Koch", |
| "suffix": "" |
| } |
| ], |
| "year": 1977, |
| "venue": "Biometrics", |
| "volume": "33", |
| "issue": "1", |
| "pages": "159--174", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Landis, J. R. and Koch, G. G. (1977). The measurement of observer agreement for categorical data. Biometrics, 33(1):159-174.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "A Computational Model of Text Reuse in Ancient Literary Texts", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "472--479", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lee, J. (2007). A Computational Model of Text Reuse in Ancient Literary Texts. In Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, pages 472-479.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "The Wiki Way: Collaboration and Sharing on the Internet", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Leuf", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Cunningham", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leuf, B. and Cunningham, W. (2001). The Wiki Way: Collaboration and Sharing on the Internet. Addison-Wesley Professional.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Binary codes capable of correcting deletions, insertions, and reversals", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [ |
| "I" |
| ], |
| "last": "Levenshtein", |
| "suffix": "" |
| } |
| ], |
| "year": 1966, |
| "venue": "Soviet Physics Doklady", |
| "volume": "10", |
| "issue": "8", |
| "pages": "707--710", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Levenshtein, V. I. (1966). Binary codes capable of correcting deletions, insertions, and reversals. Soviet Physics Doklady, 10(8):707-710.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "An information-theoretic definition of similarity", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "296--304", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lin, D. (1998). An information-theoretic definition of similarity. In Proceedings of International Conference on Machine Learning, pages 296-304.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "A theoretical basis to the automated detection of copying between texts, and its practical implementation in the Ferret plagiarism and collusion detector", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Lyon", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Barrett", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Malcolm", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Plagiarism: Prevention, Practice and Policies Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lyon, C., Barrett, R., and Malcolm, J. (2004). A theoretical basis to the automated detection of copying between texts, and its practical implementation in the Ferret plagiarism and collusion detector. In In Plagiarism: Prevention, Practice and Policies Conference.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Detecting short passages of similar text in large document collections", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Lyon", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Malcolm", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Dickerson", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "118--125", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lyon, C., Malcolm, J., and Dickerson, B. (2001). Detecting short passages of similar text in large document collections. In Proceedings of Conference on Empirical Methods in Natural Language Processing, pages 118-125.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Detecting Near-Duplicates for Web Crawling", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [ |
| "S" |
| ], |
| "last": "Manku", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "D" |
| ], |
| "last": "Sarma", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 16th International World Wide Web Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "141--149", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manku, G. S., Jain, A., and Sarma, A. D. (2007). Detecting Near-Duplicates for Web Crawling. In Proceedings of the 16th International World Wide Web Conference, pages 141-149, Banff, AB, Canada.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "A validation study of sophisticated approaches to lexical diversity assessment. Behavior research methods", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mccarthy", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| ";" |
| ], |
| "last": "Jarvis", |
| "suffix": "" |
| }, |
| { |
| "first": "-D", |
| "middle": [], |
| "last": "Mtld", |
| "suffix": "" |
| }, |
| { |
| "first": "Hd-D", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "42", |
| "issue": "", |
| "pages": "381--392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "McCarthy, P. M. and Jarvis, S. (2010). MTLD, vocd-D, and HD-D: A validation study of sophisticated approaches to lexical diversity assessment. Behavior research methods, 42(2):381- 392.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Corpus-based and Knowledge-based Measures of Text Semantic Similarity", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Corley", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Strapparava", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st National Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "775--780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mihalcea, R., Corley, C., and Strapparava, C. (2006). Corpus-based and Knowledge-based Measures of Text Semantic Similarity. In Proceedings of the 21st National Conference on Artificial Intelligence, pages 775-780, Boston, MA, USA.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "An efficient domain-independent algorithm for detecting approximately duplicate database records", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Monge", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Elkan", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Proceedings of the SIGMOD Workshop on Data Mining and Knowledge Discovery", |
| "volume": "", |
| "issue": "", |
| "pages": "23--29", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Monge, A. and Elkan, C. (1997). An efficient domain-independent algorithm for detecting approximately duplicate database records. In Proceedings of the SIGMOD Workshop on Data Mining and Knowledge Discovery, pages 23-29, Tucson, AZ, USA.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Inference and disputed authorship: The Federalist", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Mosteller", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "L" |
| ], |
| "last": "Wallace", |
| "suffix": "" |
| } |
| ], |
| "year": 1964, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mosteller, F. and Wallace, D. L. (1964). Inference and disputed authorship: The Federalist. Addison-Wesley.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Overview of the 2nd International Competition on Plagiarism Detection", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Potthast", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Barr\u00f3n-Cede\u00f1o", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Eiselt", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Notebook Papers of CLEF 10 Labs and Workshops", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Potthast, M., Barr\u00f3n-Cede\u00f1o, A., Eiselt, A., Stein, B., and Rosso, P. (2010). Overview of the 2nd International Competition on Plagiarism Detection. In Notebook Papers of CLEF 10 Labs and Workshops, Padua, Italy.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Using Information Content to Evaluate Semantic Similarity in a Taxonomy", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of the 14th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "448--453", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Resnik, P. (1995). Using Information Content to Evaluate Semantic Similarity in a Taxonomy. In Proceedings of the 14th International Joint Conference on Artificial Intelligence, pages 448-453, Montreal, Canada.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Introduction to Modern Information Retrieval", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Salton", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Mcgill", |
| "suffix": "" |
| } |
| ], |
| "year": 1983, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Salton, G. and McGill, M. J. (1983). Introduction to Modern Information Retrieval. McGraw-Hill.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Towards Document Plagiarism Detection Based on the Relevance and Fragmentation of the Reused Text", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "S\u00e1nchez-Vega", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Villase\u00f1or-Pineda", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Montes-Y-G\u00f3mez", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Rosso", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 9th Mexican International Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "24--31", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S\u00e1nchez-Vega, F., Villase\u00f1or-Pineda, L., Montes-y-G\u00f3mez, M., and Rosso, P. (2010). Towards Document Plagiarism Detection Based on the Relevance and Fragmentation of the Reused Text. In Proceedings of the 9th Mexican International Conference on Artificial Intelligence, pages 24-31, Pachuca, Mexico.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Reliability of content analysis: The case of nominal scale coding", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [ |
| "A" |
| ], |
| "last": "Scott", |
| "suffix": "" |
| } |
| ], |
| "year": 1955, |
| "venue": "Public Opinion Quarterly", |
| "volume": "19", |
| "issue": "3", |
| "pages": "321--325", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Scott, W. A. (1955). Reliability of content analysis: The case of nominal scale coding. Public Opinion Quarterly, 19(3):321-325.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Prediction and Entropy of Printed English", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "E" |
| ], |
| "last": "Shannon", |
| "suffix": "" |
| } |
| ], |
| "year": 1951, |
| "venue": "Bell System Technical Journal", |
| "volume": "30", |
| "issue": "", |
| "pages": "50--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shannon, C. E. (1951). Prediction and Entropy of Printed English. Bell System Technical Journal, 30:50-64.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "A statistical interpretation of term specificity and its application in retrieval", |
| "authors": [ |
| { |
| "first": "Sp\u00e4rck", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1972, |
| "venue": "Journal of Documentation", |
| "volume": "28", |
| "issue": "1", |
| "pages": "11--21", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sp\u00e4rck Jones, K. (1972). A statistical interpretation of term specificity and its application in retrieval. Journal of Documentation, 28(1):11-21.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Plagiarism detection using stopword n-grams", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Stamatatos", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of the American Society for Information Science and Technology", |
| "volume": "62", |
| "issue": "12", |
| "pages": "2512--2527", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stamatatos, E. (2011). Plagiarism detection using stopword n-grams. Journal of the American Society for Information Science and Technology, 62(12):2512-2527.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Certain language skills in children", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "C" |
| ], |
| "last": "Templin", |
| "suffix": "" |
| } |
| ], |
| "year": 1957, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Templin, M. C. (1957). Certain language skills in children. University of Minnesota Press.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Features of Similarity", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Tversky", |
| "suffix": "" |
| } |
| ], |
| "year": 1977, |
| "venue": "Psychological Review", |
| "volume": "84", |
| "issue": "", |
| "pages": "327--352", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tversky, A. (1977). Features of Similarity. In Psychological Review, volume 84, pages 327-352.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "String Comparator Metrics and Enhanced Decision Rules in the Fellegi-Sunter Model of Record Linkage", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [ |
| "E" |
| ], |
| "last": "Winkler", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Proceedings of the Section on Survey Research Methods", |
| "volume": "", |
| "issue": "", |
| "pages": "354--359", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Winkler, W. E. (1990). String Comparator Metrics and Enhanced Decision Rules in the Fellegi- Sunter Model of Record Linkage. In Proceedings of the Section on Survey Research Methods, pages 354-359.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "YAP3: Improved detection of similarities in computer program and other texts", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "J" |
| ], |
| "last": "Wise", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the 27th SIGCSE technical symposium on Computer science education", |
| "volume": "", |
| "issue": "", |
| "pages": "130--134", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wise, M. J. (1996). YAP3: Improved detection of similarities in computer program and other texts. In Proceedings of the 27th SIGCSE technical symposium on Computer science education, pages 130-134, Philadelphia, PA, USA.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "On sentence-length as a statistical characteristic of style in prose: With application to two cases of disputed authorship", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [ |
| "U" |
| ], |
| "last": "Yule", |
| "suffix": "" |
| } |
| ], |
| "year": 1939, |
| "venue": "Biometrika", |
| "volume": "30", |
| "issue": "3/4", |
| "pages": "363--390", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yule, G. U. (1939). On sentence-length as a statistical characteristic of style in prose: With application to two cases of disputed authorship. Biometrika, 30(3/4):363-390.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "text": "PageRank is a link analysis algorithm used by the ::::: Google ::::::: Internet :::::: search ::::: engine that assigns a numerical weighting to :::: each ::::::: element of a ::::::::: hyperlinked ::: set ::: of :::::::: documents, such as the World Wide Web, with the purpose of \"measuring\" its relative importance within the set. Text Reuse. The PageRank algorithm is used to designate :::: every ::::: aspect of a :: set ::: of ::::::::: hyperlinked ::::::::: documents with a numerical weighting. It is used by the :::::: Google :::::: search :::::engine to estimate the relative importance of a web page according to this weighting.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "text": "Example of text reuse taken from the Wikipedia Rewrite Corpus", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF1": { |
| "text": "Performance of selected similarity measures on the Wikipedia Rewrite Corpus, the METER Corpus, and the Webis Crowd Paraphrase Corpus, grouped by similarity dimension", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "text": "Results and confusion matrix (expected class vs. classification result) for the best classification on the Wikipedia Rewrite Corpus for the original 4-way classification dimension. (ii)", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "text": "Results of the best combinations of text similarity measures within and across dimensions on the Wikipedia Rewrite CorpusF", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF7": { |
| "text": "Results and confusion matrix on the Wikipedia Rewrite Corpus for the folded 3-way classification", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>System</td><td colspan=\"2\">Acc.F 1</td><td/><td/><td/></tr><tr><td>Majority Class Baseline Ferret Baseline Clough and Stevenson (2011)</td><td>.600 .937</td><td>.375 .935</td><td>exp. plagiarism class.</td><td>plagiarism 55</td><td>no plag. 2</td></tr><tr><td>-our re-implementation -as reported</td><td>.958 .947</td><td>.957 n/a</td><td>no plag.</td><td>1</td><td>37</td></tr><tr><td>Our Approach</td><td>.968</td><td>.967</td><td/><td/><td/></tr></table>" |
| }, |
| "TABREF8": { |
| "text": "", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF9": { |
| "text": "http://www.ukp.tu-darmstadt.de/data/text-similarity/text-reuse-annotations", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>System</td><td colspan=\"2\">Acc.F 1</td><td/><td/><td/></tr><tr><td>Majority Class Baseline Ferret Baseline Clough and Stevenson (2011) 13 S\u00e1nchez-Vega et al. (2010)</td><td>.715 .684 .692 .783</td><td>.417 .535 .680 .705</td><td>class. reuse exp. no reuse</td><td>reuse 151 20</td><td>no reuse 30 52</td></tr><tr><td>Our Approach</td><td>.802</td><td>.768</td><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td>).</td></tr></table>" |
| }, |
| "TABREF12": { |
| "text": "Results and confusion matrix for the best classification on the Webis Crowd Paraphrase Corpus", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF13": { |
| "text": "These figures show that", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>Text Similarity Dimension</td><td colspan=\"2\">Acc.F 1</td></tr><tr><td>Combinations within dimensions</td><td/><td/></tr><tr><td>Content</td><td>.840</td><td>.839</td></tr><tr><td>Structure</td><td>.816</td><td>.814</td></tr><tr><td>Style</td><td>.819</td><td>.817</td></tr><tr><td>Combinations across dimensions</td><td/><td/></tr><tr><td>Content + Style Content + Structure Structure + Style Content + Structure + Style</td><td>.844 .838 .831 .853</td><td>.843 .838 .830 .852</td></tr></table>" |
| }, |
| "TABREF14": { |
| "text": "Results of the best combinations of text similarity measures within and across dimensions on the Webis Crowd Paraphrase Corpus regardless of the similarity dimension many measures achieve a very reasonable performance when applied individually, with the measures Greedy String Tiling and word 2-grams containment performing best.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF15": { |
| "text": "ContentESA (WordNet, with + w/o stopwords), Greedy String Tiling, Jaro, Longest Common Substring, Longest Common Subseq. (2 norm.), n-gram Jaccard (n = {6, 14, 15}), Resnik (SMT wrapper) Structure Lemma Pair Ordering, POS 2-grams Jaccard, Stopword 6-grams Style Function Word Frequencies, Sequential TTR, Token Ratio", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| }, |
| "TABREF16": { |
| "text": "Feature set used to achieve the best results on the Webis Crowd Paraphrase Corpus", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |