| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T02:10:38.337056Z" |
| }, |
| "title": "", |
| "authors": [], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This study investigates the robustness and stability of a likelihood ratio-based (LR-based) forensic text comparison (FTC) system against the size of background population data. Focus is centred on a score-based approach for estimating authorship LRs. Each document is represented with a bag-of-words model, and the Cosine distance is used as the score-generating function. A set of population data that differed in the number of scores was synthesised 20 times using the Monte-Carlo simulation technique. The FTC system's performance with different population sizes was evaluated by a gradient metric of the log-LR cost (Cllr). The experimental results revealed two outcomes: 1) that the score-based approach is rather robust against a small population size-in that, with the scores obtained from the 40~60 authors in the database, the stability and the performance of the system become fairly comparable to the system with a maximum number of authors (720); and 2) that poor performance in terms of Cllr, which occurred because of limited background population data, is largely due to poor calibration. The results also indicated that the score-based approach is more robust against data scarcity than the feature-based approach; however, this finding obliges further study.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This study investigates the robustness and stability of a likelihood ratio-based (LR-based) forensic text comparison (FTC) system against the size of background population data. Focus is centred on a score-based approach for estimating authorship LRs. Each document is represented with a bag-of-words model, and the Cosine distance is used as the score-generating function. A set of population data that differed in the number of scores was synthesised 20 times using the Monte-Carlo simulation technique. The FTC system's performance with different population sizes was evaluated by a gradient metric of the log-LR cost (Cllr). The experimental results revealed two outcomes: 1) that the score-based approach is rather robust against a small population size-in that, with the scores obtained from the 40~60 authors in the database, the stability and the performance of the system become fairly comparable to the system with a maximum number of authors (720); and 2) that poor performance in terms of Cllr, which occurred because of limited background population data, is largely due to poor calibration. The results also indicated that the score-based approach is more robust against data scarcity than the feature-based approach; however, this finding obliges further study.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The likelihood ratio (LR) conceptual framework has been studied for its effect on various types of forensic evidence; it was mathematically shown that, with some very reasonable assumptions, the LR is the only way of assessing the uncertainty inherited in evidential evaluation (Aitken, 2018; Aitken and Taroni, 2004; Good, 1991) . It is becoming recognised as the logical and legally correct framework for both analysing forensic evidence and presenting it in court (Balding, 2005; Evett et al., 1998; Marquis et al., 2011; Morrison, 2009; Neumann et al., 2007 ). Yet, some argue that the LR is one possible tool for communicating to decision makers (Lund and Iyer, 2017: 1) . Although forensic text comparison (FTC) currently lags behind other forensic sciences, some studies have demonstrated that linguistic text evidence can be properly analysed using the LR framework (Ishihara, 2014 (Ishihara, , 2017a (Ishihara, , 2017b .", |
| "cite_spans": [ |
| { |
| "start": 278, |
| "end": 292, |
| "text": "(Aitken, 2018;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 293, |
| "end": 317, |
| "text": "Aitken and Taroni, 2004;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 318, |
| "end": 329, |
| "text": "Good, 1991)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 467, |
| "end": 482, |
| "text": "(Balding, 2005;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 483, |
| "end": 502, |
| "text": "Evett et al., 1998;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 503, |
| "end": 524, |
| "text": "Marquis et al., 2011;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 525, |
| "end": 540, |
| "text": "Morrison, 2009;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 541, |
| "end": 561, |
| "text": "Neumann et al., 2007", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 651, |
| "end": 675, |
| "text": "(Lund and Iyer, 2017: 1)", |
| "ref_id": null |
| }, |
| { |
| "start": 874, |
| "end": 889, |
| "text": "(Ishihara, 2014", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 890, |
| "end": 908, |
| "text": "(Ishihara, , 2017a", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 909, |
| "end": 927, |
| "text": "(Ishihara, , 2017b", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the LR framework, instead of assessing the probabilities of two competing hypotheses given the evidence, the probabilities of observing the evidence (E) are assessed given the hypotheses: the prosecution hypothesis (Hp) against the defence hypothesis (Hd) (Aitken and Stoney, 1991; Aitken and Taroni, 2004; Robertson et al., 2016) . Therefore, the LR can be defined as in Equation (1).", |
| "cite_spans": [ |
| { |
| "start": 259, |
| "end": 284, |
| "text": "(Aitken and Stoney, 1991;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 285, |
| "end": 309, |
| "text": "Aitken and Taroni, 2004;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 310, |
| "end": 333, |
| "text": "Robertson et al., 2016)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "LR = \\frac{p(E|H_p)}{p(E|H_d)}", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the case of FTC, the LR is the ratio between the two conditional probabilities of the measured difference (considered the evidence E) between the source-known texts (i.e., from the suspect) and the source-questioned texts (i.e., from the offender): one represents the probability of the evidence if they had been produced by the same author (Hp), and the other represents the probability of observing the same evidence if they had originated from different authors (Hd).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Thus, the evidence E, which is the measured difference between two texts (x,y) can be expressed as \u0394(x,y). A bag-of-words model is used to represent each text in this study. Thus x and y stand for the vectors of relative word frequencies (z_i, i \u2208 {1 \u22ef N}, z \u2208 {x, y}) of the texts to be compared (x = {x_1, x_2 \u22ef x_N} and y = {y_1, y_2 \u22ef y_N}). Thus, Equation (1) can be rewritten as Equation 2, where f denotes a probability density function.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "LR = f(\u0394(x,y)|H_p) / f(\u0394(x,y)|H_d) = f(\u0394({x_1, x_2 \u22ef x_N}, {y_1, y_2 \u22ef y_N})|H_p) / f(\u0394({x_1, x_2 \u22ef x_N}, {y_1, y_2 \u22ef y_N})|H_d) (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The probability density functions under Hp and Hd need to be trained from a data set of scores.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Once a forensic scientist has estimated the LR as the weight of the evidence, the LR is then interpreted as a multiplicative factor by which the Bayesian theorem is used to update the prior odds (the factfinder's prior beliefs about the hypotheses) to the posterior odds (the factfinder's beliefs after observing the evidence). The factfinder (e.g., jury or judge) is thus responsible for quantifying the prior odds of the hypotheses, and the forensic scientist is responsible for estimating the LR. That is, the ultimate decision of a case (i.e., guilty or not guilty) is determined by the factfinder, who must update the prior odds to the posterior odds with the LR.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this study, LRs are estimated using a scorebased approach that has been extensively studied with several evidence types (Bolck et al., 2015; Hepler et al., 2012; Ramos et al., 2017) . An alternative to the score-based approach is the featurebased approach, which has been applied to authorship text evidence (Ishihara, 2014) . In score-based approaches, the likelihood of the score-which is usually quantified as a similarity/difference or a distance between paired samples that can be represented in the form of feature vector-is assessed against the probabilistic distributions from the same-source and different-source scores. This process is called score-to-LR conversion. The conversion model must be constructed with relevant training data; naturally, the more the data, the more accurately the system can perform.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 143, |
| "text": "(Bolck et al., 2015;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 144, |
| "end": 164, |
| "text": "Hepler et al., 2012;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 165, |
| "end": 184, |
| "text": "Ramos et al., 2017)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 311, |
| "end": 327, |
| "text": "(Ishihara, 2014)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The types and conditions of the linguistic evidence used in criminal cases are all unique. It is often the case that relevant data for the case must be collected in a customised manner from scratch to train the score-to-LR conversion model. However, forensic scientists usually cannot afford to collect such a large number of data. Therefore, it is crucial that forensic scientists know how the FTC system's performance is influenced by the number of data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "For this purpose, a series of experiments was conducted with the data that were synthesised by a Monte-Carlo simulation technique.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction: The Likelihood Ratio Framework and Forensic Text Comparison", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Two sets of experiments were conducted, with the first set aiming to identify the conditions under which the FTC system optimally performs (see Section 3.1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Design", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the second set, with the best-performing conditions set, the FTC system's performance is assessed by altering the data number for training the score-to-LR conversion model (see Section 3.2). The database, pre-processing of data, logistic-regression calibration and assessment metrics are also discussed in this section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Design", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The current study used a portion of the Amazon Product Data Authorship Verification Corpus 1 (Halvani et al., 2017) , which contained 21,534 product reviews from 3,228 reviewers. The review texts were equalised to be approximately 4kB in size, which corresponds to approximately 750 words in length. The reviewers contributed multiple product reviews for Amazon, but only those who produced six or more reviews were selected from the corpus, resulting in 2,160 reviewers. Only the first six reviews of each reviewer were selected for the two sets of experiments.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 115, |
| "text": "(Halvani et al., 2017)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Database", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "To compare a source-questioned (offender) sample and a source-known (suspect) sample, the six reviews were first separated into two groups: the first three and the last three, from which three documents that differed in word length (750, 1,500 and 2,250 words) were created by concatenating them. The first review text of each group was used as it originally appeared (i.e., as a document of 750 words). The first and second texts were also concatenated into a document of 1,500 words. All three texts were then combined into a document of 2,250 words. Documents of different word lengths were prepared for testing the correlation between the number of words and the system's performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Database", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The entire database was divided into the three mutually exclusive sub-databases of 'test', 'back-ground' and 'development', each of which comprised documents from 720 authors (=2,160/3). The documents from the test database were used to assess the system's performance by generating same-author (SA) and different-author (DA) comparisons. From the 720 authors from the test database, each of whom had two documents for each word length, 720 SA comparisons and 517,680 DA comparisons (720C2\u00d72) were possible for each word length.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Database Partition", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The documents from the background database were used to obtain SA and DA scores, which were in turn used to train the score-to-LR conversion model. The composition of the background database was identical in quantity to that of the test database. That is, 720 SA scores and 517,680 DA scores could be obtained from the background database.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Database Partition", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The resultant LRs after the score-to-LR conversion may not have been calibrated due to various reasons. In this case, the uncalibrated LRs had to be converted to interpretable LRs through a process of calibration. A typical and robust model for the calibration procedure is logistic regression (Morrison, 2013) , and the development database was used to train the logistic regression. A more detailed explanation for logistic-regression calibration is provided in Section 2.4.", |
| "cite_spans": [ |
| { |
| "start": 294, |
| "end": 310, |
| "text": "(Morrison, 2013)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Database Partition", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Documents were tokenised with the tokens() function in the quanteda library (Benoit et al., 2018) of the R statistical package in the default setting. That is, all characters were changed to lower case and punctuation marks were not removed; the punctuation marks were thus considered single-word tokens. No stemming algorithm was applied.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 97, |
| "text": "(Benoit et al., 2018)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tokenisation and a Bag-of-Words Model", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "The 420 most frequent words appearing in the entire dataset were selected as components for the bag-of-words model. The relative frequencies of the words in the model were then calculated for each document. These relative frequencies were used instead of word counts because the length of each document varied. The word frequencies of the bag-of-words vector were z-score normalised to equalise the amount of information across the words in the vector. If this step was not taken, then the information that was encoded in the frequently occurring words would substantially and unevenly influence the outcomes of the experiments, as word frequencies follow the distribution described by Zipf's law (Zipf, 1932) .", |
| "cite_spans": [ |
| { |
| "start": 697, |
| "end": 709, |
| "text": "(Zipf, 1932)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tokenisation and a Bag-of-Words Model", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "The LRs that are estimated using the scorebased approach are usually well calibrated; they can thus be interpreted as the weight of evidence. As will be reported in Section 4, LRs become less calibrated when the background data are limited. Figure 1 contains two Tippett plots which show the magnitude of the LRs derived from a simulation under a specific experimental condition (randomly generated scores from 10 authors for 2,250 words). Tippett plots show the cumulative proportion of the LRs of the SA comparisons, which are plotted rising from the left, as well as of the LRs of the DA comparisons, plotted rising from the right. For the Tippett plots, the cumulative proportion of trails is plotted on the Y-axis against the log10 LRs on the X-axis. The intersection of the two curves is the equal error rate (EER) which indicates the operating point at which the miss and false alarm rates are equal. As can be seen from Figure 1a , the intersection of the two curves is not aligned with log10LR=0. That means, the derived LRs are not well calibrated; thus they cannot be interpreted as the weight of evidence.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 241, |
| "end": 249, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 928, |
| "end": 937, |
| "text": "Figure 1a", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Logistic-Regression Calibration", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "These uncalibrated LRs must be converted to calibrated LRs to be interpreted as the weight of evidence. A logistic-regression calibration (Br\u00fcmmer and du Preez, 2006) is employed for this purpose. Logistic-regression calibration is operated by applying linear shifting and scaling to the uncalibrated LRs, in the log odds space, relative to a decision boundary; its aim is to minimise the magnitude and incidence of uncalibrated LRs that are known to misleadingly support the incorrect hypothesis, and also to maximise the values of uncalibrated LRs correctly supporting the hypotheses. A logistic-regression line, the weights of which are estimated on the basis of the LRs derived from a training database, is used to monotonically shift and scale the uncalibrated LRs to the calibrated LRs. By way of exemplification, assuming a logistic-regression line of the type y=ax+b (where x is the uncalibrated LR and y is the calibrated LR, and the weights, a and b, are estimated on the basis of the (uncalibrated) LRs derived from the development database), the formula y=ax+b is used to shift by the amount of b, and scale by the amount of a, the uncalibrated LRs to the calibrated LRs. The LRs presented in Figure 1b are the outcome of the application of logistic-regression calibration to the LRs given in Figure 1a .", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 166, |
| "text": "(Br\u00fcmmer and du Preez, 2006)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1205, |
| "end": 1214, |
| "text": "Figure 1b", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1305, |
| "end": 1314, |
| "text": "Figure 1a", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Logistic-Regression Calibration", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "It is common to assess the performance of any identification or classification system based on its accuracy and error rates. However, accuracy and error rates are binary and categorical (e.g., correct or incorrect); this is not suitable for the nature of LR, which is gradient and continuous.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "A more appropriate metric for assessing LRbased systems is arguably the log-LR cost (Cllr) (Br\u00fcmmer and du Preez, 2006) , which was originally developed for LR-based automatic speaker recognition systems. Cllr can be obtained through Equation (3).", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 119, |
| "text": "(Br\u00fcmmer and du Preez, 2006)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "C_{llr} = \\frac{1}{2}\\left(\\frac{1}{N_{SA}}\\sum_{i}\\log_2\\left(1+\\frac{1}{LR_i}\\right)+\\frac{1}{N_{DA}}\\sum_{j}\\log_2\\left(1+LR_j\\right)\\right)", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Performance Evaluation", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "N_SA and N_DA refer to the number of SA and DA comparisons, respectively. LRi and LRj refer to the linear LRs that are derived from these SA and DA comparisons. In this metric, all LRs (except \u00b1infinity) are attributed penalties in proportion to their magnitudes, with the LRs that support the counterfactual hypotheses being more severely penalised. The Cllr is based on information theory, and if the Cllr value is higher than one, then the system is performing worse than not utilising the evidence at all.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "The Cllr is a metric that assesses a system's overall validity. It comprises two components: discrimination loss (Cllr min ) and calibration loss (Cllr cal ): Cllr=Cllr min +Cllr cal . The Cllr min is a theoretical minimum Cllr value that can be obtained through pool adjacent violators algorithms (Br\u00fcmmer and du Preez, 2006) .", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 326, |
| "text": "(Br\u00fcmmer and du Preez, 2006)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Evaluation", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "A series of FTC experiments was conducted with a score-based LR approach to identify under what conditions the system would yield the best outcome. In these experiments, scores were measured with Cosine distance, with the bag-of-words model consisting of N most frequent words. The scores were then converted to their LRs based on the conversion model that was trained by the scores calculated from the SA and DA comparisons, which were compiled from the background database. The size (N) of the bag-of-words vector is incremented from N=20 to N=420 by 20 to identify the best-performing N. The Normal, Log-Normal, Weibull and Gamma models were tried as possible conversion models, but only the model that fit the data best in terms of the Akaike information criterion (AIC) (Akaike, 1974) was selected for each experiment (separately for the SA and DA models). Cosine distance was used because of its superior performance to other measures (Evert et al., 2017; Smith and Aldridge, 2011) .", |
| "cite_spans": [ |
| { |
| "start": 775, |
| "end": 789, |
| "text": "(Akaike, 1974)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 941, |
| "end": 961, |
| "text": "(Evert et al., 2017;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 962, |
| "end": 987, |
| "text": "Smith and Aldridge, 2011)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preparatory Experiments and Outcomes", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The Cllr values are plotted as a function of the feature number (N) in Figure 2 , separately for 750, 1,500 and 2,250 words. Regardless of the word length, the system performed best with N=260. The overall trend for the Cllr trajectory is similar across the word lengths, revealing a relatively large improvement in performance as the N increased from 20 to 120 and the Cllr values started converging towards N=260. After N=260, the performance remained relatively unchanged, indicating that the inclusion of less-frequent words did not contribute to the improvement. The best-fitted models when N=260 are outlined in Table 1 and are used for the Monte-Carlo simulation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 71, |
| "end": 79, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 618, |
| "end": 625, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Preparatory Experiments and Outcomes", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In the preparatory experiment, the score-to-LR conversion models were trained with the data in the background database, which comprised texts written by 720 authors. Using the model as the basis, the scores of X number of authors (X=[5, 10, 20, 30, 40, 60, 80, \u2026, 720] ) were randomly generated 20 times to build the conversion models. The Normal, Log-Normal, Weibull and Gamma parametric models were fitted to the scores that were randomly generated separately for SA and DA comparisons in the maximum likelihood estimation method. The best-fitted model was chosen according to its AIC values. Figure 3 illustrates the simulation process for the length of 750 words. Out of the texts written by 720 authors from the background database, 720 SA and 517,680 DA scores were estimated. These scores are plotted as histograms: the white histogram represents SA and the grey histogram represents DA. Their fitted models (Weibull) are presented as solid red and blue curves, respectively. From these two models, the scores for the SA and DA comparisons-which are possible from 30 authors (i.e., 30 SA and 870 DA scores)-were randomly generated 20 times. Their models are represented by thin black curves. These models were used for the score-to-LR conversion. Figure 3 : Illustration of a Monte-Carlo simulation with the base SA and DA scores, of which the histograms are white and grey, respectively. The red and blue curves are models of the SA and DA scores, respectively. The thin lines represent the models of the 20 sets of randomly generated scores from 30 authors.", |
| "cite_spans": [ |
| { |
| "start": 230, |
| "end": 268, |
| "text": "(X=[5, 10, 20, 30, 40, 60, 80, \u2026, 720]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 595, |
| "end": 603, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 1254, |
| "end": 1262, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments with the Monte-Carlo Simulation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The boxplots presented in Figure 4 reveal the degree of fluctuations in the Cllr values of the 20 simulations; they also indicate how the Cllr values converge as the number of authors increases.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 26, |
| "end": 34, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Regardless of the word length, the FTC system's performance substantially fluctuates when the background database only comprises the text samples from 5~10 authors; that is, the performance is not stable. However, this instability quickly recovers if the text samples are collected from 20 or more authors. This is a positive finding in terms of FTC's practical application, as forensic scientists cannot afford the time and money required to collect a large number of data that are relevant to each case if they cannot find an already-existing dataset that is suitable to the case. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussions", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Weibull Weibull 1500", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SA scores DA scores 750", |
| "sec_num": null |
| }, |
| { |
| "text": "Weibull Normal 2250", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SA scores DA scores 750", |
| "sec_num": null |
| }, |
| { |
| "text": "Weibull Normal It is evident from Figure 4 (black circles) that the system's overall performance improves exponentially from N=5 to N=40, resulting in the outcome in which the performance with N=40 is nearly compatible with its performance with N=720.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 34, |
| "end": 42, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "SA scores DA scores 750", |
| "sec_num": null |
| }, |
| { |
| "text": "To further investigate the reasons underlying the fluctuations in performance (especially with the small number of N), the Cllr min and Cllr cal values (discrimination loss and calibration loss, respectively) are plotted separately in Figures 5 and 6 , respectively. They are presented in the same manner as Figure 4 . As can be observed in Figure 5 , being apart from the word length of 750, the system's discriminability is highly stable, even with small Ns. Specifically, regarding the word length of 2,250, Figure 5c reveals that the Cllr min values are constant and far less fluctuated, as they are not affected by the number of authors in the background database.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 235, |
| "end": 250, |
| "text": "Figures 5 and 6", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 308, |
| "end": 316, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 341, |
| "end": 349, |
| "text": "Figure 5", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 511, |
| "end": 520, |
| "text": "Figure 5c", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "SA scores DA scores 750", |
| "sec_num": null |
| }, |
| { |
| "text": "That is, in terms of discrimination performance, when many words (e.g., 1,500 and 2,250 words) are available, the system is robust and stable against a small background population size.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SA scores DA scores 750", |
| "sec_num": null |
| }, |
| { |
| "text": "In contrast, Figure 6 indicates that the Cllr cal values exhibit a highly similar trend to that of the Cllr values that are plotted in Figure 4 -in that, a great variability in the Cllr cal values is observed when the number of authors is small (e.g., N=5~10); however, this variability begins converging rapidly with more authors. This signifies that the Cllr cal values also demonstrate a quick recovery with more authors. The observations drawn from Figures 5 and 6 reveal that the poor performance associated with a small number of authors (N=5~10), as indicated by the Cllr values from Figure 4 , is not due to the system's poor discriminability, but due to poor calibration. Following this interpretation, logistic-regression calibration was applied to all LRs, in which a gain in overall performance was expected. The Cllr values of the calibrated LRs are again plotted as boxplots in Figure 7 . It is apparent from Figure 7 that the system's performance has noticeably improved in both stability and accuracy; the degree of fluctuations in the Cllr values is lessened and the mean Cllr values are lower, even with small Ns. Ishihara (2016) previously investigated how background population size affected the performance of an LR-based FTC system. In the experiments, the LRs were estimated using the multivariate kernel density (MVKD) LR formula (Aitken and Lucy, 2004) , with two to eight stylometric features. Texts collected from 140 authors were used to extract necessary statistical information for a Monte-Carlo simulation, for which a mixture Gaussian model was used. The MVKD is a type of feature-based approach for estimating LRs. The population size was incremented by 10 from 10 authors to 140 authors.", |
| "cite_spans": [ |
| { |
| "start": 1132, |
| "end": 1147, |
| "text": "Ishihara (2016)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1354, |
| "end": 1377, |
| "text": "(Aitken and Lucy, 2004)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 13, |
| "end": 21, |
| "text": "Figure 6", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 135, |
| "end": 143, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 591, |
| "end": 599, |
| "text": "Figure 4", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 892, |
| "end": 900, |
| "text": "Figure 7", |
| "ref_id": null |
| }, |
| { |
| "start": 923, |
| "end": 931, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "SA scores DA scores 750", |
| "sec_num": null |
| }, |
| { |
| "text": "Although a direct comparison between the current study and Ishihara's (2016) study cannot be validly made, some noticeable differences can still be highlighted. The number of features (2~8) used in Ishihara's study was far smaller than that of the current study (260), and Ishihara reported a great improvement in Cllr (from 10 to 50~60 authors), after which a small but continuous improvement could be observed with more authors. He also reported a relatively high variability in Cllr, even with a large number of authors (e.g., 140). In light of these comparative observations, the FTC system's performance appears to reach its optimum with a smaller population size for the scorebased approach rather than for the feature-based approach. Further, the fluctuation in performance also begins converging with a lesser number of background data for the score-based approach than for the feature-based approach. The relative robustness of the score-based approach that the current study revealed for linguistic text evidence aligns with the findings in previous studies regarding other types of evidence (Aitken, 2018; Bolck et al., 2015) . However, the difference in performance between the score-and feature-based approaches must be further investigated under mutually comparable conditions.", |
| "cite_spans": [ |
| { |
| "start": 1102, |
| "end": 1116, |
| "text": "(Aitken, 2018;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1117, |
| "end": 1136, |
| "text": "Bolck et al., 2015)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "SA scores DA scores 750", |
| "sec_num": null |
| }, |
| { |
| "text": "Based on Figure 7 , it can be concluded that logistic-regression calibration leads to an improvement in terms of the system's stability and validity. For training the logistic-regression weights, the development database that comprised the texts from 720 authors was employed. It is evident that the calibration performance also mainly relies on the quantity of the data in the development database. The positive outcome after applying the calibration is likely attributable to the amount of data in the development database. Therefore, it is pertinent to analyse how the development database's size influences the FTC system's performance, as the application of calibration appears to be essential when the background database is substantially small in number (e.g., 5~10 authors). ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 9, |
| "end": 17, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "SA scores DA scores 750", |
| "sec_num": null |
| }, |
| { |
| "text": "The robustness and stability of a score-based LR FTC system with a bag-of-words model were investigated with different numbers of background population data, which were synthesised by a Monte-Carlo simulation. The experiments' results revealed that the score-based FTC system is fairly robust and stable in performance against the limited number of background population data. For example, with 40~60 authors, the performance is both nearly compatible and as stable as with 720 authors. This is a beneficial finding for FTC practitioners. Additionally, the instability and suboptimal performance observed in terms of Cllr with a small number of data (e.g., 5~20 authors) were mainly attributed to poor calibration (i.e., the derived LRs were not calibrated) rather than to the poor discriminability potential.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Further Study", |
| "sec_num": "5" |
| }, |
| { |
| "text": "A comparison with the outcomes of previous studies indicates that the score-based approach may be more robust against a limited number of background population data than a feature-based approach; however, this point warrants further study.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Further Study", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Available at http://bit.ly/1OjFRhJ.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The author thanks the reviewers for their valuable comments. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Bayesian hierarchical random effects models in forensic science", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "G G" |
| ], |
| "last": "Aitken", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Frontiers in Genetics", |
| "volume": "9", |
| "issue": "126", |
| "pages": "1--14", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aitken, C. G. G. (2018) Bayesian hierarchical random effects models in forensic science. Frontier in Genetics 9(Article 126): 1-14.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Evaluation of trace evidence in the form of multivariate data", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "G G" |
| ], |
| "last": "Aitken", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Lucy", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Journal of the Royal Statistical Society, Series C (Applied Statistics)", |
| "volume": "53", |
| "issue": "1", |
| "pages": "109--122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aitken, C. G. G. and Lucy, D. (2004) Evaluation of trace evidence in the form of multivariate data. Journal of the Royal Statistical Society, Series C (Applied Statistics) 53(1): 109-122.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The Use of Statistics in Forensic Science", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "G G" |
| ], |
| "last": "Aitken", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [ |
| "A" |
| ], |
| "last": "Stoney", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aitken, C. G. G. and Stoney, D. A. (1991) The Use of Statistics in Forensic Science. New York: Ellis Horwood.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Statistics and the Evaluation of Evidence for Forensic Scientists", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "G G" |
| ], |
| "last": "Aitken", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Taroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aitken, C. G. G. and Taroni, F. (2004) Statistics and the Evaluation of Evidence for Forensic Scientists. Chichester: John Wiley & Sons.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "A new look at the statistical model identification", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Akaike", |
| "suffix": "" |
| } |
| ], |
| "year": 1974, |
| "venue": "IEEE Transactions on Automatic Control", |
| "volume": "19", |
| "issue": "6", |
| "pages": "716--723", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Akaike, H. (1974) A new look at the statistical model identification. IEEE Transactions on Automatic Control 19(6): 716-723.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Weight-of-Evidence for Forensic DNA Profiles", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "J" |
| ], |
| "last": "Balding", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Balding, D. J. (2005) Weight-of-Evidence for Forensic DNA Profiles. Hoboken: John Wiley & Sons.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "quanteda: An R package for the quantitative analysis of textual data", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Benoit", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Watanabe", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Nulty", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Obeng", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Matsuo", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Journal of Open Source Software", |
| "volume": "3", |
| "issue": "30", |
| "pages": "774--776", |
| "other_ids": { |
| "DOI": [ |
| "10.21105/joss.00774" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benoit, K., Watanabe, K., Wang, H., Nulty, P., Obeng, A., M\u00fcller, S. and Matsuo, A. (2018) quanteda: An R package for the quantitative analysis of textual data. Journal of Open Source Software 3(30): 774- 776. https://doi.org/10.21105/joss.00774", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Evaluating score-and feature-based likelihood ratio models for multivariate continuous data: Applied to forensic MDMA comparison", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bolck", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [ |
| "F" |
| ], |
| "last": "Ni", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lopatka", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Law, Probability and Risk", |
| "volume": "14", |
| "issue": "3", |
| "pages": "243--266", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bolck, A., Ni, H. F. and Lopatka, M. (2015) Evaluating score-and feature-based likelihood ratio models for multivariate continuous data: Applied to forensic MDMA comparison. Law, Probability and Risk 14(3): 243-266.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Applicationindependent evaluation of speaker detection", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Br\u00fcmmer", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Du Preez", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Computer Speech and Language", |
| "volume": "20", |
| "issue": "2-3", |
| "pages": "230--275", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.csl.2005.08.001" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Br\u00fcmmer, N. and du Preez, J. (2006) Application- independent evaluation of speaker detection. Computer Speech and Language 20(2-3): 230-275. https://dx.doi.org/10.1016/j.csl.2005.08.001", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Understanding and explaining Delta measures for authorship attribution", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Evert", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Proisl", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Jannidis", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Reger", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Pielstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Sch\u00f6ch", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Vitt", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Digital Scholarship in the Humanities", |
| "volume": "32", |
| "issue": "suppl_2", |
| "pages": "4--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evert, S., Proisl, T., Jannidis, F., Reger, I., Pielstr\u00f6m, S., Sch\u00f6ch, C. and Vitt, T. (2017) Understanding and explaining Delta measures for authorship attribution. Digital Scholarship in the Humanities 32(suppl_2): ii4-ii16.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A Bayesian approach to interpreting footwear marks in forensic casework", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [ |
| "W" |
| ], |
| "last": "Evett", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "A" |
| ], |
| "last": "Lambert", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "S" |
| ], |
| "last": "Buckleton", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Science & Justice", |
| "volume": "38", |
| "issue": "4", |
| "pages": "241--247", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/S1355-0306(98)72118-5" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Evett, I. W., Lambert, J. A. and Buckleton, J. S. (1998) A Bayesian approach to interpreting footwear marks in forensic casework. Science & Justice 38(4): 241- 247. https://dx.doi.org/10.1016/S1355- 0306(98)72118-5", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Weight of evidence and the Bayesian likelihood ratio", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [ |
| "J" |
| ], |
| "last": "Good", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Good, I. J. (1991) Weight of evidence and the Bayesian likelihood ratio. In C. G. G. Aitken and D. A. Stoney (eds.), The Use of Statistics in Forensic Science 85- 106. Chichester: Ellis Horwood.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Authorship verification based on compressionmodels", |
| "authors": [ |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Halvani", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Winter", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Graner", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.00516" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Halvani, O., Winter, C. and Graner, L. (2017). Authorship verification based on compression- models. arXiv preprint arXiv:1706.00516. Retrieved on 25 June 2020 from http://arxiv.org/abs/1706.00516", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Score-based likelihood ratios for handwriting evidence", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "B" |
| ], |
| "last": "Hepler", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "P" |
| ], |
| "last": "Saunders", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "J" |
| ], |
| "last": "Davis", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Buscaglia", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Forensic Science International", |
| "volume": "219", |
| "issue": "1-3", |
| "pages": "129--140", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hepler, A. B., Saunders, C. P., Davis, L. J. and Buscaglia, J. (2012) Score-based likelihood ratios for handwriting evidence. Forensic Science International 219(1-3): 129-140.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A likelihood ratio-based evaluation of strength of authorship attribution evidence in SMS messages using N-grams", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ishihara", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "International Journal of Speech Language and the Law", |
| "volume": "21", |
| "issue": "1", |
| "pages": "23--50", |
| "other_ids": { |
| "DOI": [ |
| "10.1558/ijsll.v21i1.23" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ishihara, S. (2014) A likelihood ratio-based evaluation of strength of authorship attribution evidence in SMS messages using N-grams. International Journal of Speech Language and the Law 21(1): 23- 50. http://dx.doi.org/10.1558/ijsll.v21i1.23", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "An effect of background population sample size on the performance of a likelihood ratio-based forensic text comparison system: A Monte Carlo simulation with Gaussian mixture model", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ishihara", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Australasian Language Technology Association Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "113--121", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ishihara, S. (2016) An effect of background population sample size on the performance of a likelihood ratio-based forensic text comparison system: A Monte Carlo simulation with Gaussian mixture model. In T. Cohn (ed.), Proceedings of Proceedings of the Australasian Language Technology Association Workshop 2016: 113-121.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Strength of forensic text comparison evidence from stylometric features: A multivariate likelihood ratio-based analysis", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ishihara", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "The International Journal of Speech, Language and the Law", |
| "volume": "24", |
| "issue": "1", |
| "pages": "67--98", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ishihara, S. (2017a) Strength of forensic text comparison evidence from stylometric features: A multivariate likelihood ratio-based analysis. The International Journal of Speech, Language and the Law 24(1): 67-98.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Strength of linguistic text evidence: A fused forensic text comparison system", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ishihara", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Forensic Science International", |
| "volume": "278", |
| "issue": "", |
| "pages": "184--197", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.forsciint.2017.06.040" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ishihara, S. (2017b) Strength of linguistic text evidence: A fused forensic text comparison system. Forensic Science International 278: 184-197. https://doi.org/10.1016/j.forsciint.2017.06.040", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Likelihood ratio as weight of forensic evidence: A closer look", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [ |
| "P" |
| ], |
| "last": "Lund", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Iyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Journal of Research of the National Institute of Standards and Technology", |
| "volume": "122", |
| "issue": "", |
| "pages": "1--32", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lund, S. P. and Iyer, H. (2017) Likelihood ratio as weight of forensic evidence: A closer look. Journal of Research of the National Institute of Standards and Technology 122(Article 27): 1-32.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Handwriting evidence evaluation based on the shape of characters: Application of multivariate likelihood ratios", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Marquis", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Bozza", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Schmittbuhl", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Taroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Forensic Sciences", |
| "volume": "56", |
| "issue": "Suppl_1", |
| "pages": "238--242", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marquis, R., Bozza, S., Schmittbuhl, M. and Taroni, F. (2011) Handwriting evidence evaluation based on the shape of characters: Application of multivariate likelihood ratios. Journal of Forensic Sciences 56(Suppl_1): S238-242.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Forensic voice comparison and the paradigm shift", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [ |
| "S" |
| ], |
| "last": "Morrison", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Science & Justice", |
| "volume": "49", |
| "issue": "4", |
| "pages": "298--308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Morrison, G. S. (2009) Forensic voice comparison and the paradigm shift. Science & Justice 49(4): 298- 308.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Tutorial on logistic-regression calibration and fusion: Converting a score to a likelihood ratio", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [ |
| "S" |
| ], |
| "last": "Morrison", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Australian Journal of Forensic Sciences", |
| "volume": "45", |
| "issue": "2", |
| "pages": "173--197", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Morrison, G. S. (2013) Tutorial on logistic-regression calibration and fusion: Converting a score to a likelihood ratio. Australian Journal of Forensic Sciences 45(2): 173-197.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Computation of likelihood ratios in fingerprint identification for configurations of any number of minutiae", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Champod", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Puch-Solis", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Egli", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Anthonioz", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bromage-Griffiths", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Journal of Forensic Science", |
| "volume": "52", |
| "issue": "1", |
| "pages": "54--64", |
| "other_ids": { |
| "DOI": [ |
| "10.1111/j.1556-4029.2006.00327.x" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Neumann, C., Champod, C., Puch-Solis, R., Egli, N., Anthonioz, A. and Bromage-Griffiths, A. (2007) Computation of likelihood ratios in fingerprint identification for configurations of any number of minutiae. Journal of Forensic Science 52(1): 54-64. https://dx.doi.org/10.1111/j.1556- 4029.2006.00327.x", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "From biometric scores to forensic likelihood ratios", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Ramos", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "P" |
| ], |
| "last": "Krish", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Fierrez", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Meuwly", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Handbook of Biometrics for Forensic Science", |
| "volume": "", |
| "issue": "", |
| "pages": "305--327", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramos, D., Krish, R. P., Fierrez, J. and Meuwly, D. (2017) From biometric scores to forensic likelihood ratios. In M. Tistarelli and C. Champod (eds.), Handbook of Biometrics for Forensic Science 305- 327. Cham: Springer.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Interpreting Evidence: Evaluating Forensic Science in the Courtroom", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Robertson", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "A" |
| ], |
| "last": "Vignaux", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "E H" |
| ], |
| "last": "Berger", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robertson, B., Vignaux, G. A. and Berger, C. E. H. (2016) Interpreting Evidence: Evaluating Forensic Science in the Courtroom (2nd ed.). Chichester: John Wiley and Sons, Inc.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Improving authorship attribution: Optimizing Burrows' Delta method", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [ |
| "W H" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Aldridge", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Quantitative Linguistics", |
| "volume": "18", |
| "issue": "1", |
| "pages": "63--88", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Smith, P. W. H. and Aldridge, W. (2011) Improving authorship attribution: Optimizing Burrows' Delta method. Journal of Quantitative Linguistics 18(1): 63-88.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Selected Studies of the Principle of Relative Frequency in Language", |
| "authors": [ |
| { |
| "first": "G", |
| "middle": [ |
| "K" |
| ], |
| "last": "Zipf", |
| "suffix": "" |
| } |
| ], |
| "year": 1932, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zipf, G. K. (1932) Selected Studies of the Principle of Relative Frequency in Language. Cambridge: Harvard University Press.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Example Tippett plots showing uncalibrated (Panel a) and calibrated (b) LRs. Black=SA LRs; Grey=DA LRs; Solid curves=uncalibrated LRs; Dotted curves=calibrated LRs.", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Cllr values plotted as a function of the number of features, separately for the word lengths of 750, 1,500 and 2,250. The large circles indicate the best Cllr.", |
| "num": null |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Boxplots displaying the degree of fluctuation in Cllr values as a function of the size of the background database. Black circles indicate the mean Cllr values for each size of the background database.", |
| "num": null |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Boxplots displaying the degree of fluctuation in Cllr min values as a function of the size of the background database. Black circles indicate the mean Cllr min values for each size of the background database.", |
| "num": null |
| }, |
| "FIGREF4": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "Boxplots showing the degree of fluctuation in Cllr cal as a function of the size of the background database. Black circles indicate the mean Cllr cal values for each size of the background database.", |
| "num": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "num": null, |
| "text": "", |
| "html": null, |
| "content": "<table><tr><td>Shunichi Ishihara</td></tr><tr><td>Speech and Language Laboratory</td></tr><tr><td>The Australian National University</td></tr><tr><td>shunichi.ishihara@anu.edu.au</td></tr></table>" |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "num": null, |
| "text": "Best-fitted parametric models for the SA and DA scores.", |
| "html": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |