| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:42:34.326591Z" |
| }, |
| "title": "Maintaining Quality in FEVER Annotation", |
| "authors": [ |
| { |
| "first": "Henri", |
| "middle": [], |
| "last": "Schulte", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IT University of Copenhagen", |
| "location": { |
| "country": "Denmark" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Julie", |
| "middle": [], |
| "last": "Binau", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IT University of Copenhagen", |
| "location": { |
| "country": "Denmark" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IT University of Copenhagen", |
| "location": { |
| "country": "Denmark" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We propose two measures for measuring the quality of constructed claims in the FEVER task. Annotating data for this task involves the creation of supporting and refuting claims over a set of evidence. Automatic annotation processes often leave superficial patterns in data, which learning systems can detect instead of performing the underlying task. Humans also can leave these superficial patterns, either voluntarily or involuntarily (due to e.g. fatigue). The two measures introduced attempt to detect the impact of these superficial patterns. One is a new information-theoretic and distributionality based measure, DCI; and the other an extension of neural probing work over the ARCT task, utility. We demonstrate these measures over a recent major dataset, that from the English FEVER task in 2019.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We propose two measures for measuring the quality of constructed claims in the FEVER task. Annotating data for this task involves the creation of supporting and refuting claims over a set of evidence. Automatic annotation processes often leave superficial patterns in data, which learning systems can detect instead of performing the underlying task. Humans also can leave these superficial patterns, either voluntarily or involuntarily (due to e.g. fatigue). The two measures introduced attempt to detect the impact of these superficial patterns. One is a new information-theoretic and distributionality based measure, DCI; and the other an extension of neural probing work over the ARCT task, utility. We demonstrate these measures over a recent major dataset, that from the English FEVER task in 2019.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The FEVER task frames verification of claims given knowledge as a retrieval and three-class entailment problem. Given a claim, supporting or refuting text must be found, and a judgment made as to whether or not the text supports the claim.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "One way in which annotation performance lapses present is with the use of shortcuts. An easy shortcut for this task would be to insert a few direct negation words into claim texts, thus making them clash with the associated evidence. A recent study of ARCT, the Argument Reasoning Comprehension Task, in which systems have to pick a warrant given a claim a premise, found that annotators were prone to inserting words such as 'not' when constructing negative examples, which later models (such as BERT) could then pick up on (Niven and Kao, 2019) . These superficial shortcuts were prevalent to the extent that removing this information led to a significant drop in BERT argument reasoning performance, from 77% to 50%.", |
| "cite_spans": [ |
| { |
| "start": 525, |
| "end": 546, |
| "text": "(Niven and Kao, 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Mindful of the similar nature of the ARCT and FEVER tasks, we apply an extended version of Niven & Kao's metric to the FEVER dataset, and present an information theoretic measure over skipgrams in FEVER claims to detect candidate superficial features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The annotation process for FEVER is involved. The FEVER dataset (Thorne et al., 2018) comprises a total of 185,445 claims created from Wikipedia articles and annotated as either SUPPORTS, REFUTES or NOTENOUGHINFO. Additionally, claims that are labelled SUPPORTS and REFUTES also come with the evidence against which this judgement has been made. This FEVER data was created with the help of 50 annotators and in two stages: First creating claims from Wikipedia articles, then labelling them against evidence from Wikipedia. The claim generation stage entails providing annotators with a randomly sampled sentence from the introductory section of an English Wikipedia article and asking them to create claims about the article's entity. In addition to basing their claims on the provided sentence alone, annotators were also given the choice to utilize information from hyperlinked articles to allow for more complex claims (Thorne et al., 2018) . Annotators were also asked to create different variants of these claims by, for example, negating, generalizing or replacing part of the claim. This was done to introduce refutable and non-verifiable claims into the dataset. While trialing, the authors realized that \"the majority of annotators had difficulty generating non-trivial negation mutations [...] beyond adding 'not' to the original\" (Thorne et al., 2018) . We investigate the impact of these trivial negations on the quality of the dataset later on. In the second stage, annotators labeled the previously created claims as either SUPPORTS, REFUTES or NOTENOUGHINFO. For the first two classes, annotators also marked the sentences they used as evidence for their decision. Once again, the annotators had access to articles hyperlinked in the entity's article as well. The final dataset is segmented into multiple subsets, with the training set retaining a majority of the claims at a size of 145,449. 
The quality of their annotations is ensured by cross-checking labels through five-way agreement, Super-Annotators and even validation by the authors themselves. Yet, despite spotting the issue with non-trivial negations early in the process, they do not report on any investigations into the quality of their claims. One might argue that annotation accuracy loses its importance if the task is performed on the basis of biased data. Nevertheless, as with most complex annotation tasks over language, the complex nature of this annotation process is prone to annotation exhaustion and shortcuts (Pustejovsky and Stubbs, 2012) .", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 85, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 923, |
| "end": 944, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1342, |
| "end": 1363, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 2503, |
| "end": 2533, |
| "text": "(Pustejovsky and Stubbs, 2012)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotation in FEVER", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We propose two quality metrics for FEVER. The goal of FEVER data is to help train inference/verification/entailment tools that are wellgeneralised. Thus, a quality metric should help detect when annotated data risks being unsuitable for that purpose. The new metrics outlined here are generic and can be applied to data for other classification tasks. They are proposed with the goal of identifying surface-level linguistic patterns that 'leak' class information, helping dataset builders improve the quality of their data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quality Metrics", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The first metric we propose is a simple information theoretic measure of how much a pattern contributes to a classification. In this case, patterns are extracted using skip-grams. These capture a good amount of information about a corpus (Guthrie et al., 2006) while also giving a way of ignoring the typically-rare named entities that are rich in FEVER claims and focusing on the surrounding language. The metric is the weighted inverse information gain of a skip-gram relative to a pair of classes. Weighting is determined by the frequency of documents bearing the skip-gram in the corpus, which normalises skew from highly imbalanced but rare phrases. For dataset D and cue k, where cues are e.g. skip-gram features:", |
| "cite_spans": [ |
| { |
| "start": 238, |
| "end": 260, |
| "text": "(Guthrie et al., 2006)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "IG(D, k) = H(D) \u2212 H(D|k)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We are interested in items that cause high information gain, i.e. 1 \u2212 IG(D, f ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "This should be weighted with the impact that a pattern can potentially have in a given dataset and split. For this reason, feature counts should be normalised by the size of each class. That is, when calculating entropy:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H(X) = \u2212\u03a3 n i=1 P (x i )logP (x i )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Let D cue=k be the set of data bearing cue k, and D class=y be the set of data with class label y drawn from the set of class labels Y . The normalised distribution N of cue frequencies for cue k is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "N = { |D cue=k \u2229 D class=i | |D cue=k | |i \u2208 Y }", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Given this class-balanced dataset weighting, we can then define the information-based factor \u03bb h trivially thus:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03bb h = 1 \u2212 H(N )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "A term is also required to correct for the rareness of features. Features that occur only for one class, but are seldom, should not receive a high value.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "On the other hand, knowing that features in language typically follow a Zipfian frequency distribution (Montemurro, 2001) , one should still have useful resolution beyond the most-frequent items. Thus we specify a frequency-based scaling factor \u03bb f as a root of the scaled frequency weight:", |
| "cite_spans": [ |
| { |
| "start": 103, |
| "end": 121, |
| "text": "(Montemurro, 2001)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u03bb f = (|dk : d \u2208 D||D| \u22121 ) 1 s (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Where s is a scaling factor corresponding to the estimated exponent of the features' power law frequency distribution. For English, s = 3 gives reasonable results (i.e. taking the cube root).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "These two are combined taking their squared product to form DCI:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "DCI = \u03bb h \u00d7 \u03bb f", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "A note regarding language: in this case, we consider 1, 2, and 3-grams, with skips in the range of [0, 2] . This is suitable for English; other languages might benefit from broader skip ranges.", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 102, |
| "text": "[0,", |
| "ref_id": null |
| }, |
| { |
| "start": 103, |
| "end": 105, |
| "text": "2]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset-weighted Cue Information", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We follow the approach of Niven and Kao (2019) in determining a productivity and coverage score for each cue in the data. As the structure of their dataset is fundamentally different from the dataset presented in Thorne et al. (2018) , we have made amendments to their methodology in order to attain comparable results.", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 46, |
| "text": "Niven and Kao (2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 213, |
| "end": 233, |
| "text": "Thorne et al. (2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cue Productivity and Coverage Probes", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As in Niven and Kao (2019), we consider any uni-or bigram a potential cue. We extract these cues from the claims in the dataset and take note of the associated label. This allows us to calculate the applicability of a given cue (\u03b1 k ), which represents the absolute number of claims in the dataset that contain the cue irrespective of their label. Let T be the set of all cues and n the number of claims.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cue Productivity and Coverage Probes", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u03b1 k = n i=1 1 \u2203k \u2208 T (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cue Productivity and Coverage Probes", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The productivity of a cue (\u03c0 k ) is the frequency of the most common label across the claims that contain the cue. In practical terms, the productivity is the chance that a model correctly labels a claim by assigning it the most common label of a given cue in the claim.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cue Productivity and Coverage Probes", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03c0 k = max n i=1 1 \u2203j, k \u2208 T j \u03b1 k", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Cue Productivity and Coverage Probes", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "From this definition productivity may be in the range [ 1 m , 1] where m is the number of unique labels -three in our case. The coverage of a cue (\u03be k ) is defined by Niven and Kao (2019) as \u03be k = \u03b1 k /n. We retain this definition with the caveat that, due to the fundamentally different architecture of the data, we derive \u03b1 k differently.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 187, |
| "text": "Niven and Kao (2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cue Productivity and Coverage Probes", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "This approach assumes a balanced dataset with regard to the frequency of each label. If executed on an imbalanced dataset, a given cue's productivity would be dominated by the most frequent label, not because it is actually more likely to appear in a claim with that label but purely since the label is more frequent overall. We generate a balanced sample by undersampling majority classes. In order to not discard data from the majority classes, however, we repeat the process ten times with random samples. We find that this is a better compromise than oversampling minority classes or introducing class weights when calculating productivity, as Niven and Kao (2019) acknowledge that a cue is only useful to a machine learning model if \u03c0 k > 1/m. In their case, every claim can have two possible labels, i.e. m = 2. For the FEVER dataset three labels exist. This means that the productivity threshold at which cues start becoming useful to a model is higher in the ARCT task. We should therefore actually consider the utility of a cue to the model (\u03c1 k ).", |
| "cite_spans": [ |
| { |
| "start": 648, |
| "end": 668, |
| "text": "Niven and Kao (2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cue Productivity and Coverage Probes", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u03c1 k = \u03c0 k \u2212 1 m (9)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cue Productivity and Coverage Probes", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "4 Running the metrics", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cue Productivity and Coverage Probes", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We apply the described methodology to the FEVER training dataset presented in Thorne et al. (2018) and thereby determine productivity and coverage for 14,320 cues. Considering the cues with a productivity of 1, i.e. cues that could predict the label with a 100% accuracy, is not particularly relevant as none of them have a coverage over 0.01, meaning that they only appear in \u2264 1% of claims. In fact, there are 12,126 cues that only ever appear with one label (\u2248 85%). Table 1 shows the cues with the highest coverage. It is dominated by common English stop words with productivity near the minimum of 1 3 . This means that to a machine learning model these cues provide very little utility in finding a shortcut. Some of the more common cues do still provide some utility though. The cues \"an\", \"to\" and \"and\" each appear in 6-8% of all claims and provide 0.44, 0.53 and 0.49 productivity respectively. These values pale, however, in comparison to the slightly less common but considerably more productive cues \"not\" and \"only\" (see table 2 ). While these only have a coverage of 0.04 each ( Table 2 , they provide productivity of 0.86 and 0.90 respectively. Even though Thorne et al. (2018) explicitly mention that they attempted to minimize the use of \"not\" for the creation of refuted claims, we find that in our sample claims containing \"not\" were labelled REFUTES 86% of the time. We find no other cues with comparable coverage to reach such high productivity.", |
| "cite_spans": [ |
| { |
| "start": 78, |
| "end": 98, |
| "text": "Thorne et al. (2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1173, |
| "end": 1193, |
| "text": "Thorne et al. (2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 470, |
| "end": 477, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1035, |
| "end": 1042, |
| "text": "table 2", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 1094, |
| "end": 1101, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Neural Probe Results", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Niven and Kao (2019) find that in the Argument Reasoning Comprehension Task (ARCT) dataset (Habernal et al., 2018) the cue \"not\" has a productivity of 61% and coverage of 64%. In the FEVER training data \"not\" to has a higher productivity but lower coverage.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 114, |
| "text": "(Habernal et al., 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Probe Results", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For \"not\" this provides a utility value of \u2248 0.11 in ARCT and \u2248 0.53 in the train set of FEVER, meaning that in the FEVER data the cue provides a significantly higher utility to a ML model. This conclusion is only drawn from the utility alone though. For the sake of comparability across both utility and coverage, we condense these values to one metric by taking their harmonic mean. We choose the harmonic mean as it assigns higher values to cues that are both utilisable and covering. For \"not\" this results in \u2248 0.19 in ARCT and \u2248 0.07 in the FEVER training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Probe Results", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Considering cues by their harmonic mean of utility and coverage suggests that despite their high productivity, \"not\" and \"only\" might not be the most relevant cues in the data, being preceded by common stop words that yet provide noticeable utility (see Table 3 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 254, |
| "end": 261, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Neural Probe Results", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Besides \"not\", some relatively neutral, such as \"to\" and \"and\", also appear in a somewhat imbalanced manner. In fact, in our samples 53% of claims containing \"to\" are labelled as REFUTES and 49% of claims containing \"and\" are labelled as SUPPORTS. These distributions are hard to predict. We therefore encourage analyses of this during dataset construction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Neural Probe Results", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "DCI enables ranking of superficial n-grams. Table 4 presents the most informative superficial patterns in the FEVER data. We can see that \"not\" plays a prolific role, especially as part of a trigram. This might be what one would expect given the high utility of this word (Table 3) . Both support/refute and refute/not-enough-data partitions give the most highly-ranked skip-grams; support/not-enough-data doesn't generate annotation artefacts as frequently.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 272, |
| "end": 281, |
| "text": "(Table 3)", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "DCI Results", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Applying productivity, utility and coverage indicates a dearth of the sort of superficial features in FEVER that were present in previous tasks (namely the ARCT dataset). This is somewhat at odds with other work over FEVER. Schuster et al. (2019) find that local mutual information (LMI) reveals some n-grams that are strongly-associated with negative examples, and are able to predict claim veracity based on claims alone. The phrases that Schuster et al. find match those top-ranked by our DCI metric. We can therefore see that mutual informationbased measures (LMI, DCI) find different biases to frequency-associative measures, such as those use to find cues in the ARCT task. It may be worth applying e.g. LMI or DCI to the ARCT data to see if complementary results emerge.", |
| "cite_spans": [ |
| { |
| "start": 224, |
| "end": 246, |
| "text": "Schuster et al. (2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Note that we examine all n-and skip-grams in the dataset, without smoothing. Suntwal et al. (2019) experiment with removing named entities and rare noun-phrases from their dataset when training models. While this is likely to reduce variances in the data representation, enhancing the signal, the goal of this work is to find the strongest signals, and go down from there, rather than remove noise in a \"bottom-up\" fashion. This is not the first investigation into biases related to crowdsourcing and human annotation: Belinkov et al. (2019) find patterns in corpora for inference. Sabou et al. (2014) and Bontcheva et al. (2017) discuss best practices in crowdsourcing for corpus creation. Notably, the number of annotations created by a single annotator should be capped strongly, to avoid nuances of a single worker's style disrupting the data significantlyrather, many annotators should contribute to the data. We propose further controlling quality by looking for superficial patterns during the annotation process, and asking annotators to consider re-formulating their input choices if such patterns are present.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 98, |
| "text": "Suntwal et al. (2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 582, |
| "end": 601, |
| "text": "Sabou et al. (2014)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 606, |
| "end": 629, |
| "text": "Bontcheva et al. (2017)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Annotators are prone to introducing artefacts, certainly in the construction of datasets involving synthesis of claims and counterclaims. This paper presented metrics and an analysis of the English FEVER dataset with three previously-used measures: productivity, coverage and utility; and a new measure, dataset-weighted cue information. We find that the FEVER dataset is somewhat free of superficial artefacts, and present a truncated set of its most-informative (or most distracting) patterns.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This research was partly supported by the Danmarks Frie Forskningsfond project Verif-AI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "On adversarial removal of hypothesis-only bias in natural language inference", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Poliak", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Stuart", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Shieber", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander M", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Eighth Joint Conference on Lexical and Computational Semantics (*SEM 2019)", |
| "volume": "", |
| "issue": "", |
| "pages": "256--262", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov, Adam Poliak, Stuart M Shieber, Benjamin Van Durme, and Alexander M Rush. 2019. On adversarial removal of hypothesis-only bias in natural language inference. In Proceedings of the Eighth Joint Conference on Lexical and Computa- tional Semantics (*SEM 2019), pages 256-262.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Crowdsourcing named entity recognition and entity linking corpora", |
| "authors": [ |
| { |
| "first": "Kalina", |
| "middle": [], |
| "last": "Bontcheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Handbook of Linguistic Annotation", |
| "volume": "", |
| "issue": "", |
| "pages": "875--892", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kalina Bontcheva, Leon Derczynski, and Ian Roberts. 2017. Crowdsourcing named entity recognition and entity linking corpora. In Handbook of Linguistic Annotation, pages 875-892. Springer.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A closer look at skip-gram modelling", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Guthrie", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Allison", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "1222--1225", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Guthrie, Ben Allison, Wei Liu, Louise Guthrie, and Yorick Wilks. 2006. A closer look at skip-gram modelling. In LREC, pages 1222-1225.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The argument reasoning comprehension task: Identification and reconstruction of implicit warrants", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Habernal", |
| "suffix": "" |
| }, |
| { |
| "first": "Henning", |
| "middle": [], |
| "last": "Wachsmuth", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1930--1940", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Habernal, Henning Wachsmuth, Iryna Gurevych, and Benno Stein. 2018. The argument reasoning comprehension task: Identification and reconstruc- tion of implicit warrants. In Proceedings of the Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics, pages 1930- 1940.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Beyond the zipf-mandelbrot law in quantitative linguistics", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Marcelo", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Montemurro", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Physica A: Statistical Mechanics and its Applications", |
| "volume": "300", |
| "issue": "3-4", |
| "pages": "567--578", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcelo A Montemurro. 2001. Beyond the zipf- mandelbrot law in quantitative linguistics. Physica A: Statistical Mechanics and its Applications, 300(3- 4):567-578.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Probing neural network comprehension of natural language arguments", |
| "authors": [ |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Niven", |
| "suffix": "" |
| }, |
| { |
| "first": "Hung-Yu", |
| "middle": [], |
| "last": "Kao", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4658--4664", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothy Niven and Hung-Yu Kao. 2019. Probing neu- ral network comprehension of natural language ar- guments. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 4658-4664.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Natural Language Annotation for Machine Learning: A guide to corpus-building for applications", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Amber", |
| "middle": [], |
| "last": "Stubbs", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Pustejovsky and Amber Stubbs. 2012. Natu- ral Language Annotation for Machine Learning: A guide to corpus-building for applications. O'Reilly Media, Inc.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Corpus annotation through crowdsourcing: Towards best practice guidelines", |
| "authors": [ |
| { |
| "first": "Marta", |
| "middle": [], |
| "last": "Sabou", |
| "suffix": "" |
| }, |
| { |
| "first": "Kalina", |
| "middle": [], |
| "last": "Bontcheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Derczynski", |
| "suffix": "" |
| }, |
| { |
| "first": "Arno", |
| "middle": [], |
| "last": "Scharl", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "859--866", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marta Sabou, Kalina Bontcheva, Leon Derczynski, and Arno Scharl. 2014. Corpus annotation through crowdsourcing: Towards best practice guidelines. In LREC, pages 859-866.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Towards debiasing fact verification models", |
| "authors": [ |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Darsh", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Yun Jie Serene", |
| "middle": [], |
| "last": "Yeo", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Roberto Filizzola", |
| "suffix": "" |
| }, |
| { |
| "first": "Enrico", |
| "middle": [], |
| "last": "Ortiz", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Santus", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3410--3416", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tal Schuster, Darsh Shah, Yun Jie Serene Yeo, Daniel Roberto Filizzola Ortiz, Enrico Santus, and Regina Barzilay. 2019. Towards debiasing fact verification models. In Proceedings of the Conference on Em- pirical Methods in Natural Language Processing (EMNLP), pages 3410-3416.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "On the importance of delexicalization for fact verification", |
| "authors": [ |
| { |
| "first": "Sandeep", |
| "middle": [], |
| "last": "Suntwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Mithun", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Sharp", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3404--3409", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sandeep Suntwal, Mithun Paul, Rebecca Sharp, and Mihai Surdeanu. 2019. On the importance of delexi- calization for fact verification. In Proceedings of the Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 3404-3409.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "FEVER: A Large-scale Dataset for Fact Extraction and VERification", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Thorne", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| }, |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Christodoulopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Arpit", |
| "middle": [], |
| "last": "Mittal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "809--819", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018. FEVER: A Large-scale Dataset for Fact Extraction and VERification. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics, pages 809-819.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>: High-productivity cues</td></tr><tr><td>those methods inflate the productivity of rare cues</td></tr><tr><td>that appear exclusively in the smallest class.</td></tr><tr><td>Productivity values alone are not necessarily</td></tr><tr><td>comparable across datasets.</td></tr></table>" |
| }, |
| "TABREF3": { |
| "text": "Top seven cues by harmonic mean of utility and coverage", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "text": "Highest DCI skip-grams, i.e. most classinformative superficial features, in the English FEVER dataset", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |