| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:42:14.641784Z" |
| }, |
| "title": "Distilling the Evidence to Augment Fact Verification Models", |
| "authors": [ |
| { |
| "first": "Beatrice", |
| "middle": [], |
| "last": "Portelli", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Udine", |
| "location": {} |
| }, |
| "email": "portelli.beatrice@spes.uniud.it" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "CSAIL", |
| "institution": "", |
| "location": { |
| "region": "MIT" |
| } |
| }, |
| "email": "jzhao7@mit.edu" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "CSAIL", |
| "institution": "", |
| "location": { |
| "region": "MIT" |
| } |
| }, |
| "email": "tals@csail.mit.edu" |
| }, |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Serra", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Udine", |
| "location": {} |
| }, |
| "email": "giuseppe.serra@uniud.it" |
| }, |
| { |
| "first": "Enrico", |
| "middle": [], |
| "last": "Santus", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "CSAIL", |
| "institution": "", |
| "location": { |
| "region": "MIT" |
| } |
| }, |
| "email": "esantus@mit.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The alarming spread of fake news in social media, together with the impossibility of scaling manual fact verification, motivated the development of natural language processing techniques to automatically verify the veracity of claims. Most approaches perform a claim-evidence classification without providing any insights about why the claim is trustworthy or not. We propose, instead, a model-agnostic framework that consists of two modules: (1) a span extractor, which identifies the crucial information connecting claim and evidence; and (2) a classifier that combines claim, evidence, and the extracted spans to predict the veracity of the claim. We show that the spans are informative for the classifier, improving performance and robustness. Tested on several state-of-the-art models over the FEVER dataset, the enhanced classifiers consistently achieve higher accuracy while also showing reduced sensitivity to artifacts in the claims.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The alarming spread of fake news in social media, together with the impossibility of scaling manual fact verification, motivated the development of natural language processing techniques to automatically verify the veracity of claims. Most approaches perform a claim-evidence classification without providing any insights about why the claim is trustworthy or not. We propose, instead, a model-agnostic framework that consists of two modules: (1) a span extractor, which identifies the crucial information connecting claim and evidence; and (2) a classifier that combines claim, evidence, and the extracted spans to predict the veracity of the claim. We show that the spans are informative for the classifier, improving performance and robustness. Tested on several state-of-the-art models over the FEVER dataset, the enhanced classifiers consistently achieve higher accuracy while also showing reduced sensitivity to artifacts in the claims.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The increased quantity of information that circulates in social media and on the Web every day, together with the high cost of assessing its veracity, has demanded the application of natural language processing (NLP) techniques to the task of fact verification. In the last years, the NLP community has proposed a large number of datasets and approaches for addressing this task, facing complicated challenges that are still far from being solved.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The task of fact verification can be split into (i) retrieving one or more candidate pieces of evidence; (ii) assessing whether they are either supporting or refuting a claim, or whether they contain insufficient information to state either of the above. In this paper, we mostly focus on the reasoning between the claim and the evidence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To generate models that work on real world data, fact verification solutions are expected to: (i) perform well not only on synthetic datasets but also in realistic scenarios, where both text form and text content are highly unpredictable; (ii) produce transparent decisions, providing an explanation for their verdict, so that the readers may consider whether to trust them or not. To address these two requirements, we propose a model-agnostic framework that includes two modules: (i) a span extractor that aims to identify in the evidence the pieces of relevant information that are informative with respect to the claim; (ii) a classifier that uses the claim, evidence and extracted spans to predict whether the evidence is supporting, refuting or containing insufficient information. The spans extracted by the first module are useful to enhance the classifier and inform the user. Humans can in fact exploit the spans to effectively understand why a claim is true or false.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We evaluate our pipeline with three highly performing neural models on the FEVER dataset (Thorne et al., 2018) , comparing the uninformed to the informed setting. While this dataset includes ground truth for both evidence retrieval and evidence classification, in this paper we only exploit the latter annotations. Our experiments show that the models informed with the extracted spans consistently achieve higher performance than their uninformed counterparts, demonstrating the usefulness of spans. We also evaluate our models on the challenging SYMMETRIC FEVER dataset (Schuster et al., 2019) , which tests the system's robustness in the absence of FEVER's artifacts. We find the models trained with our pipeline to achieve higher accuracy.", |
| "cite_spans": [ |
| { |
| "start": 89, |
| "end": 110, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 572, |
| "end": 595, |
| "text": "(Schuster et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Finally, we assess the quality of the extracted spans as decision rationales to be shown to the end user. Manually examining a subset of outputs shows that 67% of the support and 88% of the refute spans are well explanatory with respect to the decision, leading to an aggregated score of 75%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Fake news detection has recently gained interest in the NLP community. Most of the initial works have focused on style (Feng et al., 2012) and linguistic approaches (P\u00e9rez-Rosas and Mihalcea, 2015) . Despite the good performance in synthetic datasets, these methods failed when applied to real-world data. New approaches based on fact verification over retrieved evidence have therefore taken the stage in the literature.", |
| "cite_spans": [ |
| { |
| "start": 119, |
| "end": 138, |
| "text": "(Feng et al., 2012)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 165, |
| "end": 197, |
| "text": "(P\u00e9rez-Rosas and Mihalcea, 2015)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Datasets. Several fact verification datasets were developed over the last decade. Vlachos and Riedel (2014) created a dataset which consisted of 221 statements and hyperlinks to pieces of evidence of various formats. Many datasets were created in the following years, with collections of claims of increasing size and various kinds of additional information. Among them Ferreira and Vlachos (2016)'s debunking dataset (300 rumoured claims and 2,595 associated news articles) and Wang (2017)'s LIAR dataset (12,836 short statements labeled for veracity, topic and various metadata on the speaker). In the last years, most systems have been developed over FEVER (Thorne et al., 2018) , a large-scale dataset for Fact Extraction and VERification that consists of 185,445 claims and their related evidence, labeled as either supporting, refuting or not containing enough information.", |
| "cite_spans": [ |
| { |
| "start": 660, |
| "end": 681, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Approaches. There has been a large development since the first approaches for fact verification (Ferreira and Vlachos, 2016; Wang, 2017; Long et al., 2017) . To provide a strong base-line for FEVER, Thorne et al. (2018) proposed a pipeline consisting of document and sentence retrieval and a multi-layer perceptron as textual entailment recognizer. More sophisticated models followed. Among them, the Bi-Directional Attention Flow (BiDAF) network (Seo et al., 2016a) , originally introduced for machine comprehension, has been recently adapted to the task of fact verification (Tokala et al., 2019) . BiDAF combines LSTMs with both a context-to-query and query-tocontext attention, to produce a query-aware context representation at multiple hierarchical levels. Nie et al. (2019) introduced the Neural Semantic Matching Networks (NSMNs), which aligns two encoded texts and computes the semantic matching between the aligned representations with LSTMs and used it to earn the first place in the first competitions organized on the FEVER dataset. Soleimani et al. (2019) exploits the contextualized representations of a pre-trained BERT (Devlin et al., 2019) model for both sentence selection and fact verification.", |
| "cite_spans": [ |
| { |
| "start": 96, |
| "end": 124, |
| "text": "(Ferreira and Vlachos, 2016;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 125, |
| "end": 136, |
| "text": "Wang, 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 137, |
| "end": 155, |
| "text": "Long et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 199, |
| "end": 219, |
| "text": "Thorne et al. (2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 447, |
| "end": 466, |
| "text": "(Seo et al., 2016a)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 577, |
| "end": 598, |
| "text": "(Tokala et al., 2019)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 763, |
| "end": 780, |
| "text": "Nie et al. (2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1046, |
| "end": 1069, |
| "text": "Soleimani et al. (2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1136, |
| "end": 1157, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Given a claim C = {c 1 , . . . , c n } and a piece of evidence E = {e 1 , . . . , e m }, two word sequences of length n and m respectively, the fact verification problem requires predicting the relation rel = {(S)upports, (R)efutes, (I)nsufficient} between E and C. Framework. We propose a pipeline of two modules: a span extractor M span and a classifier M classifier . The goal of M span (C, E) is to identify polarizing pieces of information {e i 1 , . . . , e i N } in E without which rel(E, C) would be neutral (i.e. C would neither be entailed nor contradicted by E). The identified pieces of information are passed to M classifier , together with C and E, to perform a three-label classification aimed at predicting rel(E, C):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "M classifier (C, E, {e i 1 , . . . , e i N }) = l \u2208 {S, R, I}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We utilize the TokenMasker architecture from Shah et al. (2020) for M span . This masker was developed to identify the minimal group of tokens without which E would be neutral with respect to C. M span is trained by getting feedback from a pretrained neutrality classifier. Shah et al. (2020) use the ESIM model with GloVe embeddings trained on FEVER as a neutrality classifier. We choose to use the RoBERTa model (Liu et al., 2019) in-Figure 2: Framework outline: (i) the claim and the evidence pass through the span extractor, which quantifies the relative importance of their words; (ii) claim, evidence and spans are then passed to the classification module, which decides whether the evidence is supporting, refuting or insufficient to judge the claim. stead, pretrained on an entailment task over a multigenre corpus (i.e. three-label classification: entailment/neutral/contradiction on the MULTINLI dataset (Williams et al., 2018) ).", |
| "cite_spans": [ |
| { |
| "start": 414, |
| "end": 432, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 914, |
| "end": 937, |
| "text": "(Williams et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Span Extractor", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The choice of using a rationale-style extractor (Shah et al., 2020) is due to its ability to provide informative spans that can be used as explanations to the relation of the evidence with the claim. This approach was shown to perform better than simply relying on the internal attention weights of a classifier (Lei et al., 2016; Jain and Wallace, 2019) .", |
| "cite_spans": [ |
| { |
| "start": 312, |
| "end": 330, |
| "text": "(Lei et al., 2016;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 331, |
| "end": 354, |
| "text": "Jain and Wallace, 2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Span Extractor", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To test our assumption, we consider three neural network architectures that have achieved the best performance on the first FEVER shared Task recently: BiDAF (Seo et al., 2016b) , NSMN (Nie et al., 2019) and BERT (Devlin et al., 2019) . Note that the architecture of M classifier is independent of M span . The spans extracted by M span are forwarded to the classifier by concatenating them to the original evidence, followed by a separator token. BiDAF consists of four layers: (i) the embedding layer, which encodes two raw text sequences (i.e. C and E) into two vector sequences\u0108 and\u00ca; (ii) the attention layer, which computes the attention scores between the two sequences and returns two attended sequences C A and E A ; (iii) the modeling layer, which takes C A and E A as input and outputs two fixed size vectors,\u0108 A and\u00ca A , that capture the semantic similarity between the original sequences; and (iv) the output layer, which takes\u0108 A and\u00ca A and returns the output labels. NSMN encodes C and E into vector sequences\u0108 and\u00ca, similarly to BiDAF. It then applies an alignment layer, which computes the alignment matrix, A =\u0108 T\u00ca , and the aligned representations, C A and E A , using\u0108,\u00ca, A. It follows a matching layer, which performs semantic matching using LSTM between C A and\u0108, as well as E A and\u00ca, to output matching matrices M C and M E , which are finally pooled by the output layer and mapped to output labels. BERT (we use the base-uncased version) consists of 12 encoder layers with self-attention (enc 1 , . . . , enc 12 ) and one classification layer. Each encoder enc i takes an input sequence I i\u22121 and outputs I i , a sequence of the same length where each token is replaced with an embedding capturing its relationship with the other words in I i\u22121 . The output of enc i becomes the input of enc i+1 . I 0 is set as the concatenation of C and E, preceded by the special [CLS] token. The output of the last encoder enc 12 is therefore a highly embedded representation of C and E. It is passed to the classification layer which maps the representation of the [CLS] token to the output labels.", |
| "cite_spans": [ |
| { |
| "start": 158, |
| "end": 177, |
| "text": "(Seo et al., 2016b)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 185, |
| "end": 203, |
| "text": "(Nie et al., 2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 213, |
| "end": 234, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classifiers", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We evaluate the three classifiers described in section 3 in two conditions: uninformed (W/O) and informed (With), where the latter refers to the utilization of the information extracted by M span .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We use the FEVER dataset to train all of our classifiers. We evaluate the classifiers both on FEVER and on SYMMETRIC FEVER. FEVER dataset (Thorne et al., 2018) : the current largest available Wikipedia-based dataset, consisting of 185,445 claims. Each claim is matched with supporting or refuting evidence from Wikipedia or with a \"not enough information\" label.", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 159, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We use the development set from FEVER's shared-task as our test set (containing 19,998 samples). We randomly split FEVER's training set into our training and validation sets. Following this process, we have 125,451 samples in our training set (73,369 support, 23,109 refute, and 28,973 insufficient information).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "While evidence sentences for supporting and refuting examples are provided in the ground truth, those for the \"insufficient information\" were obtained by us. We use the document retrieval module of the best performing system on the first FEVER Shared Task (Nie et al., 2019) . Given a claim and the Wikipedia dump provided with the FEVER dataset, this document retrieval module returns a list of Wikipedia articles which are possibly related to the claim, ranked with a score calculated by comparing the claim, the title of the article and its first sentence. We keep the highest scoring document. Thereafter, we pick the sentence with the highest TF-IDF similarity with the claim. Also, to disambiguate pronouns, we extend all evidence sentences by appending the title of their Wikipedia page. SYMMETRIC FEVER (Schuster et al., 2019) : a smaller unbiased extension of FEVER, consisting of 712 claim-evidence pairs which were synthetically generated from FEVER to remove strong cues in the claims which could allow predicting the label without looking at the evidence (give-away phrases).", |
| "cite_spans": [ |
| { |
| "start": 256, |
| "end": 274, |
| "text": "(Nie et al., 2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 811, |
| "end": 834, |
| "text": "(Schuster et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "TokenMasker is trained on the same dataset and configuration as Shah et al. (2020). However, we replace their neutrality classifier with a RoBERTa classifier, pretrained on MNLI. This model is trained once and used in inference mode for all subsequent experiments. BiDAF is trained for 12 epochs using cross entropy loss and Adam optimizer with initial learning rate 1e-3. We use a dropout probability of 0.2 and a batch size of 8. NSMN is trained for 12 epochs using cross entropy loss and Adam optimizer with initial learning rate 1e-4. We use a dropout probability of 0.5 and a batch size of 8. BERT is fine-tuned for 8 epochs using cross entropy loss and Adam optimizer with initial learning rate 2e-5. We use a dropout probability of 0.1 and a batch size of 16.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hyperparameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "These hyperparameters were found to achieve the highest accuracy on our validation set. For our final classifiers, we fix these settings and retrain them using the full FEVER training set. Table 1 shows the results obtained in our experiments on both FEVER and the SYMMETRIC dataset. Scores are much higher in the first dataset as the systems can rely on give-away phrases, some words in the claims which have a high correlation with the correct output label regardless of the evidence. This situation does not exist in the SYMMETRIC dataset, where the give-away phrases have been eliminated. As expected, all systems perform worse on this dataset, but the drop in performance is more significant for the uninformed models (W/O) than for the informed (With) ones. In fact, the informed models consistently perform better than the uninformed ones (W/O), often obtaining statistical significance. While the difference in performance between W/O and With is particularly relevant for BiDAF and NSMN, it thins for BERT, which is already a strong classifier leveraging on a robust pretraining. Output Explainability. We also manually evaluated the spans for 100 randomly extracted claim-output pairs, to assess whether they represented an understandable explanation for the verdict. The spans were deemed explanatory in 88% of the cases for refute claims and 67% of the support claims, which leads to an aggregated score of 75%. The extracted spans are therefore not only informative to the classifier, but can also be used to produce human-readable justifications for a positive or negative relation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 189, |
| "end": 196, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hyperparameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "This paper has introduced a classifier-agnostic framework that allows fact verification models to improve their performance and robustness, utilizing concise spans of the available evidence sentences. The experiments have shown that the extracted spans are indeed informative for the final classifier, supporting the usefulness of the framework. Furthermore, this work opens the possibility of providing to the human users a justification for the model's predictions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In In NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Syntactic stylometry for deception detection", |
| "authors": [ |
| { |
| "first": "Song", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Ritwik", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Song Feng, Ritwik Banerjee, and Yejin Choi. 2012. Syntactic stylometry for deception detection. In ACL.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Emergent: a novel data-set for stance classification", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Ferreira", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Ferreira and Andreas Vlachos. 2016. Emer- gent: a novel data-set for stance classification. In HLT-NAACL.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Attention is not explanation", |
| "authors": [ |
| { |
| "first": "Sarthak", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| }, |
| { |
| "first": "Byron", |
| "middle": [ |
| "C" |
| ], |
| "last": "Wallace", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarthak Jain and Byron C. Wallace. 2019. Attention is not explanation. In NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Rationalizing neural predictions", |
| "authors": [ |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| }, |
| { |
| "first": "Tommi", |
| "middle": [], |
| "last": "Jaakkola", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "107--117", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1011" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tao Lei, Regina Barzilay, and Tommi Jaakkola. 2016. Rationalizing neural predictions. In Proceedings of the 2016 Conference on Empirical Methods in Nat- ural Language Processing, pages 107-117, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke S. Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. ArXiv, abs/1907.11692.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Fake news detection through multi-perspective speaker profiles", |
| "authors": [ |
| { |
| "first": "Yunfei", |
| "middle": [], |
| "last": "Long", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rong", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Minglei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Chu-Ren", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yunfei Long, Qin Lu, Rong Xiang, Minglei Li, and Chu-Ren Huang. 2017. Fake news detection through multi-perspective speaker profiles. In IJC- NLP.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Combining fact extraction and verification with neural semantic matching networks", |
| "authors": [ |
| { |
| "first": "Yixin", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Haonan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yixin Nie, Haonan Chen, and Mohit Bansal. 2019. Combining fact extraction and verification with neu- ral semantic matching networks. In In AAAI.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Experiments in open domain deception detection", |
| "authors": [ |
| { |
| "first": "Ver\u00f3nica", |
| "middle": [], |
| "last": "P\u00e9rez", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Rosas", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ver\u00f3nica P\u00e9rez-Rosas and Rada Mihalcea. 2015. Ex- periments in open domain deception detection. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Towards debiasing fact verification models", |
| "authors": [ |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Darsh", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Yun Jie Serene", |
| "middle": [], |
| "last": "Yeo", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Roberto Filizzola", |
| "suffix": "" |
| }, |
| { |
| "first": "Enrico", |
| "middle": [], |
| "last": "Ortiz", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Santus", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP-IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1341" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tal Schuster, Darsh Shah, Yun Jie Serene Yeo, Daniel Roberto Filizzola Ortiz, Enrico Santus, and Regina Barzilay. 2019. Towards debiasing fact verification models. In In EMNLP-IJCNLP.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Bidirectional attention flow for machine comprehension", |
| "authors": [ |
| { |
| "first": "Min Joon", |
| "middle": [], |
| "last": "Seo", |
| "suffix": "" |
| }, |
| { |
| "first": "Aniruddha", |
| "middle": [], |
| "last": "Kembhavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Farhadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Min Joon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2016a. Bidirectional at- tention flow for machine comprehension. CoRR, abs/1611.01603.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Bidirectional attention flow for machine comprehension", |
| "authors": [ |
| { |
| "first": "Minjoon", |
| "middle": [], |
| "last": "Seo", |
| "suffix": "" |
| }, |
| { |
| "first": "Aniruddha", |
| "middle": [], |
| "last": "Kembhavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Farhadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2016b. Bidirectional atten- tion flow for machine comprehension. ArXiv, abs/1611.01603.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Automatic fact-guided sentence modification", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Darsh", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "Regina", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Barzilay", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Darsh J Shah, Tal Schuster, and Regina Barzilay. 2020. Automatic fact-guided sentence modification. In In AAAI.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Bert for evidence retrieval and claim verification", |
| "authors": [ |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Soleimani", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcel", |
| "middle": [], |
| "last": "Worring", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir Soleimani, Christof Monz, and Marcel Worring. 2019. Bert for evidence retrieval and claim verifica- tion.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "FEVER: a large-scale dataset for fact extraction and verification", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Thorne", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018. FEVER: a large-scale dataset for fact extraction and verification. In In NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "AttentiveChecker: A bi-directional attention flow mechanism for fact verification", |
| "authors": [ |
| { |
| "first": "Santosh", |
| "middle": [], |
| "last": "Tokala", |
| "suffix": "" |
| }, |
| { |
"first": "Vishal",
"middle": [],
"last": "G",
| "suffix": "" |
| }, |
| { |
| "first": "Avirup", |
| "middle": [], |
| "last": "Saha", |
| "suffix": "" |
| }, |
| { |
| "first": "Niloy", |
| "middle": [], |
| "last": "Ganguly", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1230" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Santosh Tokala, Vishal G, Avirup Saha, and Niloy Gan- guly. 2019. AttentiveChecker: A bi-directional at- tention flow mechanism for fact verification. In In NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Fact checking: Task definition and dataset construction", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "LTCSS@ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Vlachos and Sebastian Riedel. 2014. Fact checking: Task definition and dataset construction. In LTCSS@ACL.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "liar, liar pants on fire\": A new benchmark dataset for fake news detection", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Yang Wang. 2017. \"liar, liar pants on fire\": A new benchmark dataset for fake news detection. In ACL.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel Bow- man. 2018. A broad-coverage challenge corpus for sentence understanding through inference. In In NAACL-HLT.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Examples of claim-evidence pairs from the FEVER dataset. The evidence spans extracted by our system are underlined and presented in color." |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td>Claim</td></tr></table>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
"text": "Claim Susan Sarandon was nominated for five Emmy Awards. Evidence [wiki/Susan Sarandon] On television, she is a five-time Emmy Award nominee, including for her guest roles on the sitcoms Friends (2001) and Malcolm in the Middle (2002), and the TV films Bernard and Doris (2007) and You Don't Know Jack (2010). Label SUPPORT"
| }, |
| "TABREF2": { |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "type_str": "table", |
| "text": "Accuracy of the models on the FEVER and the SYMMETRIC datasets. Results for BERT are the average over 5 runs with the same hyperparameters. Significance: * if p < 0.1, ** if p < 0.05." |
| } |
| } |
| } |
| } |