| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:42:12.536701Z" |
| }, |
| "title": "Language Models as Fact Checkers?", |
| "authors": [ |
| { |
| "first": "Nayeon", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Hong Kong University of Science and Technology", |
| "location": {} |
| }, |
| "email": "nayeon.lee@connect.ust.hk" |
| }, |
| { |
| "first": "Belinda", |
| "middle": [ |
| "Z" |
| ], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "belindali@fb.com" |
| }, |
| { |
| "first": "Sinong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "sinongwang@fb.com" |
| }, |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "haom@fb.com" |
| }, |
| { |
| "first": "Madian", |
| "middle": [], |
| "last": "Khabsa", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "mkhabsa@fb.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Recent work has suggested that language models (LMs) store both common-sense and factual knowledge learned from pre-training data. In this paper, we leverage this implicit knowledge to create an effective end-to-end fact checker using a solely a language model, without any external knowledge or explicit retrieval components. While previous work on extracting knowledge from LMs have focused on the task of open-domain question answering, to the best of our knowledge, this is the first work to examine the use of language models as fact checkers. In a closed-book setting, we show that our zero-shot LM approach outperforms a random baseline on the standard FEVER task, and that our finetuned LM compares favorably with standard baselines. Though we do not ultimately outperform methods which use explicit knowledge bases, we believe our exploration shows that this method is viable and has much room for exploration. * Work done while at Facebook AI. (a) Traditional fact-checking pipeline.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Recent work has suggested that language models (LMs) store both common-sense and factual knowledge learned from pre-training data. In this paper, we leverage this implicit knowledge to create an effective end-to-end fact checker using a solely a language model, without any external knowledge or explicit retrieval components. While previous work on extracting knowledge from LMs have focused on the task of open-domain question answering, to the best of our knowledge, this is the first work to examine the use of language models as fact checkers. In a closed-book setting, we show that our zero-shot LM approach outperforms a random baseline on the standard FEVER task, and that our finetuned LM compares favorably with standard baselines. Though we do not ultimately outperform methods which use explicit knowledge bases, we believe our exploration shows that this method is viable and has much room for exploration. * Work done while at Facebook AI. (a) Traditional fact-checking pipeline.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Pre-trained language models have recently lead to significant advancements in wide variety of NLP tasks, including question-answering, commonsense reasoning, and semantic relatedness (Devlin et al., 2018; Radford et al., 2019; Radford et al., 2018) . These models are typically trained on documents mined from Wikipedia (among other websites). Recently, a number of works have found that LMs store a surprising amount of world knowledge, focusing particularly on the task of open-domain question answering (Petroni et al., 2019; Roberts et al., 2020) . In this paper, we explore whether we can leverage the knowledge in LMs for fact checking.", |
| "cite_spans": [ |
| { |
| "start": 183, |
| "end": 204, |
| "text": "(Devlin et al., 2018;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 205, |
| "end": 226, |
| "text": "Radford et al., 2019;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 227, |
| "end": 248, |
| "text": "Radford et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 506, |
| "end": 528, |
| "text": "(Petroni et al., 2019;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 529, |
| "end": 550, |
| "text": "Roberts et al., 2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We propose an approach (Fig. 1b ) that replaces the document retriever and evidence selector models in traditional fact-checking ( Fig. 1a ) with a single language model that generates masked tokens. This offers a number of advantages over the traditional approach: first, the procedure is overall simpler, requiring fewer resources and computation -we do not need to maintain an explicit knowledge base external to our LM, and we do not need an explicit retrieval step. The latter in particular can lead to a huge speedup in the system, since we can skip the time-consuming step of searching over a potentially massive space of documents. Second, LMs are widely-available and are currently attracting significant research effort. Thus, research in language-modeling, particularly in improving LMs ability to memorizing knowledge, may also improve the overall effectiveness of our fact-checking pipeline. Lastly, our system further shifts the paradigm towards \"one model for all\" -LMs have been used for a wide variety of tasks, and now also for fact checking.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 23, |
| "end": 31, |
| "text": "(Fig. 1b", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 131, |
| "end": 138, |
| "text": "Fig. 1a", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In order to determine the feasibility of our approach, we start with a human review study where participants are given a claim from FEVER (Thorne et al., 2018a) , and are asked to validate the claim using only a BERT language model. We found that users had reasonable success in determining claim validity. Empowered by the results, we design an end-to-end neural approach for utilizing BERT as a fact checker (see Figure 1b) . At a high level, we first generate an evidence sentence by masking the claim and using BERT to \"fill in\" the mask. We then feed the generated sentence, alongside the original claim, to a verification classifier model that classifies whether the claim is supported, refuted, or the information is insufficient to make a call.", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 160, |
| "text": "(Thorne et al., 2018a)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 415, |
| "end": 425, |
| "text": "Figure 1b)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The rest of the paper is organized as such: Section 2 gives an overview of the problem space. Section 3 describes our preliminary experiments. Sections 4 and 5 highlights our main methods (i.e. end-to-end model, experimental setup), and 6 reports our main results. Sections 7 and 8 conclude our paper with a discussion and future works.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Task The main goal of fact-checking is to validate the truthfulness of a given claim. Each claim is assigned one of three labels: support, refute, or not enough information (NEI) to verify.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Dataset We use FEVER (Thorne et al., 2018a) , a large-scale fact-checking dataset with around 5.4M Wikipedia documents. Claims were generated by extracting sentences from Wikipedia (with possible mutations), and were annotated by humans with their verification label and/or evidence sentences from Wikipedia.", |
| "cite_spans": [ |
| { |
| "start": 21, |
| "end": 43, |
| "text": "(Thorne et al., 2018a)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Traditional pipeline Traditional fact-checking systems ( Fig. 1a) access knowledge within an external knowledge base (i.e. Wikipedia) to validate a claim. They use a multi-step, pipelined approach, which involve IR-modules, such as document retrievers and evidence selectors, for retrieving the appropriate evidence, and verification modules that take in {claim, [evidences]} pairs and predict a final verification label Our pipeline As shown in Fig.1b , our proposed pipeline replaces both the external knowledge base as well as the IR modules with a pretrained language model. In the remainder of this paper, we utilize BERT. Future work can explore other language models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 57, |
| "end": 65, |
| "text": "Fig. 1a)", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 446, |
| "end": 452, |
| "text": "Fig.1b", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Querying the Language Model In Petroni et al. (2019) , language models were used as knowledge base to answer open-domain questions. To do this, the authors devised a probe known as \"LAMA\", which generates fill-in-the-blank cloze-style statements from questions. For example, in order to answer the question 'Where is Microsoft's headquarter?', the question would be rewritten as as 'Microsoft's headquarter is in [MASK] ' and fed into a language model for the answer.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 52, |
| "text": "Petroni et al. (2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 413, |
| "end": 419, |
| "text": "[MASK]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Inspired by LAMA (Petroni et al., 2019) , we also generate evidences from language models through fill-in-the-blank style tasks.", |
| "cite_spans": [ |
| { |
| "start": 17, |
| "end": 39, |
| "text": "(Petroni et al., 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In order to determine the feasibility of our approach, we began by conducting a human review study on 50 random-selected claims from FEVER (Thorne et al., 2018a) . Participants were asked to validate each claim with only a language model, by following these steps:", |
| "cite_spans": [ |
| { |
| "start": 139, |
| "end": 161, |
| "text": "(Thorne et al., 2018a)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1. Mask a token from the claim, depending on component of the claim we wish to verify: Thomas Jefferson founded the University of Virginia after retiring \u2192 Thomas Jefferson founded the University of [MASK] after retiring.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this example, the user is verifying which university was founded by Thomas Jefferson. Note that the user could alternatively choose to mask Thomas Jefferson in order to verify the founder of University of Virginia.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "2. Get the top-1 predicted token from the LM.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Top-1 predicted token = Virginia.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "3. If predicted token matches the masked token, the claim is supported, otherwise it is refuted.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Virginia \u2261 Virginia \u2192 SUPPORTS", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In other words, we asked participants to serve as the \"masking\" and \"verification classifier\" components of our fact-checking pipeline in Fig. 1b .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 138, |
| "end": 145, |
| "text": "Fig. 1b", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Two participants examined the 50 claims, and eventually achieved an average accuracy of 55%. 1 We also conducted this zero-shot study on a larger scale and in a more systematic way, by taking all claims in the full FEVER dataset, and always masking the last token. 2 Otherwise, we preserve steps 2 and 3 from above. Even with this na\u00efve token-matching approach, we were able to obtain precision 56% and F1 59% for the positive label (SUPPORT).", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 94, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our preliminary experiments' results illustrate that, with a good masking mechanism and verification model, language models can indeed feasibly be used for fact-checking.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Exploratory Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Enlightened by results from our preliminary experiments, we devise an end-to-end model that automates and improve upon the masking and verification steps that were conducted by humans. Specifically, we resolve two limitations: 1. manual masking of claims, and 2. na\u00efve validation of the predicted token that fails to deal with synonyms and other semantic variants of the answer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "End-to-End Fact-Checking Model", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We mask the last named entity in the claim, which we identify using an offthe-shelf Named-Entity-Recognition (NER) model from spaCy Honnibal and Montani (2017) . In particular, we choose to mask named entities in order to better ensure that the token we mask actually makes use of the knowledge encoded in language models. (Otherwise, we may mask tokens that only make use of the LM's ability to recover linguistic structures and syntax -for instance, masking stopwords). This hinges on the observation that, for most claims, its factuality hinges upon the correctness of its entities (and the possible relations between them), and not on how specifically the claim is phrased.", |
| "cite_spans": [ |
| { |
| "start": 132, |
| "end": 159, |
| "text": "Honnibal and Montani (2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Masking", |
| "sec_num": null |
| }, |
| { |
| "text": "Verification using Entailment To move beyond na\u00efvely matching predicted and gold tokens, we leverage a textual entailment model from Al-lenNLP to validate our LM predictions. Note that textual entailment models predict the directional truth relation between a text pair (i.e. \"sentence t entails h\" if, typically, a human reading t would infer that h is most likely true).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Masking", |
| "sec_num": null |
| }, |
| { |
| "text": "Full-pipeline steps Detailed steps for our end-toend model (Fig. 2) are as follows:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 59, |
| "end": 67, |
| "text": "(Fig. 2)", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automatic Masking", |
| "sec_num": null |
| }, |
| { |
| "text": "1. Masked the last named entity found by the NER model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Masking", |
| "sec_num": null |
| }, |
| { |
| "text": "2. Get the top-1 predicted token from the LM, and fill in the [MASK] accordingly to create the \"evidence\" sentence. 3. Using the claim and generated \"evidence\" sentence, obtain entailment \"features\" using outputs from the last layer of the pretrained entailment model (before the softmax).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Masking", |
| "sec_num": null |
| }, |
| { |
| "text": "4. Input the entailment features into a multi-layer perceptron (MLP) for final fact-verification prediction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Automatic Masking", |
| "sec_num": null |
| }, |
| { |
| "text": "We conduct our experiments on the FEVER claim verification dataset (Thorne et al., 2018a) using the standard provided splits. We use the publicly available 24-layer BERT-Large as our language model, which was pre-trained on Wikipedia in 2018. 3 The MLP was optimized using Adam, and trained with a mini-batch size of 32. The learning rate was set to 0.001 with max epoch size 200 and epoch patience of 30. The embedding size of the entailment features (from the pre-trained entailment model) was 400, and our MLP classifier had hidden size of 100.", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 89, |
| "text": "(Thorne et al., 2018a)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 243, |
| "end": 244, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment setup", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The traditional pipeline was evaluated using FEVER scoring, which is a stricter form of scoring that treats predictions to be correct only when correct evidences were retrieved. Since our pipeline does not utilize an external knowledge base, and does not have an evidence retriever, we only examine the correctness of the final verification step using precision, recall, F1 and accuracy. We leave generating evidences with language models for future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation Metric", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We introduce two language model baselines for comparison. The first baseline, BERT f reeze , uses an MLP layer on top of a frozen BERT encoder to make predictions (gradients backpropagate to the MLP layer only). In this baseline, we aim to extract the already stored knowledge within BERT model as an embedding vector, and avoid finetuning the internal layers, in order to disentangle BERT's knowledge from it's ability to serve as a high-capacity classifier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The second baseline, BERT f inetune , allows all the model layers to be updated based on the factverification loss from the MLP layer. This baseline captures BERT's ability as both a language model, and a high-capacity text encoder.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Note that the dataset is evenly distributed among the three classes, therefore a random baseline would yield an accuracy of 33%. Also note that the Fever-baseline model introduced by the task organizers achieves accuracy score of 48.8% (Thorne et al., 2018b) .", |
| "cite_spans": [ |
| { |
| "start": 236, |
| "end": 258, |
| "text": "(Thorne et al., 2018b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The results of the three models are reported in Table 1. We observe that our proposed approach (BERT asKB) outperforms BERT f reeze on all metrics suggesting that querying language models in QA style is a better approach for extracting their encoded knowledge. Similarly, BERT asKB model achieves an accuracy score of 49% which is comparable to Fever-baseline at 48.8%, except without the need for explicit document retrieval and evidence selection. This suggests that language models, used as sources of knowledge for fact checking, are at least as effective as standard baselines. However, there is still much room for future research, as the state-of-the-art model on the Fever shared task achieves an accuracy score of 68.21% (Thorne et al., 2018b) .", |
| "cite_spans": [ |
| { |
| "start": 730, |
| "end": 752, |
| "text": "(Thorne et al., 2018b)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "On the other hand, we find that BERT asKB lags behind BERT f inetune , as expected, on most metrics. We hypothesize this is due to the high capacity of the model, in comparison, and to the effectiveness of BERT models in text classification. Upon examining the results of these two models closely, we find that BERT asKB struggles mightily with the NEI category (F1 score of 0.24 vs 0.53) indicating that our current approach might need specific modules to better tackle that category. As both models seem to be equally adept in identifying the support class (0.57 vs 0.59 F1), indicating that BERT asKB is unable to distinguish between refute and NEI classes. Future work can further investigate techniques to identify these two categories.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Interestingly, the BERT f reeze achieves an accuracy score of 38% which is slightly better than a random baseline which achieves 33%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this section, we provide some examples of tokens predicted from BERT to understand the performance of \"evidence generation\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Token Prediction Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "First two examples in Table 2 (a, b) are examples with correct fact-check labels from zeroshot setting. When a claim has enough context, and contains rather rare names such as \"Sarawak\", BERT manages to predict correct tokens. We also provide detailed analysis on the error cases to facilitate future work in making further improvements:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 22, |
| "end": 29, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis of Token Prediction Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "\u2022 One common form of errors is that, the entity type of token prediction is biased towards the way how the training data was written. For example, sentence c from Table 2 illustrates a common claim structure in FEVER dataset which talks about the birth-year of a person (e.g., Tim Roth). However, 100% of our test samples with such structure always predict city/country (e.g., London). The reason is, in Wikipedia, the birth-years are always written in the following structure \"PER-SON (born DATE)\" (e.g., \"Tim Roth (born 14 May 1961)\"), and birth city/country written in \"PERSON was born in city/country\" structure (e.g., \"Roth was born in Dulwich, London\"). Therefore, to obtain birth-year, the claim had to be written as Tim Roth (born [MASK]) to predict correctly.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 163, |
| "end": 170, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis of Token Prediction Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "\u2022 Sentence d is another example that the entity type of token prediction is hard to control. \"is a...\" is a very general prefix phrase, making it hard for BERT model to correctly predict correct entity type.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Token Prediction Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "\u2022 There are lots of short claims in FEVER test set (approx. 1100 samples) which has less than 5 tokens (e.g. sentence e). Since there is very little context, BERT struggles to predict correctly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Token Prediction Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "One of the the main insight we get from these analysis is that, the way the language model is initially pre-trained, greatly determines the way it should be \"queried\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Token Prediction Results", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this paper, we explored a new fact-checking pipeline that use language models as knowledge bases. Unlike previous pipelines that required dedicated components for document retrieval and sentence scoring, our approach simply translates a given claim into a fill-in-the-blank type query and relies on a BERT language model to generate the \"evidence\". Our experiment shows that this approach is comparable to the standard baselines on the FEVER dataset, though not enough to beat the state-of-the-art using the traditional pipeline. However, we believe our approach has strong potential for improvement, and future work can explore using stronger models for generating evidences, or improving the way how we mask claims.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions & Future Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "In the future, we will investigate sequence-tosequence language models such as BART or T5 (Raffel et al., 2019) , that have recently shown to be effective on generative questionanswering (Roberts et al., 2020) . Similarly, our proposed approach seem to struggle with correctly identifying NEI cases, and we plan to investigate adding specific modules to deal with NEI. Lastly, we plan to explore new ways of pre-training language models to better store and encode knowledge.", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 111, |
| "text": "(Raffel et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 187, |
| "end": 209, |
| "text": "(Roberts et al., 2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions & Future Work", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Both participants had NLP background, and both were familiar with FEVER and the fact-checking task. We also assumed both participants were capable of selecting the optimal position to mask.2 We omit examples for which the masked token is not in BERT's vocab.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "It's possible the model was trained on a later Wikipedia dump than what's released as part of FEVER, but pre-training BERT from scratch is beyond the scope of this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank Fabio Petroni for the helpful discussion and inspiration.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Allennlp: A deep semantic natural language processing platform", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Grus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Oyvind", |
| "middle": [], |
| "last": "Tafjord", |
| "suffix": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Dasigi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nelson", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Schmitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.07640" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson Liu, Matthew Pe- ters, Michael Schmitz, and Luke Zettlemoyer. 2018. Allennlp: A deep semantic natural language process- ing platform. arXiv preprint arXiv:1803.07640.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "spacy 2: Natural language understanding with bloom embeddings, convolutional neural networks and incremental parsing", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Honnibal", |
| "suffix": "" |
| }, |
| { |
| "first": "Ines", |
| "middle": [], |
| "last": "Montani", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "7", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Honnibal and Ines Montani. 2017. spacy 2: Natural language understanding with bloom embed- dings, convolutional neural networks and incremen- tal parsing. To appear, 7(1).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdelrahman", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Ves", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1910.13461" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. 2019. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1802.05365" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. arXiv preprint arXiv:1802.05365.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Language models as knowledge bases?", |
| "authors": [ |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Petroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rockt\u00e4schel", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Anton", |
| "middle": [], |
| "last": "Bakhtin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxiang", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "H" |
| ], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.01066" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fabio Petroni, Tim Rockt\u00e4schel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander H Miller, and Se- bastian Riedel. 2019. Language models as knowl- edge bases? arXiv preprint arXiv:1909.01066.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Improving language understanding by generative pre-training", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Narasimhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Salimans", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language understanding by generative pre-training. URL https://s3-us-west-2. amazonaws. com/openai- assets/researchcovers/languageunsupervised/language understanding paper. pdf.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI Blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharan", |
| "middle": [], |
| "last": "Narang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Matena", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanqi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter J", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1910.10683" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2019. Exploring the limits of transfer learning with a unified text-to-text trans- former. arXiv preprint arXiv:1910.10683.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "How much knowledge can you pack into the parameters of a language model", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2002.08910" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Roberts, Colin Raffel, and Noam Shazeer. 2020. How much knowledge can you pack into the pa- rameters of a language model? arXiv preprint arXiv:2002.08910.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Fever: a large-scale dataset for fact extraction and verification", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Thorne", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| }, |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Christodoulopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Arpit", |
| "middle": [], |
| "last": "Mittal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.05355" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018a. Fever: a large-scale dataset for fact extraction and verification. arXiv preprint arXiv:1803.05355.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "The fact extraction and verification (fever) shared task", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Thorne", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| }, |
| { |
| "first": "Oana", |
| "middle": [], |
| "last": "Cocarascu", |
| "suffix": "" |
| }, |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Christodoulopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Arpit", |
| "middle": [], |
| "last": "Mittal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1811.10971" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Thorne, Andreas Vlachos, Oana Cocarascu, Christos Christodoulopoulos, and Arpit Mittal. 2018b. The fact extraction and verification (fever) shared task. arXiv preprint arXiv:1811.10971.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "text": "Traditional fact-checking pipeline (left) vs. Our LM-based pipeline (right)", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "text": "Detailed illustration of our pipeline", |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF1": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Performance comparison between BERT-as-encoder models (BERT_freeze, BERT_finetune) and BERT-as-LM model (BERT-as-KB). (*We report fact-checking label accuracy, not FEVER score -- a stricter form of scoring.)", |
| "html": null |
| }, |
| "TABREF3": { |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Examples of token predictions from BERT in zeroshot setting. a, b are correctly fact-checked examples, and c, d, f are wrongly fact-checked examples.", |
| "html": null |
| } |
| } |
| } |
| } |