| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:42:15.577304Z" |
| }, |
| "title": "Modeling Entity Knowledge for Fact Verification", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Cognitive Services Research", |
| "location": { |
| "addrLine": "1 Microsoft Way", |
| "settlement": "Redmond", |
| "region": "WA", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chenguang", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Cognitive Services Research", |
| "location": { |
| "addrLine": "1 Microsoft Way", |
| "settlement": "Redmond", |
| "region": "WA", |
| "country": "USA" |
| } |
| }, |
| "email": "chezhu@microsoft.com" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Zeng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Cognitive Services Research", |
| "location": { |
| "addrLine": "1 Microsoft Way", |
| "settlement": "Redmond", |
| "region": "WA", |
| "country": "USA" |
| } |
| }, |
| "email": "nzeng@microsoft.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Fact verification is a challenging task of identifying the truthfulness of given claims based on the retrieval of relevant evidence texts. Many claims require understanding and reasoning over external entity information for precise verification. In this paper, we propose a novel fact verification model using entity knowledge to enhance its performance. We retrieve descriptive text from Wikipedia for each entity, and then encode these descriptions by a smaller lightweight network to be fed into the main verification model. Furthermore, we boost model performance by adopting and predicting the relatedness between the claim and each evidence as additional signals. We demonstrate experimentally on a large-scale benchmark dataset FEVER that our framework achieves competitive results with a FEVER score of 72.89% on the test set.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Fact verification is a challenging task of identifying the truthfulness of given claims based on the retrieval of relevant evidence texts. Many claims require understanding and reasoning over external entity information for precise verification. In this paper, we propose a novel fact verification model using entity knowledge to enhance its performance. We retrieve descriptive text from Wikipedia for each entity, and then encode these descriptions by a smaller lightweight network to be fed into the main verification model. Furthermore, we boost model performance by adopting and predicting the relatedness between the claim and each evidence as additional signals. We demonstrate experimentally on a large-scale benchmark dataset FEVER that our framework achieves competitive results with a FEVER score of 72.89% on the test set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The rapid development of online applications provides open and efficient platforms for spreading information. However, false information, including fake news and online rumors, have also been growing and spreading widely over the past several years. Vosoughi et al. (2018) shows that false news travels even faster, deeper and broader than the truth. To prevent harm from this false information, automatically verifying the truthfulness of textual contents is becoming an urgent need for our society. In this work, we study fact verification with the goal of automatically assessing the veracity of a textual claim given supporting evidence.", |
| "cite_spans": [ |
| { |
| "start": 250, |
| "end": 272, |
| "text": "Vosoughi et al. (2018)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most existing methods consider fact verification as a natural language inference task (Angeli and Manning, 2014) . Usually, these systems concatenate claim and its supporting evidence sentences, and then feed them into a classification model (Nie et al., 2019) . Alternatively, previous studies construct graph structures based on claim and evidence, and reason over this graph with graph neural networks (Zhou et al., 2019; Liu et al., 2020) or Transformer models (Zhong et al., 2020) , which are used in top systems in the FEVER challenge (Thorne et al., 2018) . While these studies focus on reasoning based on claim and evidence text, we believe entity knowledge is also important for precise fact verification. For example, given the first claim from FEVER dataset in Table 1 , making the correct verification requires a model to understand what is \"Wii U\" and \"OS X\" and know the fact that they are not Microsoft and Sony platforms. Similarly, for the second claim, the knowledge that \"New York City\" is in United States can also be potentially useful for verifying the claim. This information is not included in the gold evidence provided by the dataset.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 112, |
| "text": "(Angeli and Manning, 2014)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 242, |
| "end": 260, |
| "text": "(Nie et al., 2019)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 405, |
| "end": 424, |
| "text": "(Zhou et al., 2019;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 425, |
| "end": 442, |
| "text": "Liu et al., 2020)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 465, |
| "end": 485, |
| "text": "(Zhong et al., 2020)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 541, |
| "end": 562, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 772, |
| "end": 779, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we present a fact verification model that can effectively incorporate external entity information. Given a claim and its evidence sentences, we first recognize named entities from them, linking them with Wikipedia articles, and then retrieve the lead sections of these articles as the entity descriptions. To make the most of this entity knowledge while not introducing noisy information, we propose a lightweight entity knowledge encoder module for representing external entity knowledge. Our large fact verification network then accesses this knowledge by a unidirectional attention mechanism at each encoding layer. Meanwhile, since the input evidence sentences are obtained by an upstream retrieval module, some evidence may be irrelevant to the claim. Thus, we predict and adopt this relatedness between each evidence and the claim as an auxiliary signal to train our model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We experiment with our approach on FEVER (Thorne et al., 2018) , one influential benchmark dataset for fact verification. FEVER contains over 185k labeled claims and each verifiable claim is paired with several natural language sentences from Wikipedia as their Claim #1: Assassin's Creed has only ever been released on a Microsoft and Sony platform.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 62, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The main video game series consists of nine games , developed by Ubisoft, released on PlayStation 3, PlayStation 4, Xbox 360, Xbox One, Wii U, Microsoft Windows, and OS X platforms . Entity Knowledge: Wii U: The Wii U is a home video game console developed by Nintendo as the successor to the Wii. OS X: macOS (previously Mac OS X and later OS X) is a series of proprietary graphical operating systems developed and marketed by Apple Inc. Verdict: REFUTED Claim #2: Beastie Boys was formed in Australia. Gold Evidence: The Beastie Boys were an American hip hop group from New York City, formed in 1981. Entity Knowledge: New York City: New York City (NYC), often called simply New York, is the most populous city in the United States. Verdict: REFUTED Table 1 : Two motivating examples for fact checking and the FEVER task. Identifying the truthfulness of claims requires understanding and reasoning of entity knowledge within the claim and the evidence sentences. The bold phrases are named entities. Underlined entities are linked to their Wikipedia descriptions, which can potentially provide useful knowledge for verifying the claim. supporting evidence. Our system achieves the state-of-the-art result on label accuracy and competitive result on FEVER score. Ablation study shows that the integration of entity knowledge and auxiliary relatedness signal can effectively improve performance. We then provide a detailed error analysis for our system. In summary, we list our contributions as follows.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 752, |
| "end": 759, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Gold Evidence:", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 We propose to enhance fact verification models with external entity knowledge.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gold Evidence:", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 We design an entity knowledge encoder module and employ unidirectional attention to effectively incorporate entity descriptions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gold Evidence:", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 Empirical results show that our approach achieves competitive performance on the FEVER dataset, and ablation study shows that incorporating entity knowledge is useful for fact verification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gold Evidence:", |
| "sec_num": null |
| }, |
| { |
| "text": "2 Related Work", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gold Evidence:", |
| "sec_num": null |
| }, |
| { |
| "text": "Fact checking is a challenging task aiming to automatically verify the truthfulness of claims. A claim can be a plain text or a triple of (subject, predicate, object) (Nakashole and Mitchell, 2014) , and different fact checking datasets usually provide different evidence sources. Vlachos and Riedel (2014) propose a fact verification dataset by collecting 106 labeled political claims and providing the journalists' analysis material as the evidence. Ferreira and Vlachos (2016) Recently, the FEVER shared task 1.0 (Thorne et al., 2018) attracts attention from the research community. It is a challenge that requires participants to develop automatic fact verification systems to check the truthfulness of human-generated claims by extracted evidence from Wikipedia. Many systems were proposed for this challenging task. Nie et al. (2019) design a Neural Semantic Matching Network that takes the concatenation of all evidence sentences as input. They also propose a two-hop evidence enhancement process where they apply sentence selection twice to retrieve more related evidence sentences. Stammbach and Neumann (2019) propose a two-staged selection process with two different retrieval models for selecting evidence sentences. Yoneda et al. (2018) infer the veracity of each claim-evidence pair and make final prediction by aggregating multiple predicted labels. Hanselowski et al. (2018) encode each claim-evidence pair separately, and use a pooling function to aggregate features for prediction. Zhou et al. (2019) formulates claim verification as a graph reasoning task and propose a new model with graph neural networks. Liu et al. (2020) regards sentences as the nodes of a graph and uses Kernel Graph Attention Network (KGAT) to aggregate information. Zhong et al. (2020) further constructs a semantic-level graph for input claim and evidence and perform reasoning over this graph with pretrained XLNet model .", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 197, |
| "text": "(Nakashole and Mitchell, 2014)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 281, |
| "end": 306, |
| "text": "Vlachos and Riedel (2014)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 452, |
| "end": 479, |
| "text": "Ferreira and Vlachos (2016)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 516, |
| "end": 537, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1229, |
| "end": 1249, |
| "text": "Yoneda et al. (2018)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1365, |
| "end": 1390, |
| "text": "Hanselowski et al. (2018)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1500, |
| "end": 1518, |
| "text": "Zhou et al. (2019)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1627, |
| "end": 1644, |
| "text": "Liu et al. (2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fact Verification", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Similar to our work, some previous systems also focus on using entity information for fact verification. Taniguchi et al. (2018) first extract entities from the claim and propose to use a simple entity-linking system based on text match to retrieve evidence documents. Nooralahzadeh and \u00d8vrelid (2018) select evidence documents by finding article titles which contain the entities and noun phrases of the claim.", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 128, |
| "text": "Taniguchi et al. (2018)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 269, |
| "end": 301, |
| "text": "Nooralahzadeh and \u00d8vrelid (2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fact Verification", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "The usage of external knowledge, like WordNet, Wikipedia and knowledge graph, has benefited many natural language processing tasks including natural language inference and fact verification. Jijkoun et al. 2005uses WordNet to measure word similarity to obtain a better textual entailment recognizer. Chen et al. (2018) proposes a neural network model for natural language inference equipped with several external knowledge. finds that utilizing ConceptNet as an external knowledge source can benefit entailment model in scientific domain. Chen et al. (2020b) proposes WIKINLI, a large-scale naturally annotated dataset constructed from Wikipedia category graph. And they show that model pretrained on this dataset can achieve better performance on downstream natural language entailment tasks.", |
| "cite_spans": [ |
| { |
| "start": 300, |
| "end": 318, |
| "text": "Chen et al. (2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 539, |
| "end": 558, |
| "text": "Chen et al. (2020b)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Modeling External Knowledge in NLP", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In this paper, we tackle the large-scale challenge for fact extraction and verification: the FEVER Challenge (Thorne et al., 2018) . It contains 185,445 claims generated by altering sentences extracted from Wikipedia. To verify a claim in FEVER, a model typically follows a three-step pipeline framework, i.e. document retrieval, sentence selection and claim verification. In document retrieval, a system matches the claim to Wikipedia articles by extracted named entities and phrases using a search engine built on Wikipedia. In sentence selection, a system ranks the sentences from retrieved articles by their similarity scores against the claim. The similarity score can be calculated by a trainable regression model, like Enhanced LSTM (Chen et al., 2017) , or pretrained language models like BERT and RoBERTa (Devlin et al., 2019; . In claim verification, a system classifies the truthfulness of the claim based on top-ranked sentences from the previous step, also known as the evidence sentences. Like most participants in this challenge, we adopt existing approaches for document retrieval and sentence selection, while mainly focusing on the claim verification model.", |
| "cite_spans": [ |
| { |
| "start": 109, |
| "end": 130, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 740, |
| "end": 759, |
| "text": "(Chen et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 814, |
| "end": 835, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "FEVER Challenge", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this section, we first formalize the fact verification problem in Section 4.1 and then introduce our model for incorporating entity knowledge in Section 4.2. Finally, we present our complete solution to the FEVER Challenge including document retrieval, evidence selection and entity description collection in Section 4.3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Given the input claim and its retrieved evidence sentences, our approach predicts the truthfulness of the claim. As defined in FEVER dataset, we frame the prediction as a three-way classification, i.e. the prediction is 'SUPPORTED', 'REFUTED' or 'NOT ENOUGH INFO (NEI)'. Furthermore, we require the model to predict the relatedness of the evidence sentences as an auxiliary task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Formally, the input to our model is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "[C, E 1 , E 2 , E 3 , \u2022 \u2022 \u2022 , E n ],", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where C is the claim and E i is the i-th evidence. The evidence sentences are obtained by an upstream retrieval module. The claim and each evidence are composed of a list of tokens:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "C = [w c 1 , w c 2 , \u2022 \u2022 \u2022 , w c |C| ], E i = [w e i 1 , w e i 2 , \u2022 \u2022 \u2022 , w e i |E i | ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The target output is the claim truthfulness label y c . Also, the FEVER dataset provides a relatedness label for each evidence sentence as auxiliary targets, i.e. y e i \u2208 {'RELATED', 'NOT RELATED'}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Problem Formulation", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The general architecture of our fact verification model is shown in Figure 1 . It is a classification neural network based on RoBERTa , a Transformer-based model (Vaswani et al., 2017) pretrained on large corpora with a masked language modeling objective.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 184, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 68, |
| "end": 76, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We concatenate the claim with evidences as input to the model. Following the default configuration of RoBERTa, we insert a [CLS] token at the start of the input; the output representation of this token is used to aggregate information from the whole sequence. And we insert a token [SEP] before each evidence sentence as an indicator of sentence boundaries. We use the output vectors of these [SEP] tokens as features for the evidence sentence after it.", |
| "cite_spans": [ |
| { |
| "start": 282, |
| "end": 287, |
| "text": "[SEP]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The modified text is then represented as a sequence of tokens", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "X = [w 1 , w 2 , \u2022 \u2022 \u2022 , w n ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Each token w i is assigned three types of embeddings: token embeddings indicate the meaning of each token, position embeddings indicate the position of each token within the text sequence, and segmentation embeddings are used to discriminate between the claim and the evidence sentences 1 . These three embeddings are summed into a single input vector x i and fed to a bidirectional Transformer with multiple layers:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H l = LN(H l\u22121 + Att(H l\u22121 , H l\u22121 )) (1) H l = LN(H l + FFN(H l ))", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Model Architecture", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where H 0 = x are the input vectors; LN is the layer normalization operation (Ba et al., 2016) ; Att is the multi-head self-attention operation (Vaswani et al., 2017) ; the superscript l indicates the depth of the stacked layers. On the top layer, RoBERTa generates an output vector for each token with rich contextual information for fact verification.", |
| "cite_spans": [ |
| { |
| "start": 77, |
| "end": 94, |
| "text": "(Ba et al., 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 144, |
| "end": 166, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As shown in the motivating examples in Table 1 , making the correct prediction needs good understanding and reasoning of the entities in claim and evidence. Thus, we collect entity knowledge from Wikipedia and encode them by a decomposable entity encoder. The result is attended by the previous fact verification module. In the follows, we will first introduce the main fact verification module and then the entity knowledge encoder.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 39, |
| "end": 46, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Suppose that in the output contextual embeddings from RoBERTa, c is the vector for the [CLS] token and e i is the vector for the i-th [SEP] token. To predict the truthfulness of the claim, we apply a three-way softmax classification layer over c:", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 92, |
| "text": "[CLS]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fact Verification Module", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y c = softmax(cW c + b c )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Fact Verification Module", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "where W c and b c are weight and bias. We adopt the cross entropy loss for claim truthfulness classification against the ground-truth label y c . Since the input evidence sentences are obtained by an upstream retrieval module, some of them may be irrelevant to the claim. Therefore, as an auxiliary training task, we also predict the relatedness of each evidence sentence, which has been shown to be effective in Yin and Roth (2018) .", |
| "cite_spans": [ |
| { |
| "start": 413, |
| "end": 432, |
| "text": "Yin and Roth (2018)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fact Verification Module", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "To do that, we apply a sigmoid classification layer over each e i : y e i = \u03c3(e i W e + b e ) (", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fact Verification Module", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "where W e and b e are weight and bias, and \u03c3 is the sigmoid function. Likewise, we adopt cross entropy loss for this binary classification of evidence relatedness against the ground-truth label y e i . The final loss L for our fact verification module is the weighted summation of the claim loss L c and the evidence loss L e :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fact Verification Module", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "L = \u03bbL c + (1 \u2212 \u03bb)L e (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fact Verification Module", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "where \u03bb is searched from {0.1, 0.3, 0.5, 0.7, 0.9} based on model performance on the development set. It is set to 0.5 to achieve the best performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fact Verification Module", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "To augment fact verification model with external entity knowledge, we first identify all named entities in the claim and evidence sentences with an external named entity recognizer. We then link these entities to Wikipedia articles with a trained entity linker (more details in Section 4.3). The lead section of the corresponding Wikipedia article is used as the description of an entity. While a straightforward approach is to append these descriptions to the input claim and evidence, it may lead to two potential issues. First, since entity descriptions are retrieved from Wikipedia articles, they could contain irrelevant noisy information and degrade the model performance. Second, many descriptions are very long and can reduce our model's efficiency in both training and inference. Therefore, we propose a decomposable entity knowledge encoder module to represent this external entity information in a compact semantic space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "We denote the fact verification module in Section 4.2.1 as T m . We co-train a lightweight entity knowledge encoder module T e initialized with the distilled RoBERTa-base (Sanh et al., 2019) . Thus, T e has less parameters and fewer layers than T m and the hidden state dimension of T e , i.e. d e , is smaller than that of T m , i.e. d m .", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 190, |
| "text": "(Sanh et al., 2019)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "We concatenate descriptions of entities in the claim and evidence sentences and feed the concatenated text into T e . We denote the input hidden states to the l-th layer in T m as H l\u22121 m and the input hidden states to the l-th layer in T e as H l\u22121 e . Then, the fact verification module T m employs a unidirectional attention to access outputs from T e to adopt entity knowledge for fact verification. Since T m and T e have different hidden sizes, we first apply a linear transformation to the outputs of T e :\u0125 l\u22121", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "e i = h l\u22121 e i W l\u22121 e + b l\u22121 e (6)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "where h l\u22121 e i is the i-th output of the (l \u2212 1)-th layer of T e , and W l\u22121 e \u2208 R de\u00d7dm , b l\u22121 e \u2208 R dm are weight and bias.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "Then the fact verification module T m conducts unidirectional attention to\u0124 l\u22121 e = {\u0125 l\u22121 e i }, along with its self-attention, to produce the output H l m .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H l m =LN(H l\u22121 m + Att(H l\u22121 m , [H l\u22121 m ,\u0124 l\u22121 e ]))", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H l m =LN(H l m + FFN(H l m )),", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "where [ * , * ] indicates the element-wise concatenation of two lists of vectors. And the entity knowledge encoder T e carries out its self-attention as in standard Transformer models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "H l e =LN(H l\u22121 e + Att(H l\u22121 e , H l\u22121 e )) (9) H l e =LN(H l e + FFN(H l e ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "Since T m has more encoding layers than T e , the unidirectional attention only works on the lower layers of T m where it has a corresponding layer in T e .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "In this way, the fact verification module can efficiently reason about the truthfulness of the claim with the compact representations of rich entity information from the entity knowledge encoder.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Decomposable Entity Knowledge Encoder", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "In this section, we introduce our complete solution to the FEVER Challenge of fact verification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Complete Solution", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We adopt the same document retrieval module as in (Hanselowski et al., 2018; Liu et al., 2020) . For a given claim, it first utilizes the constituency parser in AllenNLP (Gardner et al., 2018) to extract all phrases which potentially indicate entities. Then it uses these phrases as queries to find relevant Wikipedia pages through the online MediaWiki API. Then the highestranked results are retrieved and further filtered by a set of rules.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 76, |
| "text": "(Hanselowski et al., 2018;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 77, |
| "end": 94, |
| "text": "Liu et al., 2020)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 170, |
| "end": 192, |
| "text": "(Gardner et al., 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Document Retrieval", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the evidence selection module from Liu et al. (2020) to select related sentences from the retrieved Wikipedia pages. The module consists of a regression model based on BERT to score the claim and evidence sentence pair. For each claim, we use the top 5 ranked sentences as evidence.", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 59, |
| "text": "Liu et al. (2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evidence Selection", |
| "sec_num": null |
| }, |
| { |
| "text": "Entity Descriptions We first use Flair (Akbik et al., 2019) as the NER tool to extract entities from input claim and evidence sentences. We then use the entity linking system REL (van Hulst et al., 2020) to link entities to Wikipedia articles, and take the first section of the linked article as the entity description. We limit the length of any entity description to 100 tokens, and the total length of all descriptions for one instance to 512 tokens.", |
| "cite_spans": [ |
| { |
| "start": 39, |
| "end": 59, |
| "text": "(Akbik et al., 2019)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 179, |
| "end": 203, |
| "text": "(van Hulst et al., 2020)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evidence Selection", |
| "sec_num": null |
| }, |
| { |
| "text": "Claim Verification Finally, the claim, evidence sentences and entity descriptions are fed into our model in Section 4.2 to verify the claim's truthfulness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evidence Selection", |
| "sec_num": null |
| }, |
| { |
| "text": "We evaluate our model on FEVER 1.0 (Thorne et al., 2018) , a large-scale benchmark dataset for fact extraction and verification. Detailed statistics of FEVER are shown in Table 2 . Each instance in FEVER 1.0 consists of a human-written claim, a set of ground-truth evidence sentences from Wikipedia and a label (i.e., 'SUPPORTED', 'REFUTED' or 'NOT ENOUGH INFO'), indicating the truthfulness of claim. FEVER also provides a Wikipedia dump containing 5,416,537 pre-processed articles for machine learning models to select evidence sentences.", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 56, |
| "text": "(Thorne et al., 2018)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 171, |
| "end": 178, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset and Evaluation Metrics", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Models are evaluated by two metrics: label accuracy and FEVER score. Label accuracy measures the accuracy of model's prediction for claim truthfulness. FEVER score considers whether both the predicted claim truthfulness and the selected evidence sentences are correct.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset and Evaluation Metrics", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We implement our model with Huggingface Transformers (Wolf et al., 2020) . The training batch size is set to 32. We use the Adam optimizer (Kingma and Ba, 2014) with a learning rate of 1e-5 and a warm-up proportion of 0.1. The main encoder is initialized with RoBERTa-large. It has 355M parameters with 24 layers and a Transformer hidden size of 1,024. The entity encoder is initialized with distilled RoBERTa-base with 82M parameters, 6 layers and a Transformer hidden size of 768. We train our models for 10 epochs and the model achieving the highest label accuracy Data Split SUPPORTED REFUTED NEI Train 80,035 29,775 35,639 Dev 6,666 6,666 6,666 Test 6,666 6,666 6,666 ", |
| "cite_spans": [ |
| { |
| "start": 53, |
| "end": 72, |
| "text": "(Wolf et al., 2020)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 568, |
| "end": 670, |
| "text": "Data Split SUPPORTED REFUTED NEI Train 80,035 29,775 35,639 Dev 6,666 6,666 6,666 Test 6,666", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Implementation details", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We compare our system to the following topperforming systems on the FEVER shared task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 Athene (Hanselowski et al., 2018 ) models each claim-evidence pair separately and applies a pooling operation for feature aggregation.", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 34, |
| "text": "(Hanselowski et al., 2018", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 UCL MRG (Yoneda et al., 2018) uses Convolutional Neural Network as the encoder for claim and evidence. Label aggregation is used for final prediction.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 31, |
| "text": "(Yoneda et al., 2018)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 UNC NLP (Nie et al., 2019 ) designs a semantic matching neural model for both sentence selection and claim verification.", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 27, |
| "text": "(Nie et al., 2019", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 GEAR (Zhou et al., 2019 ) constructs a graph with each evidence sentence as a node and uses a graph neural network over this graph for prediction.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 25, |
| "text": "(Zhou et al., 2019", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 DREAM (Zhong et al., 2020) is built upon a graph derived from semantic role labeling and embeds a graph-based module into the pretrained XLNet model.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 28, |
| "text": "(Zhong et al., 2020)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 KGAT (Liu et al., 2020) uses Kernel Graph Attention Network over a graph with evidence sentences as nodes. The model is based on the pretrained RoBERTa-large model.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 25, |
| "text": "(Liu et al., 2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\u2022 LOREN (Chen et al., 2020a) designs a neural network that can aggregate probabilistic versions of the logic rules for fact verification. The table includes systems using pretrained models. The last block includes results from our framework. For a fair comparison with previous systems, we also implemented a version of our model based on BERT-Large. As shown, our approach achieves the best performance on label accuracy and competitive results on FEVER score in both development and test sets, proving the effectiveness of our entity knowledge-based approach. We also show that this improvement is consistent across different underlying pretrained models. For instance, our approach of knowledge integration outperforms KGAT when the language understanding model is initialized with either BERT-Large or RoBERTa-Large.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 28, |
| "text": "(Chen et al., 2020a)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Ablation Study To further investigate how our proposed system improves fact verification, we conduct ablation study of different model components. Table 4 presents the result on the development set after removing different components in our model based on RoBERTa-Large.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 147, |
| "end": 154, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "As shown, when the entity knowledge encoder is removed and the entity description is concatenated with the claim and evidence sentences as input, the label accuracy drops 0.5%. This proves the necessity of using a separate module to represent external knowledge. When evidence loss is removed, the label accuracy drops almost 1.3%. When entity description is not used at all, the FEVER score drops 1%. When evidence loss is further removed, the FEVER score drop increases to 1.6%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "These results show that our proposed entity descriptions, entity knowledge encoder and evidence loss all contribute to the effectiveness of our model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "To take a deeper investigation into current fact verification systems, we manually analyze 100 randomly selected cases that are incorrectly predicted by our model. We summarize several primary error types in this section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "The first error type is the failure of inference over multiple sentences. About 44% of claims in the FEVER development set have more than one gold evidence sentence. Identifying the truthfulness of these claims sometimes requires multi-sentence inference. For example, to verify the claim \"Hourglass was released 6 years after New Moon Shine.\", we need to infer over two evidence sentences: \"It built upon the success of his previous effort, New Moon Shine.\" and \"Taylor's first studio album in six years was released in 1997 to glowing notices.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "Another primary error type is mistakes in semantic matching. Given the claim \"Valencia is in a country.\", although the model successfully retrieved the gold evidence \"Valencia is the capital of the autonomous community of Valencia and the third largest city in Spain ...\", it still fails to predict it correctly. We believe one possible reason is that the model doesn't realize being a city of a country is synonymous with being in a country. This suggests we need more powerful language representation models to tackle fact verification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "The third error type is caused by ambiguity of concepts in the claim. For example, the claim describes \"Bones is a movie.\", and our model predicts its to be true based on retrieved evidence \"Bones is a 2001 American horror film directed ...\". However, there are different definitions of \"Bones\" in Wikipedia and the human annotator was referring to the TV series also named \"Bones\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "5.5" |
| }, |
| { |
| "text": "In this paper, we present a novel framework for fact verification. When assessing the truthfulness of a claim, we first identify the entities within the claim and evidence, and then retrieve external entity descriptions from Wikipedia. We design a decomposable entity knowledge encoder with unidirectional attention for effectively incorporating entity knowledge. Furthermore, we propose to use the prediction of input evidence sentences' relatedness as an auxiliary task. Experimental results show that our model achieves competitive results on the large-scale fact verification dataset FEVER. And we conduct ablation studies to showcase the effectiveness of our proposed components.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "RoBERTa does not use segmentation embeddings in pretraining, but we found it is useful in finetuning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "FLAIR: An easy-to-use framework for state-of-theart NLP", |
| "authors": [ |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Akbik", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanja", |
| "middle": [], |
| "last": "Bergmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Duncan", |
| "middle": [], |
| "last": "Blythe", |
| "suffix": "" |
| }, |
| { |
| "first": "Kashif", |
| "middle": [], |
| "last": "Rasul", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Schweter", |
| "suffix": "" |
| }, |
| { |
| "first": "Roland", |
| "middle": [], |
| "last": "Vollgraf", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
| "volume": "", |
| "issue": "", |
| "pages": "54--59", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan Akbik, Tanja Bergmann, Duncan Blythe, Kashif Rasul, Stefan Schweter, and Roland Vollgraf. 2019. FLAIR: An easy-to-use framework for state-of-the- art NLP. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations), pages 54-59, Minneapolis, Minnesota.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Nat-uralLI: Natural logic inference for common sense reasoning", |
| "authors": [ |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "534--545", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gabor Angeli and Christopher D. Manning. 2014. Nat- uralLI: Natural logic inference for common sense reasoning. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Process- ing (EMNLP), pages 534-545, Doha, Qatar.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Loren: Logic enhanced neural reasoning for fact verification", |
| "authors": [ |
| { |
| "first": "Jiangjie", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiaoben", |
| "middle": [], |
| "last": "Bao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiaze", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Changzhi", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanghua", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2012.13577" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiangjie Chen, Qiaoben Bao, Jiaze Chen, Changzhi Sun, Hao Zhou, Yanghua Xiao, and Lei Li. 2020a. Loren: Logic enhanced neural reasoning for fact ver- ification. arXiv preprint arXiv:2012.13577.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Mining knowledge for natural language inference from Wikipedia categories", |
| "authors": [ |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zewei", |
| "middle": [], |
| "last": "Chu", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Stratos", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mingda Chen, Zewei Chu, Karl Stratos, and Kevin Gimpel. 2020b. Mining knowledge for natural lan- guage inference from Wikipedia categories. In Find- ings of the Association for Computational Linguis- tics: EMNLP 2020, Online.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Neural natural language inference models enhanced with external knowledge", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Diana Inkpen, and Si Wei. 2018. Neural natural language inference models enhanced with external knowledge. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), Melbourne, Australia.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Enhanced LSTM for natural language inference", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "Inkpen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1657--1668", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Chen, Xiaodan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. 2017. Enhanced LSTM for natural language inference. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 1657-1668, Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Tabfact: A large-scale dataset for table-based fact verification", |
| "authors": [ |
| { |
| "first": "Wenhu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongmin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianshu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yunkai", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiyang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiyou", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "Yang" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the ICLR Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenhu Chen, Hongmin Wang, Jianshu Chen, Yunkai Zhang, Hong Wang, Shiyang Li, Xiyou Zhou, and William Yang Wang. 2020c. Tabfact: A large-scale dataset for table-based fact verification. In Proceed- ings of the ICLR Conference.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Emergent: a novel data-set for stance classification", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Ferreira", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Ferreira and Andreas Vlachos. 2016. Emer- gent: a novel data-set for stance classification. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, San Diego, California.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "AllenNLP: A deep semantic natural language processing platform", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Grus", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Oyvind", |
| "middle": [], |
| "last": "Tafjord", |
| "suffix": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Dasigi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nelson", |
| "middle": [ |
| "F" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Schmitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of Workshop for NLP Open Source Software (NLP-OSS)", |
| "volume": "", |
| "issue": "", |
| "pages": "1--6", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Gardner, Joel Grus, Mark Neumann, Oyvind Tafjord, Pradeep Dasigi, Nelson F. Liu, Matthew Pe- ters, Michael Schmitz, and Luke Zettlemoyer. 2018. AllenNLP: A deep semantic natural language pro- cessing platform. In Proceedings of Workshop for NLP Open Source Software (NLP-OSS), pages 1-6, Melbourne, Australia.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "UKP-athene: Multi-sentence textual entailment for claim verification", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Hanselowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zile", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniil", |
| "middle": [], |
| "last": "Sorokin", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Schiller", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [], |
| "last": "Schulz", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the First Workshop on Fact Extraction and VERification (FEVER)", |
| "volume": "", |
| "issue": "", |
| "pages": "103--108", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Hanselowski, Hao Zhang, Zile Li, Daniil Sorokin, Benjamin Schiller, Claudia Schulz, and Iryna Gurevych. 2018. UKP-athene: Multi-sentence textual entailment for claim verification. In Pro- ceedings of the First Workshop on Fact Extraction and VERification (FEVER), pages 103-108, Brus- sels, Belgium.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Recognizing textual entailment using lexical similarity", |
| "authors": [ |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Valentin Jijkoun", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "De Rijke", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the PASCAL Challenges Workshop on Recognising Textual Entailment", |
| "volume": "", |
| "issue": "", |
| "pages": "73--76", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Valentin Jijkoun, Maarten de Rijke, et al. 2005. Rec- ognizing textual entailment using lexical similarity. In Proceedings of the PASCAL Challenges Work- shop on Recognising Textual Entailment, pages 73- 76. Citeseer.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the ICLR Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. Proceedings of the ICLR Conference.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Roberta: A robustly optimized BERT pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized BERT pretraining ap- proach. CoRR, abs/1907.11692.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Fine-grained fact verification with kernel graph attention network", |
| "authors": [ |
| { |
| "first": "Zhenghao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenyan", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "7342--7351", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhenghao Liu, Chenyan Xiong, Maosong Sun, and Zhiyuan Liu. 2020. Fine-grained fact verification with kernel graph attention network. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7342-7351.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Language-aware truth assessment of fact candidates", |
| "authors": [ |
| { |
| "first": "Ndapandula", |
| "middle": [], |
| "last": "Nakashole", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1009--1019", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ndapandula Nakashole and Tom M. Mitchell. 2014. Language-aware truth assessment of fact candidates. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 1009-1019, Baltimore, Maryland.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Combining fact extraction and verification with neural semantic matching networks", |
| "authors": [ |
| { |
| "first": "Yixin", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Haonan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "33", |
| "issue": "", |
| "pages": "6859--6866", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yixin Nie, Haonan Chen, and Mohit Bansal. 2019. Combining fact extraction and verification with neu- ral semantic matching networks. In Proceedings of the AAAI Conference on Artificial Intelligence, vol- ume 33, pages 6859-6866.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Siriusltg: An entity linking approach to fact extraction and verification", |
| "authors": [ |
| { |
| "first": "Farhad", |
| "middle": [], |
| "last": "Nooralahzadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Lilja", |
| "middle": [], |
| "last": "\u00d8vrelid", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the First Workshop on Fact Extraction and VERification (FEVER)", |
| "volume": "", |
| "issue": "", |
| "pages": "119--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Farhad Nooralahzadeh and Lilja \u00d8vrelid. 2018. Sirius- ltg: An entity linking approach to fact extraction and verification. In Proceedings of the First Work- shop on Fact Extraction and VERification (FEVER), pages 119-123.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1910.01108" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Team domlin: Exploiting evidence enhancement for the fever shared task", |
| "authors": [ |
| { |
| "first": "Dominik", |
| "middle": [], |
| "last": "Stammbach", |
| "suffix": "" |
| }, |
| { |
| "first": "Guenter", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER)", |
| "volume": "", |
| "issue": "", |
| "pages": "105--109", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dominik Stammbach and Guenter Neumann. 2019. Team domlin: Exploiting evidence enhancement for the fever shared task. In Proceedings of the Sec- ond Workshop on Fact Extraction and VERification (FEVER), pages 105-109.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Integrating entity linking and evidence ranking for fact extraction and verification", |
| "authors": [ |
| { |
| "first": "Motoki", |
| "middle": [], |
| "last": "Taniguchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomoki", |
| "middle": [], |
| "last": "Taniguchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Takumi", |
| "middle": [], |
| "last": "Takahashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yasuhide", |
| "middle": [], |
| "last": "Miura", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomoko", |
| "middle": [], |
| "last": "Ohkuma", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the First Workshop on Fact Extraction and Verification (FEVER)", |
| "volume": "", |
| "issue": "", |
| "pages": "124--126", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Motoki Taniguchi, Tomoki Taniguchi, Takumi Taka- hashi, Yasuhide Miura, and Tomoko Ohkuma. 2018. Integrating entity linking and evidence ranking for fact extraction and verification. In Proceedings of the First Workshop on Fact Extraction and Verifica- tion (FEVER), pages 124-126.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "FEVER: a large-scale dataset for fact extraction and VERification", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Thorne", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| }, |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Christodoulopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Arpit", |
| "middle": [], |
| "last": "Mittal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "809--819", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018. FEVER: a large-scale dataset for fact extraction and VERification. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 809-819, New Orleans, Louisiana.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Rel: An entity linker standing on the shoulders of giants", |
| "authors": [ |
| {
| "first": "Johannes",
| "middle": [
| "M"
| ],
| "last": "van Hulst",
| "suffix": ""
| },
| {
| "first": "Faegheh",
| "middle": [],
| "last": "Hasibi",
| "suffix": ""
| },
| {
| "first": "Koen",
| "middle": [],
| "last": "Dercksen",
| "suffix": ""
| },
| {
| "first": "Krisztian",
| "middle": [],
| "last": "Balog",
| "suffix": ""
| },
| {
| "first": "Arjen",
| "middle": [
| "P"
| ],
| "last": "de Vries",
| "suffix": ""
| }
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "2197--2200", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Johannes M van Hulst, Faegheh Hasibi, Koen Derck- sen, Krisztian Balog, and Arjen P de Vries. 2020. Rel: An entity linker standing on the shoulders of gi- ants. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 2197-2200.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| {
| "first": "\u0141ukasz",
| "middle": [],
| "last": "Kaiser",
| "suffix": ""
| },
| {
| "first": "Illia",
| "middle": [],
| "last": "Polosukhin",
| "suffix": ""
| }
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, volume 30.",
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Fact checking: Task definition and dataset construction", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the ACL 2014 Workshop on Language Technologies and Computational Social Science", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Vlachos and Sebastian Riedel. 2014. Fact checking: Task definition and dataset construction. In Proceedings of the ACL 2014 Workshop on Lan- guage Technologies and Computational Social Sci- ence, Baltimore, MD, USA.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The spread of true and false news online", |
| "authors": [ |
| { |
| "first": "Soroush", |
| "middle": [], |
| "last": "Vosoughi", |
| "suffix": "" |
| }, |
| { |
| "first": "Deb", |
| "middle": [], |
| "last": "Roy", |
| "suffix": "" |
| }, |
| { |
| "first": "Sinan", |
| "middle": [], |
| "last": "Aral", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Science", |
| "volume": "359", |
| "issue": "6380", |
| "pages": "1146--1151", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soroush Vosoughi, Deb Roy, and Sinan Aral. 2018. The spread of true and false news online. Science, 359(6380):1146-1151.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "\"liar, liar pants on fire\": A new benchmark dataset for fake news detection",
| "authors": [ |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wang", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "422--426", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Yang Wang. 2017. \"liar, liar pants on fire\": A new benchmark dataset for fake news detection. In Proceedings of the 55th Annual Meeting of the As- sociation for Computational Linguistics (Volume 2: Short Papers), pages 422-426, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Improving natural language inference using external knowledge in the science questions domain", |
| "authors": [ |
| { |
| "first": "Xiaoyan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavan", |
| "middle": [], |
| "last": "Kapanipathi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Musa", |
| "suffix": "" |
| }, |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartik", |
| "middle": [], |
| "last": "Talamadupula", |
| "suffix": "" |
| }, |
| { |
| "first": "Ibrahim", |
| "middle": [], |
| "last": "Abdelaziz", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Achille", |
| "middle": [], |
| "last": "Fokoue", |
| "suffix": "" |
| }, |
| { |
| "first": "Bassem", |
| "middle": [], |
| "last": "Makni", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Mattei", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "33", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaoyan Wang, Pavan Kapanipathi, Ryan Musa, Mo Yu, Kartik Talamadupula, Ibrahim Abdelaziz, Maria Chang, Achille Fokoue, Bassem Makni, Nicholas Mattei, et al. 2019. Improving natural lan- guage inference using external knowledge in the sci- ence questions domain. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "Remi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Davison", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Shleifer", |
| "suffix": "" |
| }, |
| {
| "first": "Patrick",
| "middle": [],
| "last": "von Platen",
| "suffix": ""
| },
| {
| "first": "Clara",
| "middle": [],
| "last": "Ma",
| "suffix": ""
| },
| {
| "first": "Yacine",
| "middle": [],
| "last": "Jernite",
| "suffix": ""
| },
| {
| "first": "Julien",
| "middle": [],
| "last": "Plu",
| "suffix": ""
| },
| {
| "first": "Canwen",
| "middle": [],
| "last": "Xu",
| "suffix": ""
| },
| {
| "first": "Teven",
| "middle": [
| "Le"
| ],
| "last": "Scao",
| "suffix": ""
| },
| {
| "first": "Sylvain",
| "middle": [],
| "last": "Gugger",
| "suffix": ""
| },
| {
| "first": "Mariama",
| "middle": [],
| "last": "Drame",
| "suffix": ""
| },
| {
| "first": "Quentin",
| "middle": [],
| "last": "Lhoest",
| "suffix": ""
| },
| {
| "first": "Alexander",
| "middle": [],
| "last": "Rush",
| "suffix": ""
| }
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "38--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-art natural language process- ing. In Proceedings of the 2020 Conference on Em- pirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| {
| "first": "Russ",
| "middle": [
| "R"
| ],
| "last": "Salakhutdinov",
| "suffix": ""
| },
| {
| "first": "Quoc",
| "middle": [
| "V"
| ],
| "last": "Le",
| "suffix": ""
| }
| ], |
| "year": 2019, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5753--5763", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5753-5763.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "TwoWingOS: A two-wing optimization strategy for evidential claim verification", |
| "authors": [ |
| { |
| "first": "Wenpeng", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "105--114", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenpeng Yin and Dan Roth. 2018. TwoWingOS: A two-wing optimization strategy for evidential claim verification. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 105-114, Brussels, Belgium.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Ucl machine reading group: Four factor framework for fact finding (hexaf)", |
| "authors": [ |
| { |
| "first": "Takuma", |
| "middle": [], |
| "last": "Yoneda", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Welbl", |
| "suffix": "" |
| }, |
| { |
| "first": "Pontus", |
| "middle": [], |
| "last": "Stenetorp", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Riedel", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the First Workshop on Fact Extraction and VERification (FEVER)", |
| "volume": "", |
| "issue": "", |
| "pages": "97--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Takuma Yoneda, Jeff Mitchell, Johannes Welbl, Pon- tus Stenetorp, and Sebastian Riedel. 2018. Ucl ma- chine reading group: Four factor framework for fact finding (hexaf). In Proceedings of the First Work- shop on Fact Extraction and VERification (FEVER), pages 97-102, Brussels, Belgium.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Reasoning over semantic-level graph for fact checking", |
| "authors": [ |
| { |
| "first": "Wanjun", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingjing", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Duyu", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zenan", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiahai", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6170--6180", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wanjun Zhong, Jingjing Xu, Duyu Tang, Zenan Xu, Nan Duan, Ming Zhou, Jiahai Wang, and Jian Yin. 2020. Reasoning over semantic-level graph for fact checking. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 6170-6180, Online.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "GEAR: Graph-based evidence aggregating and reasoning for fact verification", |
| "authors": [ |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheng", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Changcheng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "892--901", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jie Zhou, Xu Han, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 2019. GEAR: Graph-based evidence aggregating and rea- soning for fact verification. In Proceedings of the 57th Annual Meeting of the Association for Compu- tational Linguistics, pages 892-901, Florence, Italy.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Architecture of our fact verification system enhanced with entity knowledge. The left part is the fact verification module based on RoBERTa and the right part is the entity encoder based on distilled RoBERTa. The dotted green arrows indicate unidirectional attention mechanism which the fact verification module uses to access outputs from the entity encoder. The final loss of our model is the combination of claim loss and auxiliary evidence loss.", |
| "type_str": "figure", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td>on development set of FEVER is selected. All</td></tr><tr><td>source codes of this work are available at https:</td></tr><tr><td>//github.com/nlpyang/FeverEntity.</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "text": "Statistics of the FEVER dataset.", |
| "num": null |
| }, |
| "TABREF2": { |
| "content": "<table><tr><td>shows the results on both the development</td></tr><tr><td>set and blind test set of FEVER. The first block</td></tr><tr><td>in the table includes the results of systems with-</td></tr><tr><td>out using pretrained models. The second block in</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "text": "", |
| "num": null |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td colspan=\"5\">Entity Description Entity Knowledge Encoder Evidence Loss Label Accuracy FEVER Score</td></tr><tr><td/><td/><td/><td>81.43</td><td>78.65</td></tr><tr><td/><td>\u00d7</td><td/><td>80.89</td><td>77.93</td></tr><tr><td/><td/><td>\u00d7</td><td>80.15</td><td>77.30</td></tr><tr><td>\u00d7</td><td>\u00d7</td><td/><td>80.44</td><td>77.65</td></tr><tr><td>\u00d7</td><td>\u00d7</td><td>\u00d7</td><td>79.87</td><td>76.99</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "text": "Evaluation of fact verification systems on FEVER dataset. Variants of a system using different pretrained models are listed.", |
| "num": null |
| }, |
| "TABREF4": { |
| "content": "<table/>", |
| "type_str": "table", |
| "html": null, |
| "text": "Ablation study on FEVER development set. Our model is based on RoBERTa-Large in this experiment.", |
| "num": null |
| } |
| } |
| } |
| } |