| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:42:32.958587Z" |
| }, |
| "title": "Stance Prediction and Claim Verification: An Arabic Perspective", |
| "authors": [ |
| { |
| "first": "Jude", |
| "middle": [], |
| "last": "Khouja", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "jude@latynt.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This work explores the application of textual entailment in news claim verification and stance prediction using a new corpus in Arabic. The publicly available corpus comes in two perspectives: a version consisting of 4,547 true and false claims and a version consisting of 3,786 pairs (claim, evidence). We describe the methodology for creating the corpus and the annotation process. Using the introduced corpus, we also develop two machine learning baselines for two proposed tasks: claim verification and stance prediction. Our best model utilizes pretraining (BERT) and achieves 76.7 F1 on the stance prediction task and 64.3 F1 on the claim verification task. Our preliminary experiments shed some light on the limits of automatic claim verification that relies on claims text only. Results hint that while the linguistic features and world knowledge learned during pretraining are useful for stance prediction, such learned representations from pretraining are insufficient for verifying claims without access to context or evidence.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This work explores the application of textual entailment in news claim verification and stance prediction using a new corpus in Arabic. The publicly available corpus comes in two perspectives: a version consisting of 4,547 true and false claims and a version consisting of 3,786 pairs (claim, evidence). We describe the methodology for creating the corpus and the annotation process. Using the introduced corpus, we also develop two machine learning baselines for two proposed tasks: claim verification and stance prediction. Our best model utilizes pretraining (BERT) and achieves 76.7 F1 on the stance prediction task and 64.3 F1 on the claim verification task. Our preliminary experiments shed some light on the limits of automatic claim verification that relies on claims text only. Results hint that while the linguistic features and world knowledge learned during pretraining are useful for stance prediction, such learned representations from pretraining are insufficient for verifying claims without access to context or evidence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Although fake news is not an emerging phenomenon and has been documented throughout history, the prevalence and wide spread of misinformation over the internet has captured significant proportion of public attention in recent years. This is in part linked to the low barrier for content generation through the advent of the internet and social media (Allcott and Gentzkow, 2017) and the fact that false news spread faster than true news (Vosoughi et al., 2018) rendering it increasingly dangerous to public discourse. The widespread exposure in the U.S. for example has been reported by researchers who found that the average American encountered between one and three stories from known publishers of fake news during the month before the 2016 election (Allcott and Gentzkow, 2017) .", |
| "cite_spans": [ |
| { |
| "start": 350, |
| "end": 378, |
| "text": "(Allcott and Gentzkow, 2017)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 437, |
| "end": 460, |
| "text": "(Vosoughi et al., 2018)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 754, |
| "end": 782, |
| "text": "(Allcott and Gentzkow, 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Since manual fact-checking by human experts does not scale well with the amount of information shared on the web, there is a growing body of work in recent years aimed at developing automatic tools to target fake news, misinformation and credibility of content on social media in general (Rubin et al., 2016; El Ballouli et al., 2017; Baly et al., 2018a,b; Wang et al., 2018; Saleh et al., 2019; Zhang et al., 2019) . Several datasets were developed to further aid research on this topic 1 (Darwish et al., 2017; Wang, 2017; Baly et al., 2018b; . We refer readers to Pierri and Ceri, 2019) for a more comprehensive overview of recent research on fake news, propaganda and misinformation.", |
| "cite_spans": [ |
| { |
| "start": 288, |
| "end": 308, |
| "text": "(Rubin et al., 2016;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 309, |
| "end": 334, |
| "text": "El Ballouli et al., 2017;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 335, |
| "end": 356, |
| "text": "Baly et al., 2018a,b;", |
| "ref_id": null |
| }, |
| { |
| "start": 357, |
| "end": 375, |
| "text": "Wang et al., 2018;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 376, |
| "end": 395, |
| "text": "Saleh et al., 2019;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 396, |
| "end": 415, |
| "text": "Zhang et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 490, |
| "end": 512, |
| "text": "(Darwish et al., 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 513, |
| "end": 524, |
| "text": "Wang, 2017;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 525, |
| "end": 544, |
| "text": "Baly et al., 2018b;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 567, |
| "end": 589, |
| "text": "Pierri and Ceri, 2019)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Despite the increased attention, most of the work has been focusing on the English language. Tools, resources and datasets available in Arabic are limited (Darwish et al., 2017; Baly et al., 2018b; Elsayed et al., 2019) . As such, this work contributes to recent efforts targeting Arabic by introducing a new publicly available corpus in Arabic that is suitable to study claim verification and semantic entailment (Katz, 1972) .", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 177, |
| "text": "(Darwish et al., 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 178, |
| "end": 197, |
| "text": "Baly et al., 2018b;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 198, |
| "end": 219, |
| "text": "Elsayed et al., 2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 414, |
| "end": 426, |
| "text": "(Katz, 1972)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In recent years, there has been rapid progress in developing systems and tools for automatic fact checking and claim verification. Various approaches were developed which relied on a diverse set of methods and information to verify claims. Most relevant to this work are approaches that used content such as textual information in the title and/or body of the claims to predict their veracity. Among this direction of research those that considered a machine learning approach (Potthast et al., Given a news title, write two news titles that:", |
| "cite_spans": [ |
| { |
| "start": 477, |
| "end": 494, |
| "text": "(Potthast et al.,", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A-Paraphrase the original title: Has same meaning but is worded differently by rephrasing and changing Syntax, using verb synonyms, using different words to describe the same information such as locations, counts and dates. B-Contradict the original title: Looks similar to the original title but has contradicting meaning (both cannot be true in the same context) by reversing meaning without negating main verb, using antonym of main verb with rephrasing, changing key information using world knowledge such as locations, counts and dates. 2017; Wang et al., 2018; Alzanin and Azmi, 2019) including deep learning techniques (Hanselowski et al., 2017; Baly et al., 2018b; Popat et al., 2018; Chawla et al., 2019; Helwe et al., 2019; Lv et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 548, |
| "end": 566, |
| "text": "Wang et al., 2018;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 567, |
| "end": 590, |
| "text": "Alzanin and Azmi, 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 626, |
| "end": 652, |
| "text": "(Hanselowski et al., 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 653, |
| "end": 672, |
| "text": "Baly et al., 2018b;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 673, |
| "end": 692, |
| "text": "Popat et al., 2018;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 693, |
| "end": 713, |
| "text": "Chawla et al., 2019;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 714, |
| "end": 733, |
| "text": "Helwe et al., 2019;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 734, |
| "end": 750, |
| "text": "Lv et al., 2019)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Datasets: There are limited but growing datasets related to claim verification (Al Zaatari et al., 2016; Darwish et al., 2017; Wang, 2017; Baly et al., 2018b; Alkhair et al., 2019; Alzanin and Azmi, 2019; Elsayed et al., 2019) . However, datasets focusing on Arabic remain scarce (Darwish et al., 2017; Baly et al., 2018b; Elsayed et al., 2019) . Recently, work on the application of textual entailment for claim verfication has been explored and new datasets combining stance prediction and claim verfication were introduced (Baly et al., 2018b; .", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 104, |
| "text": "(Al Zaatari et al., 2016;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 105, |
| "end": 126, |
| "text": "Darwish et al., 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 127, |
| "end": 138, |
| "text": "Wang, 2017;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 139, |
| "end": 158, |
| "text": "Baly et al., 2018b;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 159, |
| "end": 180, |
| "text": "Alkhair et al., 2019;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 181, |
| "end": 204, |
| "text": "Alzanin and Azmi, 2019;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 205, |
| "end": 226, |
| "text": "Elsayed et al., 2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 280, |
| "end": 302, |
| "text": "(Darwish et al., 2017;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 303, |
| "end": 322, |
| "text": "Baly et al., 2018b;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 323, |
| "end": 344, |
| "text": "Elsayed et al., 2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 526, |
| "end": 546, |
| "text": "(Baly et al., 2018b;", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "This work is most in line with that direction. We developed a new corpus in Arabic that can be used jointly for claim verification and textual entailment recognition. However, our new corpus differs from the aforementioned datasets in that it is at the sentence level, hence, we are disentangling the tasks of claim verification and textual entailment from the task of evidence extraction (Information Retrieval) and focusing on the former. We also start from real news titles and generate true/false claims from them. Our aim is to mitigate one type of bias that results from starting with fake news collected in the wild: bias in the distribution of topics among the true/false claims. While some forms of biases about the world are useful in determining the veracity of a claim, some can be problematic. We can imagine a dataset that contains more positive 2 news in the \"fake\" class than in the \"true\" class.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "2 Positive here refers to sentiment A system trained on such data could predict the class \"fake\" with higher confidence for any claim that has a positive tone compared to one that has a negative or neutral tone. Such surface level biases in topics and linguistic styles could arguably result in models that do not generalize well.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this part, we describe our Arabic News Stance (ANS) corpus. 3 We derived two perspectives of the corpus suitable for claim verification and stance classification. Please refer to Appendix A to read our data statement about the corpus.", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 64, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The corpus", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In contrast to Baly et al. (2018b) and more in line with Thorne et al. 2018, we start with true news titles (reference) and generate fake/true claims from them. The corpus generating process can be summarized in two stages: 1) generating true/false modifications of existing news titles through crowdsourcing; and 2) validating the generated claims by annotating them in a separate phase.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 34, |
| "text": "Baly et al. (2018b)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We derive our corpus by sampling a subset of news titles from the most recent version of the Arabic News Texts (ANT) corpus (Chouigui et al., 2017) ; A collection of Arabic news from multiple news media sources in the Middle East. The dataset was suitable for our task as it covers several topics of news (politics, sports, etc.) sourced from several credible mainstream news outlets (BBC, CNN, Al Arabiya, etc.). The following is an example of a news title from this dataset:", |
| "cite_spans": [ |
| { |
| "start": 124, |
| "end": 147, |
| "text": "(Chouigui et al., 2017)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\"Facts about the falling of a boulder weighing 100 kg. of the west wall in Jerusalem.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Generating true/false claims We used crowdsourcing to generate true/false claims. Starting from a news title, we recruited annotators to modify each news title into a new claim. For true claims, annotators were asked to paraphrase the original sentence by changing its syntax and wording while maintaining the integrity of the information. We allowed for the use of world knowledge to modify the information. For example, replacing cities with countries and celebrities and politicians with their nationalities. For false claims, to ensure that the modification results in meaningful mutation of the semantic information, the instructions (Table 1) stated that the modified sentence should contradict the original title in such a way that both cannot simultaneously be true in the same context. Annotators were asked to avoid simple negation and were encouraged to use different strategies for modifying the sentences. Our analysis of a sample of the collected data showed that different annotators utilized different strategies at different rates. For example, some annotators predominantly altered years, counts and locations that appeared in the original titles while others modified the semantics of the modified sentences to have opposite meaning (detained vs. released, supported vs. opposed, etc.).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 639, |
| "end": 648, |
| "text": "(Table 1)", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We relied on Amazon Mechanical Turk 4 and Upwork 5 to recruit annotators. We only considered Arabic native speakers for news title rewriting. All annotators had to pass a language qualification test similar to our task. Data was randomly assigned to annotators in batches of 500. To ensure the quality of the generated data, we sampled data during the annotation from each batch and re-annotated any batch containing errors in more than 10% of the sample by resending the batch to the annotator after explaining the errors. See Table 2 for examples of generated claims using different modification strategies.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 528, |
| "end": 535, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Collection", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "To evaluate the quality of our data, we performed a second round of annotation on the generated news titles. We derived a new task in which annotators were presented with a pair of sentences and asked to supply a hypothesis about how they are semantically related. This task is related to the the semantic concepts of entailment and contradiction (Katz, 1972; Bowman et al., 2015) but with the aim of validating our generated true/fake claims. We highlight a notable difference compared to other work on stance classification. In contrast to the commonly used four classes adopted in other datasets 6 (agree, contradict, discuss, unrelated), we elect to merge labels (discuss, unrelated) into one (other/not enough information) resulting in three classes -paraphrase, contradiction, other/not enough information for each pair of news titles. Our motivation is that despite the general value of discriminating between irrelevant documents 7 (unrelated) and documents that are related to the claim but do not make a stance about the claim (discuss), both classes represent the same position in the context of stance prediction. We, therefore, treat them as one class. We found that this is also similar to the approach by .", |
| "cite_spans": [ |
| { |
| "start": 347, |
| "end": 359, |
| "text": "(Katz, 1972;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 360, |
| "end": 380, |
| "text": "Bowman et al., 2015)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Validation And Analysis", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To present annotators with a small set of the third class (other), we first considered randomly pairing news titles from our corpus. We hypothesized that randomly paired news titles will be discussing unrelated news and would naturally be assigned the label other by annotators. However and upon examining examples of this method, we noticed that telling the (other) class apart from the two classes was dis-proportionally trivial since the randomly paired sentences differed significantly (discussed different topics and contained no overlapping words) compared to pairs from the paraphrase, contradict classes. Predicting this class, therefore, can be reduced to checking for the absence of overlap in words from the paired titles. As an alternative selection criteria to random pairing, we used a similarity metric to select pairs that look more similar. We calculate the F1 score of overlapping ngrams in the paired titles weighted by the ngram size similar to Trinh and Le (2018). In our case however, we consider ngrams at the character level given the short length of the sentences. We included ngrams of size 2 to 6 and set the minimum score to 0.1. A total of 4,259 pairs were labeled by 3 to 5 annotators. We considered the author's rewritten sentences as labels (for the paraphrase and contradict classes). Table 3 shows the annotation statistics. The Fleiss k scores (calculated separately for examples labeled by 3, 4 and 5 annotators) show overall a very high level of agreement (> 0.81) suggesting that the quality of the dataset is sufficiently high. For the final data, we included only pairs with inter annotator agreement of 75% or higher, hence, dismissing all data with 2 out of 3 majority vote or worse.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1318, |
| "end": 1325, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data Validation And Analysis", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Figures 1 and 2 provide some details about the length of the written claims in the final dataset compared to the original reference sentences. We noticed that on average, claims are shorter than the original references with contradicting claims being shorter than paraphrasing claims. This could be due to workers aiming to minimize time spent per each example. Another likely explanation is the fact that contradicting a statement by replacing or removing key words is easier than paraphrasing a statement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Validation And Analysis", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In this section, to demonstrate the utility of the corpus, we derive two tasks useful for evaluating news veracity and stance prediction and develop two baselines to evaluate on the proposed tasks. We describe the proposed tasks and details of the baselines in this section and the results in section 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Claim Only Verification: In this setting, we explore the task of verifying claims based only on information in the claims themselves. In our corpus, we assess the veracity of a claim c i from our corpus D based solely on the textual information of the claim. The task is, hence, a binary classification where an estimator needs to map an input to a label Y which can be either fake or not fake: Class # % Not Fake 3072 67.6% Fake 1475 32.4% ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tasks", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "p(Y |c i ), c i \u2208 D", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tasks", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We consider all original news titles (reference sentences) in our corpus to belong to the not fake class. We rely on the fact that the reference sentences originated from reputable mainstream media in the Middle East. Our fake class examples consist of the sentences corrupted by annotators that passed the data validation process described in Section 3.1. Table 4 shows the distribution of classes for this task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 357, |
| "end": 364, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tasks", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "It is important to discern the limited scope in defining news veracity in this work: the incorrectness of the corrupted sentence is not a universal statement about the claim. We note the fact that several of the corrupted sentences can be factual/not fake in other contexts. As such, we consider them fake in regards to the related event/context -in this case our reference sentence (original news titles). Further analysis exposed two instances where the modified sentences matched other original news titles. Both examples were excluded from the corpus for this task. However, such cases hint at the limits of claim verification using claim text only. We further explore this in section 5 and share some insights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tasks", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Stance Prediction This task is a direct reflection of our annotation process. Given a reference sentence r i and a claim c i , predict the label Y (Agree, Contradict, Other/Not enough information) from the claim/reference pair (c i , r i ). Table 5 shows the distribution of classes in our corpus for the stance prediction task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 241, |
| "end": 248, |
| "text": "Table 5", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tasks", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "p(Y |c i , r i ), (c i , r i ) \u2208 D", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tasks", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We evaluate two baselines on both tasks. For modeling, we considered two classes of models that have been largely adopted by the NLP community. The models are described in the next section. (Hochreiter and Schmidhuber, 1997) as the main building block to encode the input. LSTM models encode the input sequentially and can model temporal dependencies useful to semantic tasks. In our implementation for both tasks, we consider both character level and word level representations of the input sentence(s) separately. In each case, we represent every input word/character with a unique d-dimensional vector that is learned during training. These vectors are then passed through the LSTM layer in sequence and the output of the last step (at the end of the sentence) is used as the encoding of the sentence(s). For the claim verification task, the claim encoding \u2212 \u2192 h t can be described by:", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 224, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2212 \u2192 h t = \u2212\u2212\u2212\u2212\u2192 LST M ( \u2212 \u2192 h t\u22121 , x t ) t = 1, ..., M i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Where M i is the length size of the sentence corresponding to example i and x t is the character/word at position t.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In stance prediction, the input consists of a pair of sentences (reference r, claim c). Each is encoded using the same LSTM layer to obtain their encoding:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2212 \u2192 r t = \u2212\u2212\u2212\u2212\u2192 LST M ( \u2212 \u2192 r t\u22121 , x t ) t = 1, ..., M r i \u2212 \u2192 c t = \u2212\u2212\u2212\u2212\u2192 LST M ( \u2212 \u2192 c t\u22121 , x t ) t = 1, ..., M k i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To obtain the interaction representation \u2212 \u2192 h t , \u2212 \u2192 r t and \u2212 \u2192 c t are multiplied element-wise. We experimented with cosine similarity and concatenation and found the element-wise multiplication and concatenation to work slightly better than cosine similarity:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2212 \u2192 h t = ( \u2212 \u2192 r t \u2022 \u2212 \u2192 k t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The resulting encoding in both tasks \u2212 \u2192 h t is then passed through a linear layer with non-linearity Table 6 : Results for the claim verification and stance prediction Tasks.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 102, |
| "end": 109, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "(ReLu) followed by a sof tmax function to convert the output to probabilities for each class:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "p(Y = c|h i ) = sof tmax(ReLu(W c \u2212 \u2192 h i + b c ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "W c and b c are learnable parameters associated with each class c in the corresponding task. Prediction in both tasks is done by selecting the label with the highest probability:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "arg max c p(Y = c|h i )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Pretrained Transformer: Pretraining and transfer learning (Devlin et al., 2018a; Peters et al., 2018; Radford et al., 2019) has recently gained attention as a popular approach to acquiring universal linguistic features useful in many downstream NLP tasks and was shown to be successful in improving on the state of the art in many downstream NLP tasks with minimal fine-tuning. Lv et al. (2019) have successfully explored BERT for the task of fake news detection in English and proposed an extension that improves on fine-tuned BERT. In addition to the aforementioned supervised methods, we evaluate BERT (Devlin et al., 2018a) on both tasks in our corpus. We are not aware of any other work that explored pretraining for claim verification and stance prediction in Arabic.", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 80, |
| "text": "(Devlin et al., 2018a;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 81, |
| "end": 101, |
| "text": "Peters et al., 2018;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 102, |
| "end": 123, |
| "text": "Radford et al., 2019)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 378, |
| "end": 394, |
| "text": "Lv et al. (2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 605, |
| "end": 627, |
| "text": "(Devlin et al., 2018a)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "BERT is based on the Transformer model first introduced by Vaswani et al. (2017) . Transformerbased models have recently become common in many NLP tasks including question answering and entailment classification (Devlin et al., 2018b; Radford, 2018) . For both tasks, we utilize a publicly available implementation that has been trained on a multilingual dataset including Arabic. 8 . We elect to adhere to the proposed approach recommended by Devlin et al. (2018a) for future reproducibility. Since our implementation is identical to the one provided by the authors, we will omit the detailed description of the model architecture and refer readers to (Vaswani et al., 2017) Table 7 : Results of using pretraining (BERT) on claim verification and stance prediction tasks.", |
| "cite_spans": [ |
| { |
| "start": 59, |
| "end": 80, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 212, |
| "end": 234, |
| "text": "(Devlin et al., 2018b;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 235, |
| "end": 249, |
| "text": "Radford, 2018)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 444, |
| "end": 465, |
| "text": "Devlin et al. (2018a)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 653, |
| "end": 675, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 676, |
| "end": 683, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methods", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "For the recurrent perspective models, we trained all models for 100 epochs using Adam optimizer (Kingma and Ba, 2014) with 0.001 learning rate. We conducted hyper-parameter tuning on the de- velopment set. For the pretrained BERT model, we fine-tune on our data for 3 epochs using BERT BPE units. Table 6 shows the top results of all experiments for both tasks. We report the accuracy and F 1 (Macro unweighted average). In the claim verification task, results show that in general, word based models perform comparably to character based models but we note that all results do not provide significant gains (53.1 vs. 40.2 F 1 ) compared to the baseline (majority class) which could be explained by the small training data size but might hint at an ill-defined task. We explore this further below. In the stance prediction task, experiments show word based models outperform character based models (37.8 vs 27.5 F 1 ). This could be due to the limited size of our corpus which is not sufficient for character based models to learn words and phrases from scratch and capture the semantic representation needed for stance prediction.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 297, |
| "end": 304, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Results for the pretraining experiments (shown in Table 7 ) show significant improvement of the pretrained model over the models trained only on our corpus. This is similar to findings by Lv et al. (2019) . However, the improvement is disproportionally larger in the stance prediction task (76.7 vs. 37.8 F 1 ) and the large gains do not carry over to the claim verification task (64.3 vs. 53.1 F 1 ). The imbalance in gains also confirms our intuition about the limitation of claim only verification which we discuss next.", |
| "cite_spans": [ |
| { |
| "start": 188, |
| "end": 204, |
| "text": "Lv et al. (2019)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 50, |
| "end": 57, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We briefly mentioned in Section 4.1 the limited scope of claim verification in a setting were the decision about the veracity of a claim can be made using only the text of the claim. We hypothesize that the task might not be learnable through a direct mapping from the claim text to the veracity space. Given that the initial results of the fine-tuned BERT model supported this intuition, we elected to manually inspect a sample of the predictions and noticed that in many cases the model was predicting the same label for claims that look similar but are semantically different. We share a sample of these cases in Table 8 . This suggests that while the linguistic features learned during pretraining were useful for textual entailment (stance prediction task), the veracity of a claim cannot be made using only implicit world knowledge learned during pretraining. A simple example highlighting this limitation is the reference news title \"Gold prices increase amidst a falling dollar.\"' and its contradicted rewritten version \" \"Gold prices fall globally\". Here, it is easy to argue that the contradiction can be true in another context and hence, a decision about the veracity of this claim should only be made in reference to a particular context/event. We believe that explicitly associating each claim with evidence or context is the more appropriate approach for claim verification.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 616, |
| "end": 623, |
| "text": "Table 8", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Limits Of Claim Only Verification:", |
| "sec_num": null |
| }, |
| { |
| "text": "These initial experiments suggest that discriminate models trained using claim only information might rely on biases in the topics, linguistic styles, tones and implicit world-knowledge learned from training data to make predictions. Results of the performance of such models could, therefore, be inflated if the training data is not uniformly distributed across languages, topics, writing styles, political ideologies etc. While we believe that our dataset collection process which yields classes that share the same distribution of topics and news sources mitigated these types of biases, we also note that the annotation process and human factor introduced other types of biases that could be present in the data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limits Of Claim Only Verification:", |
| "sec_num": null |
| }, |
| { |
| "text": "In this work we presented a new publicly available corpus for textual entailment and its use in studying misinformation in the Arabic language. We shared some insights about the creation of the corpus and the baselines developed to evaluate the corpus. We further explored the use of pretraining (Devlin et al., 2018a) and developed a strong baseline for our tasks. Our experiments additionally shed light on the limits of \"claim-only\" misinformation detection methods that rely solely on the stated claims without use of accompanying evidence. We hope to explore this further in future work. As we plan to also explore the use of generated data in studying the robustness of misinformation detection methods against adversarial data with varying linguistic styles, political ideologies and world-knowledge. news sources (BBC, Al Arabiya, CNN, Sky News, France24) and 6 categories (culture, economy, international news, Middle East, sport, technology) collected from February 2018 to October 2018. Data also includes rewritten versions of the news titles by the annotators following the provided guidelines (see Table 1 ).", |
| "cite_spans": [ |
| { |
| "start": 296, |
| "end": 318, |
| "text": "(Devlin et al., 2018a)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1112, |
| "end": 1119, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "FNC: http://www.fakenewschallenge.org/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Data available at: https://github.com/latynt/ans", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.mturk.com 5 https://www.upwork.com/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For example: Fake News Challenge (FNC) 7 Documents in this case refer to sentences but could be any body of text. Hence, in this work we use both terms interchangeably.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We use BERT-Base, Multilingual Cased: 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters 9 See also: http://nlp.seas.harvard.edu/2018/04/03/attention.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We are grateful for Ayah Zirikly, Bart Desmet and Ren\u00e9 F. Kizilcec for their valuable insights, critical commentary and helpful discussions during the course of this work. We also thank our anonymous reviewers for their thoughtful comments and suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "In line with recent efforts addressing ethical issues that can result from the use of data and technology and following the recommendations of Bender and Friedman (2018), we are sharing the following information that we believed is relevant to our dataset and the collection process. We encourage future use of the data to include a summary of this information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Data Statement", |
| "sec_num": null |
| }, |
| { |
| "text": "To study and build tools in the areas of stance prediction and claim verification. Data was selected from news titles and rewritten by annotators for the purpose of generating statements and statement pairs. Part of the dataset was a random subset of the ANT corpus which was created through webcrawling news sources in the Middle East. As different tools and annotation were included in the creation of the data, we expect the distribution of topics, opinions and language to incorporate different types and levels of bias. To the best of our knowledge, the data is in Standard Arabic ('arb') with few exceptions such as abbreviations. At least Latin script ('Latn') is present.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.1 Language Variety", |
| "sec_num": null |
| }, |
| { |
| "text": "A total of 8 crowd-source workers mostly from the Middle East contributed to the annotations. Annotators were selected based on their fluency in the Arabic language. Demographic information was not available at the time annotation for all recruited individuals. Of the information available, we are aware of at least 1 woman, 2 men and 3 individuals who are Arabic native speakers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Annotator Demographic", |
| "sec_num": null |
| }, |
| { |
| "text": "The dataset includes a subset of the news titles from ANT news corpus (v2.1) which included 5", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.3 Text Characteristics", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Khaled Shaban, Nizar Habash, and Emad Yahya", |
| "authors": [ |
| { |
| "first": "Ayman", |
| "middle": [], |
| "last": "Al Zaatari", |
| "suffix": "" |
| }, |
| { |
| "first": "Rim", |
| "middle": [ |
| "El" |
| ], |
| "last": "Ballouli", |
| "suffix": "" |
| }, |
| { |
| "first": "Shady", |
| "middle": [], |
| "last": "Elbassouni", |
| "suffix": "" |
| }, |
| { |
| "first": "Wassim", |
| "middle": [], |
| "last": "El-Hajj", |
| "suffix": "" |
| }, |
| { |
| "first": "Hazem", |
| "middle": [], |
| "last": "Hajj", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
| "volume": "", |
| "issue": "", |
| "pages": "4396--4401", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ayman Al Zaatari, Rim El Ballouli, Shady ELbassouni, Wassim El-Hajj, Hazem Hajj, Khaled Shaban, Nizar Habash, and Emad Yahya. 2016. Arabic corpora for credibility analysis. In Proceedings of the Tenth In- ternational Conference on Language Resources and Evaluation (LREC'16), pages 4396-4401.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "An arabic corpus of fake news: Collection, analysis and classification", |
| "authors": [ |
| { |
| "first": "Maysoon", |
| "middle": [], |
| "last": "Alkhair", |
| "suffix": "" |
| }, |
| { |
| "first": "Karima", |
| "middle": [], |
| "last": "Meftouh", |
| "suffix": "" |
| }, |
| { |
| "first": "Kamel", |
| "middle": [], |
| "last": "Sma\u00efli", |
| "suffix": "" |
| }, |
| { |
| "first": "Nouha", |
| "middle": [], |
| "last": "Othman", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Arabic Language Processing: From Theory to Practice", |
| "volume": "", |
| "issue": "", |
| "pages": "292--302", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maysoon Alkhair, Karima Meftouh, Kamel Sma\u00efli, and Nouha Othman. 2019. An arabic corpus of fake news: Collection, analysis and classification. In Ara- bic Language Processing: From Theory to Practice, pages 292-302. Springer International Publishing.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Social media and fake news in the 2016 election", |
| "authors": [ |
| { |
| "first": "Hunt", |
| "middle": [], |
| "last": "Allcott", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Gentzkow", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "J. Econ. Perspect", |
| "volume": "31", |
| "issue": "2", |
| "pages": "211--236", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hunt Allcott and Matthew Gentzkow. 2017. Social me- dia and fake news in the 2016 election. J. Econ. Per- spect., 31(2):211-236.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Rumor detection in arabic tweets using semi-supervised and unsupervised expectation-maximization", |
| "authors": [ |
| { |
| "first": "Samah", |
| "middle": [ |
| "M" |
| ], |
| "last": "Alzanin", |
| "suffix": "" |
| }, |
| { |
| "first": "Aqil", |
| "middle": [ |
| "M" |
| ], |
| "last": "Azmi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samah M Alzanin and Aqil M Azmi. 2019. Rumor detection in arabic tweets using semi-supervised and unsupervised expectation-maximization.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Knowledge-Based Systems", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "185", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Knowledge-Based Systems, 185:104945.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Predicting factuality of reporting and bias of news media sources", |
| "authors": [ |
| { |
| "first": "Ramy", |
| "middle": [], |
| "last": "Baly", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgi", |
| "middle": [], |
| "last": "Karadzhov", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimitar", |
| "middle": [], |
| "last": "Alexandrov", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramy Baly, Georgi Karadzhov, Dimitar Alexandrov, James Glass, and Preslav Nakov. 2018a. Predict- ing factuality of reporting and bias of news media sources.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Integrating stance detection and fact check", |
| "authors": [ |
| { |
| "first": "Ramy", |
| "middle": [], |
| "last": "Baly", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitra", |
| "middle": [], |
| "last": "Mohtarami", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| }, |
| { |
| "first": "Lluis", |
| "middle": [], |
| "last": "Marquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramy Baly, Mitra Mohtarami, James Glass, Lluis Marquez, Alessandro Moschitti, and Preslav Nakov. 2018b. Integrating stance detection and fact check- ing in a unified corpus.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Data statements for natural language processing: Toward mitigating system bias and enabling better science", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Bender", |
| "suffix": "" |
| }, |
| { |
| "first": "Batya", |
| "middle": [], |
| "last": "Friedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "0", |
| "pages": "587--604", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Bender and Batya Friedman. 2018. Data state- ments for natural language processing: Toward mit- igating system bias and enabling better science. Transactions of the Association for Computational Linguistics, 6(0):587-604.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Learning natural language inference from a large annotated corpus", |
| "authors": [ |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Samuel R Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "632--642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R Bowman, Gabor Angeli, Christopher Potts, and Christopher D Manning. 2015. Learning natu- ral language inference from a large annotated corpus. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 632-642.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "SimpleLSTM: A Deep-Learning approach to Simple-Claims classification", |
| "authors": [ |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Chawla", |
| "suffix": "" |
| }, |
| { |
| "first": "Diego", |
| "middle": [], |
| "last": "Esteves", |
| "suffix": "" |
| }, |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Pujar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jens", |
| "middle": [], |
| "last": "Lehmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Progress in Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "244--255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piyush Chawla, Diego Esteves, Karthik Pujar, and Jens Lehmann. 2019. SimpleLSTM: A Deep- Learning approach to Simple-Claims classification. In Progress in Artificial Intelligence, pages 244-255.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "ANT corpus: An arabic news text collection for textual classification", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Chouigui", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "O B Khiroun", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Elayeb", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA)", |
| "volume": "", |
| "issue": "", |
| "pages": "135--142", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Springer International Publishing. A Chouigui, O B Khiroun, and B Elayeb. 2017. ANT corpus: An arabic news text collection for textual classification. In 2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA), pages 135-142.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Improved stance prediction in a user similarity feature space", |
| "authors": [ |
| { |
| "first": "Kareem", |
| "middle": [], |
| "last": "Darwish", |
| "suffix": "" |
| }, |
| { |
| "first": "Walid", |
| "middle": [], |
| "last": "Magdy", |
| "suffix": "" |
| }, |
| { |
| "first": "Tahar", |
| "middle": [], |
| "last": "Zanouda", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kareem Darwish, Walid Magdy, and Tahar Zanouda. 2017. Improved stance prediction in a user sim- ilarity feature space. In Proceedings of the 2017", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "IEEE/ACM international conference on advances in social networks analysis and mining 2017", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "145--148", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "IEEE/ACM international conference on advances in social networks analysis and mining 2017, pages 145-148.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018a. BERT: Pre-training of deep bidirectional transformers for language under- standing.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018b. BERT: pre-training of deep bidirectional transformers for language under- standing. CoRR, abs/1810.04805.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "CAT: Credibility analysis of Arabic content on twitter", |
| "authors": [ |
| { |
| "first": "Rim", |
| "middle": [ |
| "El" |
| ], |
| "last": "Ballouli", |
| "suffix": "" |
| }, |
| { |
| "first": "Wassim", |
| "middle": [], |
| "last": "El-Hajj", |
| "suffix": "" |
| }, |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Ghandour", |
| "suffix": "" |
| }, |
| { |
| "first": "Shady", |
| "middle": [], |
| "last": "Elbassuoni", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Third Arabic Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "62--71", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-1308" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rim El Ballouli, Wassim El-Hajj, Ahmad Ghandour, Shady Elbassuoni, Hazem Hajj, and Khaled Shaban. 2017. CAT: Credibility analysis of Arabic content on twitter. In Proceedings of the Third Arabic Nat- ural Language Processing Workshop, pages 62-71, Valencia, Spain. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Overview of the CLEF-2019 CheckThat! lab: Automatic identification and verification of claims", |
| "authors": [ |
| { |
| "first": "Tamer", |
| "middle": [], |
| "last": "Elsayed", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Barr\u00f3n-Cede\u00f1o", |
| "suffix": "" |
| }, |
| { |
| "first": "Maram", |
| "middle": [], |
| "last": "Hasanain", |
| "suffix": "" |
| }, |
| { |
| "first": "Reem", |
| "middle": [], |
| "last": "Suwaileh", |
| "suffix": "" |
| }, |
| { |
| "first": "Giovanni", |
| "middle": [], |
| "last": "Da San Martino", |
| "suffix": "" |
| }, |
| { |
| "first": "Pepa", |
| "middle": [], |
| "last": "Atanasova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tamer Elsayed, Preslav Nakov, Alberto Barr\u00f3n- Cede\u00f1o, Maram Hasanain, Reem Suwaileh, Gio- vanni Da San Martino, and Pepa Atanasova. 2019. Overview of the CLEF-2019 CheckThat! lab: Auto- matic identification and verification of claims.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Description of the system developed by team athene in the fnc-1", |
| "authors": [ |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Hanselowski", |
| "suffix": "" |
| }, |
| { |
| "first": "P V S", |
| "middle": [], |
| "last": "Avinesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Schiller", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Caspelherr", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andreas Hanselowski, P V S Avinesh, Benjamin Schiller, and Felix Caspelherr. 2017. Description of the system developed by team athene in the fnc-1. Fake News Challenge.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Assessing arabic weblog credibility via deep co-learning", |
| "authors": [ |
| { |
| "first": "Chadi", |
| "middle": [], |
| "last": "Helwe", |
| "suffix": "" |
| }, |
| { |
| "first": "Shady", |
| "middle": [], |
| "last": "Elbassuoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Ayman", |
| "middle": [ |
| "Al" |
| ], |
| "last": "Zaatari", |
| "suffix": "" |
| }, |
| { |
| "first": "Wassim", |
| "middle": [], |
| "last": "El-Hajj", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourth Arabic Natural Language Processing Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "130--136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chadi Helwe, Shady Elbassuoni, Ayman Al Zaatari, and Wassim El-Hajj. 2019. Assessing arabic we- blog credibility via deep co-learning. In Proceed- ings of the Fourth Arabic Natural Language Process- ing Workshop, pages 130-136.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Long shortterm memory", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Comput", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S Hochreiter and J Schmidhuber. 1997. Long short- term memory. Neural Comput., 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Semantic Theory", |
| "authors": [ |
| { |
| "first": "Jerrold", |
| "middle": [ |
| "J" |
| ], |
| "last": "Katz", |
| "suffix": "" |
| } |
| ], |
| "year": 1972, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jerrold J Katz. 1972. Semantic Theory. New York: Harper & Row.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "AUTOHOME-ORCA at SemEval-2019 task 8: Application of BERT for fact-checking in community forums", |
| "authors": [ |
| { |
| "first": "Zhengwei", |
| "middle": [], |
| "last": "Lv", |
| "suffix": "" |
| }, |
| { |
| "first": "Duoxing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Lei", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhizhong", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Feng", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Workshop on Semantic Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "870--876", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengwei Lv, Duoxing Liu, Haifeng Sun, Xiao Liang, Tao Lei, Zhizhong Shi, Feng Zhu, and Lei Yang. 2019. AUTOHOME-ORCA at SemEval-2019 task 8: Application of BERT for fact-checking in com- munity forums. In Proceedings of the 13th Inter- national Workshop on Semantic Evaluation, pages 870-876.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "False news on social media: A Data-Driven survey", |
| "authors": [ |
| { |
| "first": "Francesco", |
| "middle": [], |
| "last": "Pierri", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefano", |
| "middle": [], |
| "last": "Ceri", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francesco Pierri and Stefano Ceri. 2019. False news on social media: A Data-Driven survey.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "DeClarE: Debunking fake news and false claims using Evidence-Aware deep learning", |
| "authors": [ |
| { |
| "first": "Kashyap", |
| "middle": [], |
| "last": "Popat", |
| "suffix": "" |
| }, |
| { |
| "first": "Subhabrata", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Yates", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerhard", |
| "middle": [], |
| "last": "Weikum", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kashyap Popat, Subhabrata Mukherjee, Andrew Yates, and Gerhard Weikum. 2018. DeClarE: Debunking fake news and false claims using Evidence-Aware deep learning.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "A stylometric inquiry into hyperpartisan and fake news", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Potthast", |
| "suffix": "" |
| }, |
| { |
| "first": "Johannes", |
| "middle": [], |
| "last": "Kiesel", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Reinartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Janek", |
| "middle": [], |
| "last": "Bevendorff", |
| "suffix": "" |
| }, |
| { |
| "first": "Benno", |
| "middle": [], |
| "last": "Stein", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Potthast, Johannes Kiesel, Kevin Reinartz, Janek Bevendorff, and Benno Stein. 2017. A sty- lometric inquiry into hyperpartisan and fake news.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Improving language understanding by generative pre-training", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford. 2018. Improving language understand- ing by generative pre-training.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI Blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Fake news or truth? using satirical cues to detect potentially misleading news", |
| "authors": [ |
| { |
| "first": "Victoria", |
| "middle": [], |
| "last": "Rubin", |
| "suffix": "" |
| }, |
| { |
| "first": "Niall", |
| "middle": [], |
| "last": "Conroy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yimin", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Cornwell", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Second Workshop on Computational Approaches to Deception Detection", |
| "volume": "", |
| "issue": "", |
| "pages": "7--17", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-0802" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victoria Rubin, Niall Conroy, Yimin Chen, and Sarah Cornwell. 2016. Fake news or truth? using satiri- cal cues to detect potentially misleading news. In Proceedings of the Second Workshop on Computa- tional Approaches to Deception Detection, pages 7- 17, San Diego, California. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Team QCRI-MIT at SemEval-2019 task 4: Propaganda analysis meets hyperpartisan news detection", |
| "authors": [ |
| { |
| "first": "Abdelrhman", |
| "middle": [], |
| "last": "Saleh", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramy", |
| "middle": [], |
| "last": "Baly", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Barr\u00f3n-Cede\u00f1o", |
| "suffix": "" |
| }, |
| { |
| "first": "Giovanni", |
| "middle": [ |
| "Da", |
| "San" |
| ], |
| "last": "Martino", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitra", |
| "middle": [], |
| "last": "Mohtarami", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abdelrhman Saleh, Ramy Baly, Alberto Barr\u00f3n- Cede\u00f1o, Giovanni Da San Martino, Mitra Mo- htarami, Preslav Nakov, and James Glass. 2019. Team QCRI-MIT at SemEval-2019 task 4: Propa- ganda analysis meets hyperpartisan news detection.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Automated fact checking: Task formulations, methods and future directions", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Thorne", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Thorne and Andreas Vlachos. 2018. Automated fact checking: Task formulations, methods and fu- ture directions.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "FEVER: a large-scale dataset for fact extraction and VERification", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Thorne", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Vlachos", |
| "suffix": "" |
| }, |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Christodoulopoulos", |
| "suffix": "" |
| }, |
| { |
| "first": "Arpit", |
| "middle": [], |
| "last": "Mittal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018. FEVER: a large-scale dataset for fact extraction and VERification.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "A simple method for commonsense reasoning", |
| "authors": [ |
| { |
| "first": "Trieu", |
| "middle": [ |
| "H" |
| ], |
| "last": "Trinh", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Trieu H Trinh and Quoc V Le. 2018. A simple method for commonsense reasoning.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "The spread of true and false news online", |
| "authors": [ |
| { |
| "first": "Soroush", |
| "middle": [], |
| "last": "Vosoughi", |
| "suffix": "" |
| }, |
| { |
| "first": "Deb", |
| "middle": [], |
| "last": "Roy", |
| "suffix": "" |
| }, |
| { |
| "first": "Sinan", |
| "middle": [], |
| "last": "Aral", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Science", |
| "volume": "359", |
| "issue": "6380", |
| "pages": "1146--1151", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soroush Vosoughi, Deb Roy, and Sinan Aral. 2018. The spread of true and false news online. Science, 359(6380):1146-1151.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "liar, liar pants on fire\": A new benchmark dataset for fake news detection", |
| "authors": [ |
| { |
| "first": "William", |
| "middle": [ |
| "Yang" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Yang Wang. 2017. \"liar, liar pants on fire\": A new benchmark dataset for fake news detection.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Eann: Event adversarial neural networks for multi-modal fake news detection", |
| "authors": [ |
| { |
| "first": "Yaqing", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Fenglong", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiwei", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ye", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Guangxu", |
| "middle": [], |
| "last": "Xun", |
| "suffix": "" |
| }, |
| { |
| "first": "Kishlay", |
| "middle": [], |
| "last": "Jha", |
| "suffix": "" |
| }, |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery Data Mining, KDD '18", |
| "volume": "", |
| "issue": "", |
| "pages": "849--857", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3219819.3219903" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yaqing Wang, Fenglong Ma, Zhiwei Jin, Ye Yuan, Guangxu Xun, Kishlay Jha, Lu Su, and Jing Gao. 2018. Eann: Event adversarial neural networks for multi-modal fake news detection. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery Data Mining, KDD '18, page 849-857, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Length of sentences in dataset (rewritten vs. reference)" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Comparison in rewritten sentences" |
| }, |
| "TABREF0": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "text": "Guidelines for rewriting news titles." |
| }, |
| "TABREF1": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Type</td><td>Translation</td><td>Arabic</td></tr><tr><td>Reference</td><td>Wall Street records largest losses in 6 weeks</td><td/></tr><tr><td>Paraphrase</td><td>Death of a journalist in mysterious circumstances after he reported</td><td/></tr><tr><td/><td>on Russian Mercenaries in Syria</td><td/></tr><tr><td colspan=\"2\">Contradiction Death of a journalist after battling with illness</td><td/></tr><tr><td>Reference</td><td>5.5 Billion withdrawn from emerging markets by investors in one week</td><td/></tr><tr><td>Paraphrase</td><td>Nearly 6 Billion withdrawn in a week from emerging markets</td><td/></tr><tr><td>Contradiction</td><td>Almost a million in withdrawals from emerging markets</td><td/></tr></table>", |
| "text": "Paraphrase Losses in Wall Street are the highest in 6 weeks Contradiction Profits in Wall Street in the last six weeks Reference Death of a journalist who reported on Russian Mercenaries in Syria in mysterious circumstances" |
| }, |
| "TABREF2": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "text": "Examples of modifications by annotators. Green highlights a change in line with reference. Red highlights a conflicting part of the sentence with the reference sentence." |
| }, |
| "TABREF4": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "text": "Statistics for the annotation results. The author's label is the label obtained from the worker who rewrote the news title. Majority label is the consensus of 75% or higher of the annotators." |
| }, |
| "TABREF5": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "text": "Class distribution for claim verification. (#: total number of examples. %: percentage of all data)" |
| }, |
| "TABREF7": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Recurrent Perspective Matching: Our first</td></tr><tr><td>baseline is a simple RNN model that uses Long</td></tr><tr><td>Short Term Memory (LSTM)</td></tr></table>", |
| "text": "Class distribution for stance prediction. (#: total number of examples. %: percentage of all data)" |
| }, |
| "TABREF9": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td>Task</td><td colspan=\"2\">Prec. Rec.</td><td>F1</td></tr><tr><td>Claim Verification</td><td/><td/><td/></tr><tr><td>Fake</td><td>51</td><td>55</td><td>53</td></tr><tr><td>Not Fake</td><td>77</td><td>75</td><td>76</td></tr><tr><td>Macro Avg.</td><td>64.1</td><td colspan=\"2\">64.6 64.3</td></tr><tr><td>Stance Detection</td><td/><td/><td/></tr><tr><td>Agree</td><td>65</td><td>63</td><td>64</td></tr><tr><td>Disagree</td><td>80</td><td>81</td><td>80</td></tr><tr><td>Other</td><td>86</td><td>86</td><td>86</td></tr><tr><td>Macro Avg.</td><td>76.8</td><td colspan=\"2\">76.6 76.7</td></tr></table>", |
| "text": "9 ." |
| }, |
| "TABREF11": { |
| "html": null, |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>", |
| "text": "Examples of claim verification task predictions using fine-tuned BERT highlighting the model's invariant labels for similar sentences with different meanings." |
| } |
| } |
| } |
| } |