| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:47:23.275239Z" |
| }, |
| "title": "A Dashboard for Mitigating the COVID-19 Misinfodemic", |
| "authors": [ |
| { |
| "first": "Zhengyuan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Meng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Massachusetts Institute of Technology", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Josue", |
| "middle": [], |
| "last": "Caraballo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Israa", |
| "middle": [], |
| "last": "Jaradat", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Zeyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Farahnaz", |
| "middle": [], |
| "last": "Akrami", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Haojin", |
| "middle": [], |
| "last": "Liao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Fatma", |
| "middle": [], |
| "last": "Arslan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Damian", |
| "middle": [], |
| "last": "Jimenez", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Mohammed", |
| "middle": [ |
| "Samiul" |
| ], |
| "last": "Saeef", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Paras", |
| "middle": [], |
| "last": "Pathak", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chengkai", |
| "middle": [], |
| "last": "Li", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The University of Texas", |
| "location": { |
| "settlement": "Arlington" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes the current milestones achieved in our ongoing project that aims to understand the surveillance of, impact of, and effective interventions against the COVID-19 misinfodemic on Twitter. Specifically, it introduces a public dashboard which, in addition to displaying case counts in an interactive map and a navigational panel, also provides some unique features not found in other places. Particularly, the dashboard uses a curated catalog of COVID-19 related facts and debunks of misinformation, and it displays the most prevalent information from the catalog among Twitter users in user-selected U.S. geographic regions. The paper explains how to use BERT-based models to match tweets with the facts and misinformation and to detect their stance towards such information. The paper also discusses the results of preliminary experiments on analyzing the spatiotemporal spread of misinformation.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes the current milestones achieved in our ongoing project that aims to understand the surveillance of, impact of, and effective interventions against the COVID-19 misinfodemic on Twitter. Specifically, it introduces a public dashboard which, in addition to displaying case counts in an interactive map and a navigational panel, also provides some unique features not found in other places. Particularly, the dashboard uses a curated catalog of COVID-19 related facts and debunks of misinformation, and it displays the most prevalent information from the catalog among Twitter users in user-selected U.S. geographic regions. The paper explains how to use BERT-based models to match tweets with the facts and misinformation and to detect their stance towards such information. The paper also discusses the results of preliminary experiments on analyzing the spatiotemporal spread of misinformation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Alongside the COVID-19 pandemic, there is a raging global misinfodemic (Mian and Khan, 2020; Roozenbeek et al., 2020) just as deadly. As fear grows, false information related to the pandemic goes viral on social media and threatens to affect an overwhelmed population. Such misinformation misleads the public on how the virus is transmitted, how authorities and people are responding to the pandemic, as well as its symptoms, treatments, and so on. This onslaught exacerbates the vicious impact of the virus, as the misinformation drowns out credible information, interferes with measures to contain the outbreak, depletes resources needed by those at risk, and overloads the health care system. Although health misinformation is not new (Oyeyemi et al., 2014) , such a dangerous interplay between a pandemic and a misinfodemic is unprecedented. It calls for studying not only the outbreak but also its related misinformation; the fight on these two fronts must go hand-in-hand.", |
| "cite_spans": [ |
| { |
| "start": 71, |
| "end": 92, |
| "text": "(Mian and Khan, 2020;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 93, |
| "end": 117, |
| "text": "Roozenbeek et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 738, |
| "end": 760, |
| "text": "(Oyeyemi et al., 2014)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This demo paper describes the current milestones achieved in our ongoing project that aims to understand the surveillance of, impact of, and effective interventions against the COVID-19 misinfodemic. 1) For surveillance, we seek to discover the patterns by which different types of COVID-19 misinformation spread. 2) To understand the impact of misinformation, we aim to compare the spreading of the SARS-CoV-2 virus and misinformation and derive their correlations. 3) To understand what types of interventions are effective in containing misinformation, we will contrast the spreading of misinformation before and after debunking efforts. 4) To understand whether the outcomes related to 1), 2) and 3) differ by geographical locations and demographic groups, we will study the variability of misinformation and debunking efforts across geographical and demographic groups.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While we continue to pursue these directions, we have built an online dashboard at https:// idir.uta.edu/covid-19/ to directly benefit the public. A screencast video of the dashboard is at bit.ly/3c6v5xf. The dashboard provides a map, a navigation panel, and timeline charts for looking up numbers of cases, deaths, and recoveries, similar to a number of COVID-19 tracking dashboards. 123 However, our dashboard also provides several features not found in other places. 1) It displays the most prevalent factual information among Twitter users in any user-selected U.S. geographic region. 2) The \"factual information\" comes from a catalog that we manually curated. It includes statements from authoritative organizations, verdicts, debunks, and explanations of (potentially false) factual claims from fact-checking websites, and FAQs from credible sources. The catalog's entries are further organized into a taxonomy. For simplicity, we refer to it as the catalog and taxonomy of COVID-19 facts or just facts in ensuing discussion. 3) The dashboard displays COVID-19 related tweets from local authorities of user-selected geographic regions. 4) It embeds a chatbot built specifically for COVID-19 related questions. 5) It shows case statistics from several popular sources which sometimes differ.", |
| "cite_spans": [ |
| { |
| "start": 385, |
| "end": 388, |
| "text": "123", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The codebase of the dashboard's frontend, backend, and data collection tools is open-sourced at https://github.com/idirlab/covid19. All collected data are at https://github.com/idirlab/covid19data. Particularly, the catalog and taxonomy of facts are also available through a SPARQL endpoint at https://cokn.org/deliverables/7-covid19-kg/ and the corresponding RDF dataset can be requested there.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "What is particularly worth noting about the underlying implementation of the dashboard is the adaptation of state-of-the-art textual semantic similarity and stance detection models. Tweets are first passed through a claim-matching model, which selects the tweets that semantically match the facts in our catalog. Then, the stance detection model determines whether the tweets agree with, disagree with, or merely discuss these facts. This enables us to pinpoint pieces of misinformation (i.e., tweets that disagree with known facts) and analyze their spread.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A few studies analyzed and quantified the spread of COVID-19 misinformation on Twitter (Kouzy et al., 2020; Memon and Carley, 2020; Al-Rakhami and Al-Amri, 2020) and other social media platforms (Brennen et al., 2020) . However, these studies conducted mostly manual inspection of small datasets, while our system automatically sifts through millions of tweets and matches tweets with our catalog of facts. Figure 1 shows the dashboard's user interface, with its components highlighted.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 107, |
| "text": "(Kouzy et al., 2020;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 108, |
| "end": 131, |
| "text": "Memon and Carley, 2020;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 132, |
| "end": 132, |
| "text": "", |
| "ref_id": null |
| }, |
| { |
| "start": 196, |
| "end": 218, |
| "text": "(Brennen et al., 2020)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 408, |
| "end": 416, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Geographic region selection panel (Component 1). A user can select a specific country, a U.S. state, or a U.S. county by using this panel or the interactive map (Component 2). Once a region is selected, the panel shows the counts of confirmed cases, deaths and recovered cases for the region in collapsed or expanded modes. When a region is expanded by the user, counts from all available sources are displayed; on the other hand, if it is collapsed, only counts from the default (which the user can customize) data source are displayed. These sources do not provide identical numbers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Dashboard", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Interactive map (Component 2). On each country and each U.S. state, a red circle is displayed, with an area size proportional to its number of confirmed cases. When a state is selected, the circle is replaced with its counties' polygons in different shades of red, proportional to the counties' confirmed cases.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Dashboard", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Timeline chart (Component 3). It plots the counts of the selected region over time and can be viewed in linear or logarithmic scale.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Dashboard", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Panel of facts (Component 4). For the selected region, this panel displays facts from our catalog, and the distribution of people discussing, agreeing, or disagreeing with them on Twitter. A large number of people refuting these facts would indicate wide spread of misinformation. To avoid repeating misconceptions, the dashboard displays facts from authoritative sources only.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Dashboard", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Government tweets (Component 5). It displays COVID-19 related tweets in the past seven days from officials of the user-selected geographic region. These tweets are from a curated list of 3,744 Twitter handles that belong to governments, officials, and public health authorities at U.S. federal and state levels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Dashboard", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Chatbot (Component 6). This component embeds the Jennifer Chatbot built by the New Voices project of the National Academies of Sciences, Engineering and Medicine (Li et al., 2020) , which was built specifically for answering COVID-19 related questions. As part of the collaborative team behind this chatbot, we are expanding it using the aforementioned catalog.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 179, |
| "text": "(Li et al., 2020)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Dashboard", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The dashboard uses the following three datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1) Counts of confirmed cases, deaths, and recoveries. We collected these counts daily from Johns Hopkins University, 4 the New York Times (NYT) 5 and the COVID Tracking Project. 6 These sources provide statistics at various geographic granularities (country, state, county).", |
| "cite_spans": [ |
| { |
| "start": 178, |
| "end": 179, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "2) Tweets. We are using a collection of approximately 250 million COVID-19 related tweets from January 1st, 2020 to May 16th, 2020, obtained from (Banda et al., 2020 ) (version 10.0). We removed tweets and Twitter handles (and their tweets) that do not have location information, resulting in 34.6 million remaining tweets. We then randomly selected 10.4% of each month's tweets, leading to 3.6 million remaining tweets. We used the OpenStreetMap (Quinion et al., 2020) API to map the locations of Twitter accounts from user-entered free text to U.S. county names. We used the ArcGIS API 7 to map the locations of tweets from longitude/latitude to counties.", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 165, |
| "text": "(Banda et al., 2020", |
| "ref_id": null |
| }, |
| { |
| "start": 447, |
| "end": 469, |
| "text": "(Quinion et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "3) A catalog and a taxonomy of COVID-19 related facts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The manually curated catalog currently has 9,512 entries from 21 credible websites, including statements from authoritative organizations (e.g., WHO, CDC), verdicts, debunks, and explanations of factual claims (of which the truthfulness varies) from fact-checking websites (e.g., the IFCN CoronaVirusFacts Alliance Database, 8 PolitiFact), and FAQs both from credible sources (e.g., FDA, NYT) and a dataset curated by (Wei et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 418, |
| "end": 436, |
| "text": "(Wei et al., 2020)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We organized the entries in this catalog into a taxonomy of categories, by integrating and consolidating the available categories from a number of source websites, placing entries from other websites into these categories or creating new categories, and organizing the categories into a hierarchical structure based on their inclusion relationship. We also stored the catalog and the taxonomy 7 https://developers.arcgis.com/python/guide/ reverse-geocoding/ 8 https://www.poynter.org/ ifcn-covid-19-misinformation/ 9 Not every level-1 or level-2 category has subcategories. Coronavirus cannot be passed by dogs or cats but they can test positive.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "There has been no evidence that pets such as dogs or cats can spread the coronavirus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Animals, Spreading", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "More people die from the flu in the U.S. in 1 day than have died of the Coronavirus across the world ever.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "agree", |
| "sec_num": "0.817" |
| }, |
| { |
| "text": "Right now, it appears that COVID-19, the disease caused by the new coronavirus, causes more cases of severe disease and more deaths than the seasonal flu.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "agree", |
| "sec_num": "0.817" |
| }, |
| { |
| "text": "Cases 0.816 disagree as an RDF dataset, in which each entry of the catalog is identified by a unique resource identifier (URI). It is connected to a mediator node that represents the multiary relation associated with the entry. For example, Figure 3 shows a question about COVID-19, its answer and source, and the lowest-level taxonomy nodes that the entry belongs to, all connected to a mediator node. This RDF dataset, with 12 relations and 78,495 triples, is published in four popular RDF formats-N-Triples, Turtle, N3, and RDF/XML. Furthermore, we have set up a SPARQL query endpoint at https://cokn.org/deliverables/7-covid19-kg/ using OpenLink Virtuoso. 10", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 241, |
| "end": 249, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "agree", |
| "sec_num": "0.817" |
| }, |
| { |
| "text": "Given the catalog of COVID-19 related facts F and the tweets T , we first employ claim-matching to locate a set of tweets t f \u2208 T that discuss each fact f \u2208 F . Next, we apply stance detection on pairs p f = {(t, f ) | t \u2208 t f } to determine whether each t is agreeing with, disagreeing with, or neutrally discussing f . Finally, aggregate results are displayed on Component 4 of the dashboard to summarize the public's view on each fact. Figure 2 depicts the overall claim-matching 10 https://virtuoso.openlinksw.com/ and stance detection pipeline. For both tasks, we employed Bidirectional Encoder Representations from Transformers (BERT) (Devlin et al., 2019) . Table 1 shows some example results of claim matching and stance detection. Claim matching. We generate sentence embeddings s t and s f , for t and f respectively, using the mean-tokens pooling strategy in Sentence-BERT (Reimers and Gurevych, 2019) . The relevance between t and f is then calculated as:", |
| "cite_spans": [ |
| { |
| "start": 641, |
| "end": 662, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 884, |
| "end": 912, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 439, |
| "end": 447, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 665, |
| "end": 672, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "R t,f = s t \u2022 s f s t \u00d7 s f", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Given R t,f , we model claim-matching as a ranking task on the relevance between facts and tweets. Thus, the output of this stage is t f = {t \u2208 T | R t,f \u2265 \u03b8} for each fact f \u2208 F , where the threshold \u03b8 is 0.8 in our implementation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Stance detection. Given t f , we detect the stance that each tweet t takes toward fact f . There could be 3 classes of stance: agree (t supports f ), discuss (t neutrally discusses f ), and disagree (t refutes f ). For this task, we obtained a pre-trained BERT Base model 11 and trained it on the Fake-News Challenge Stage 1 (FNC-1) dataset. 12 We denote this model Stance-BERT.", |
| "cite_spans": [ |
| { |
| "start": 342, |
| "end": 344, |
| "text": "12", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We first pre-process p f to conform with BERT input conventions by 1) applying W (\u2022), the Word-Piece tokenizer (Wu et al., 2016) , 2) applying C(a 1 , a 2 , . . . , a n ), a function that concatenates arguments in appearance order, and 3) inserting specialized BERT tokens [CLS] and [SEP] . Since BERT has a maximum input length of M = 512 and some facts can exceed this limit, we propose a sliding-window approach inspired by (Devlin et al., 2019) to form input x f :", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 128, |
| "text": "(Wu et al., 2016)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 273, |
| "end": 278, |
| "text": "[CLS]", |
| "ref_id": null |
| }, |
| { |
| "start": 283, |
| "end": 288, |
| "text": "[SEP]", |
| "ref_id": null |
| }, |
| { |
| "start": 427, |
| "end": 448, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "x f = {C([CLS], W (t), [SEP], W (f ) [i * S,i * S+L] , [SEP]) | 0 \u2264 i < |W (f )| S } | (t, f ) \u2208 p f (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where S defines the distance between successive windows and L = M \u2212 (|W (t)| + 3) is the sequence length available for each fact. If i * S + L is an out-of-bounds index for W (f ), the extra space is padded using null tokens. Each element w \u2208 x f contains a set of windows representing a tweet-fact pair. Each window w i \u2208 w is passed into Stance-BERT, which returns probability distributions (each containing 3 entries, 1 for each class)\u0177 f w i for each window. Stance aggregation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For each fact f , the stance detection results are accumulated to generate scores S f C , where C \u2208 {agree, discuss, disagree} that denote the percentage of tweets that agree, discuss, and disagree with f : 13", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "S f C = w\u2208x f argmax \u03c3({\u0177 f w i | wi \u2208 w}) = C |x f |", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where \u03c3(\u2022) is a function that averages the model's output scores for each class across all windows of tweet-fact pair. The 3 final stance scores are passed to the dashboard's panel of facts (Component 4) for display.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Matching Tweets with Facts and Stance Detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To evaluate the performance of the claim matching component, we first created a Cartesian product of the 3.6 million tweets with 500 \"facts\" from the catalog (see Section 3 for description of datasets), followed by randomly selecting 800 tweet-fact pairs from the Cartesian product. To retain a balanced dataset, 400 pairs were drawn from those pairs scored over 0.8 by the claim matching component, and another 400 pairs were drawn from the rest. To obtain the ground-truth labels on these 800 pairs, we used three human annotators. 183 pairs were labeled \"matched\" (i.e., the tweet and the fact have matching topics) and 617 pairs \"unmatched\". Table 2 shows the claim matching component's performance on these 800 pairs, measured by precision@k and nDCG@k(normalized Discounted Cumulative Gain at k). Both precision@k and nDCG@k are metrics of ranking widely used in classification problem, the order of top k prediction is considered in nDCG@k but not in precision@k.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 646, |
| "end": 653, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance of Claim Matching", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Metric @5 @10 @20 @50 @100 Table 3 shows Stance-BERT's performance on the FNC-1 competition test dataset and our tweetfact pairs, using F1 scores for all 3 classes as well as macro-F1. On FNC-1, we tested 2 variations of the same model: Stance-BERT window , which uses the sliding-window approach (Section 4), and Stance-BERT trunc , a model that truncates/discards all inputs after M tokens but is otherwise identical to Stance-BERT window . Both variants significantly outperformed the method used in (Xu et al., 2018) , one of the recent competitive methods on FNC-1.", |
| "cite_spans": [ |
| { |
| "start": 503, |
| "end": 520, |
| "text": "(Xu et al., 2018)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 27, |
| "end": 34, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance of Claim Matching", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Note that FNC-1 also includes a fourth \"unrelated\" class that we discarded, since we already have a claim-matching component. Because other recent stance detection methods Fang et al., 2019) only reported macro-F1 scores calculated using all four classes including \"unrelated\", we cannot report a direct comparison with their methods. However, we argue that our macro-F1 of 0.65 remains highly competitive. The model of (Xu et al., 2018 ) achieved a 0.98 F1 score on \"unrelated\", which suggests that \"unrelated\" (i.e., separating related and unrelated pairs) is far easier than the other 3 classes (i.e., discerning between different classes of related pairs). Given that Stance-BERT significantly outperformed (Xu et al., 2018) on all other 3 classes, it is plausible that Stance-BERT will remain a top performer under all four classes.", |
| "cite_spans": [ |
| { |
| "start": 172, |
| "end": 190, |
| "text": "Fang et al., 2019)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 420, |
| "end": 436, |
| "text": "(Xu et al., 2018", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 711, |
| "end": 728, |
| "text": "(Xu et al., 2018)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance of Claim Matching", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "To evaluate Stance-BERT's performance on our tweet-fact pairs, the three human annotators produced ground-truth labels on another set of 481 randomly selected tweet-fact pairs. 200 pairs are labeled as \"matched\". These 200 pairs are further labeled as \"agree\"/\"discuss\"/\"disagree\", in a distribution of 110/73/17 tweet-fact pairs. Ultimately, we discovered that Stance-BERT performs remarkably well on \"agree\" and \"disagree\" classes but falters on \"discuss\". Figure 4 is the cumulative timeline for the top-6 countries with the most COVID-19 misinformation tweets in the dataset. \"Misinformation tweets\" refer to tweets that go against known facts as judged by our stance detection model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 459, |
| "end": 467, |
| "text": "Figure 4", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance of Claim Matching", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We also conducted a study on the correlation between misinformation tweet counts and COVID-19 case counts. We looked at the percentage of cases relative to a country's population size, and the percentage of misinformation tweets relative to the total number of tweets from a country. The Pearson correlation coefficients between them are in Table 4 . We find that the number of misinformation tweets most positively correlates with the number of confirmed cases. In contrast, its correlation with the number of recovered cases is weaker. Finally, we manually categorized the misinformation tweets based on the taxonomy (Section 3). Table 5 lists the five most frequent categories of misinformation tweets. These five categories make up 49.9% of all misinformation tweets, with the other 50.1% being spread out over the other 33 categories. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 341, |
| "end": 348, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| }, |
| { |
| "start": 632, |
| "end": 639, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Misinformation Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "This paper introduces an information dashboard constructed in the context of our ongoing project regarding the COVID-19 misinfodemic. Going forward, we will focus on developing the dashboard at scale, including more comprehensive tweet collection and catalog discovery and collection. We will also introduce more functions into the dashboard that are aligned with our project goal of studying the surveillance of, impact of, and intervention on COVID-19 misinfodemic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://www.covid19-trials.com/ 2 https://coronavirus.jhu.edu/map.html 3 https://www.cdc.gov/covid-data-tracker/index.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/CSSEGISandData/COVID-19 5 https://github.com/nytimes/covid-19-data 6 https://covidtracking.com/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/google-research/bert 12 http://www.fakenewschallenge.org/ 13 We use the Iverson bracket: [P ] = 1 if P is true, else 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Lies kill, facts save: Detecting covid-19 misinformation in twitter", |
| "authors": [ |
| { |
| "first": "Atif M Al-Amri", |
| "middle": [], |
| "last": "Mabrook S Al-Rakhami", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "IEEE Access", |
| "volume": "8", |
| "issue": "", |
| "pages": "155961--155970", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mabrook S Al-Rakhami and Atif M Al-Amri. 2020. Lies kill, facts save: Detecting covid-19 misin- formation in twitter. IEEE Access, 8:155961- 155970.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A large-scale COVID-19 twitter chatter dataset for open scientific research -an international collaboration", |
| "authors": [ |
| { |
| "first": "Juan", |
| "middle": [ |
| "M" |
| ], |
| "last": "Banda", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramya", |
| "middle": [], |
| "last": "Tekumalla", |
| "suffix": "" |
| }, |
| { |
| "first": "Guanyu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingyuan", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tuo", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.5281/zenodo.3723939" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juan M. Banda, Ramya Tekumalla, Guanyu Wang, Jingyuan Yu, Tuo Liu, Yuning Ding, Katya Arte- mova, Elena Tutubalin, and Gerardo Chowell. 2020. A large-scale COVID-19 twitter chatter dataset for open scientific research -an interna- tional collaboration.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Types, sources, and claims of COVID-19 misinformation", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Scott Brennen", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Felix", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [ |
| "N" |
| ], |
| "last": "Simon", |
| "suffix": "" |
| }, |
| { |
| "first": "Rasmus Kleis", |
| "middle": [], |
| "last": "Howard", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nielsen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J Scott Brennen, Felix M Simon, Philip N Howard, and Rasmus Kleis Nielsen. 2020. Types, sources, and claims of COVID-19 misinformation. Reuters Institute.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language un- derstanding. In NAACL, pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Neural multi-task learning for stance prediction", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "Moin", |
| "middle": [], |
| "last": "Nadeem", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitra", |
| "middle": [], |
| "last": "Mohtarami", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP Workshop on Fact Extraction and Verification", |
| "volume": "", |
| "issue": "", |
| "pages": "13--19", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Fang, Moin Nadeem, Mitra Mohtarami, and James Glass. 2019. Neural multi-task learning for stance prediction. In EMNLP Workshop on Fact Extraction and Verification, pages 13-19.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Coronavirus goes viral: quantifying the COVID-19 misinformation epidemic on twitter", |
| "authors": [ |
| { |
| "first": "Ramez", |
| "middle": [], |
| "last": "Kouzy", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Abi Jaoude", |
| "suffix": "" |
| }, |
| { |
| "first": "Afif", |
| "middle": [], |
| "last": "Kraitem", |
| "suffix": "" |
| }, |
| { |
| "first": "Molly", |
| "middle": [ |
| "B" |
| ], |
| "last": "El Alam", |
| "suffix": "" |
| }, |
| { |
| "first": "Basil", |
| "middle": [], |
| "last": "Karam", |
| "suffix": "" |
| }, |
| { |
| "first": "Elio", |
| "middle": [], |
| "last": "Adib", |
| "suffix": "" |
| }, |
| { |
| "first": "Jabra", |
| "middle": [], |
| "last": "Zarka", |
| "suffix": "" |
| }, |
| { |
| "first": "Cindy", |
| "middle": [], |
| "last": "Traboulsi", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Elie", |
| "suffix": "" |
| }, |
| { |
| "first": "Khalil", |
| "middle": [], |
| "last": "Akl", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Baddour", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramez Kouzy, Joseph Abi Jaoude, Afif Kraitem, Molly B El Alam, Basil Karam, Elio Adib, Jabra Zarka, Cindy Traboulsi, Elie W Akl, and Khalil Baddour. 2020. Coronavirus goes viral: quanti- fying the COVID-19 misinformation epidemic on twitter. Cureus, 12(3).", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Jennifer for COVID-19: An nlp-powered chatbot built for the people and by the people to combat misinformation", |
| "authors": [ |
| { |
| "first": "Yunyao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Tyrone", |
| "middle": [], |
| "last": "Grandison", |
| "suffix": "" |
| }, |
| { |
| "first": "Patricia", |
| "middle": [], |
| "last": "Silveyra", |
| "suffix": "" |
| }, |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Douraghy", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinyu", |
| "middle": [], |
| "last": "Guan", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Kieselbach", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengkai", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Haiqi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ACL Workshop on Natural Language Processing for COVID-19", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yunyao Li, Tyrone Grandison, Patricia Silveyra, Ali Douraghy, Xinyu Guan, Thomas Kieselbach, Chengkai Li, and Haiqi Zhang. 2020. Jennifer for COVID-19: An nlp-powered chatbot built for the people and by the people to combat misinforma- tion. In ACL Workshop on Natural Language Pro- cessing for COVID-19, pages 1-9.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Characterizing covid-19 misinformation communities using a novel twitter dataset", |
| "authors": [ |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Shahan", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Memon", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Carley", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2008.00791" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shahan Ali Memon and Kathleen M Carley. 2020. Characterizing covid-19 misinformation commu- nities using a novel twitter dataset. arXiv preprint arXiv:2008.00791.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Coronavirus: the spread of misinformation", |
| "authors": [ |
| { |
| "first": "Areeb", |
| "middle": [], |
| "last": "Mian", |
| "suffix": "" |
| }, |
| { |
| "first": "Shujhat", |
| "middle": [], |
| "last": "Khan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "BMC medicine", |
| "volume": "18", |
| "issue": "1", |
| "pages": "1--2", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Areeb Mian and Shujhat Khan. 2020. Coronavirus: the spread of misinformation. BMC medicine, 18(1):1-2.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Automatic stance detection using end-toend memory networks", |
| "authors": [ |
| { |
| "first": "Mitra", |
| "middle": [], |
| "last": "Mohtarami", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramy", |
| "middle": [], |
| "last": "Baly", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| }, |
| { |
| "first": "Preslav", |
| "middle": [], |
| "last": "Nakov", |
| "suffix": "" |
| }, |
| { |
| "first": "Llu\u00eds", |
| "middle": [], |
| "last": "M\u00e0rquez", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "767--776", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitra Mohtarami, Ramy Baly, James Glass, Preslav Nakov, Llu\u00eds M\u00e0rquez, and Alessandro Moschitti. 2018. Automatic stance detection using end-to- end memory networks. In NAACL, pages 767- 776.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Ebola, twitter, and misinformation: a dangerous combination", |
| "authors": [ |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Sunday Oluwafemi Oyeyemi", |
| "suffix": "" |
| }, |
| { |
| "first": "Rolf", |
| "middle": [], |
| "last": "Gabarron", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wynn", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "BMJ", |
| "volume": "349", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sunday Oluwafemi Oyeyemi, Elia Gabarron, and Rolf Wynn. 2014. Ebola, twitter, and misinforma- tion: a dangerous combination?. BMJ, 349:g6178.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Nominatim: A search engine for openstreetmap data", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Quinion", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [ |
| "T" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Quinion, Sarah Hoffmann, and Marc T. Met- ten. 2020. Nominatim: A search engine for open- streetmap data.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
| "authors": [ |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reimers", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP-IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "3973--3983", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. In EMNLP-IJCNLP, pages 3973-3983.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Susceptibility to misinformation about covid-19 around the world", |
| "authors": [ |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Roozenbeek", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [ |
| "R" |
| ], |
| "last": "Schneider", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [], |
| "last": "Dryhurst", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Kerr", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [ |
| "Lj" |
| ], |
| "last": "Freeman", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabriel", |
| "middle": [], |
| "last": "Recchia", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Royal Society open science", |
| "volume": "7", |
| "issue": "10", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jon Roozenbeek, Claudia R Schneider, Sarah Dry- hurst, John Kerr, Alexandra LJ Freeman, Gabriel Recchia, Anne Marthe Van Der Bles, and Sander Van Der Linden. 2020. Susceptibility to misinfor- mation about covid-19 around the world. Royal Society open science, 7(10):201199.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "What are people asking about covid-19? a question classification dataset", |
| "authors": [ |
| { |
| "first": "Jerry", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengyu", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Soroush", |
| "middle": [], |
| "last": "Vosoughi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2005.12522" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jerry Wei, Chengyu Huang, Soroush Vosoughi, and Jason Wei. 2020. What are people asking about covid-19? a question classification dataset. arXiv preprint arXiv:2005.12522.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.08144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google's neural machine translation system: Bridging the gap between hu- man and machine translation. arXiv preprint arXiv:1609.08144.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Adversarial domain adaptation for stance detection", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitra", |
| "middle": [], |
| "last": "Mohtarami", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Xu, Mitra Mohtarami, and James Glass. 2018. Adversarial domain adaptation for stance detec- tion. In NeurIPS.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "The user interface of the dashboard for mitigating the COVID-19 misinfodemic", |
| "num": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Matching tweets with facts and stance detection", |
| "num": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "An entry of the catalog stored in RDF", |
| "num": null |
| }, |
| "FIGREF4": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Top-6 countries with the most misinformation tweets", |
| "num": null |
| }, |
| "TABREF1": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td colspan=\"5\">: Performance of claim matching on the 800 tweet-</td></tr><tr><td>fact pairs</td><td/><td/><td/><td/></tr><tr><td colspan=\"4\">5.2 Performance of Stance-BERT</td><td/></tr><tr><td>Model</td><td colspan=\"4\">F1 score agree discuss disagree macro</td></tr><tr><td>Stance-BERT window (FNC-1)</td><td>0.65</td><td>0.45</td><td>0.84</td><td>0.65</td></tr><tr><td>Stance-BERT trunc (FNC-1)</td><td>0.66</td><td>0.41</td><td>0.82</td><td>0.63</td></tr><tr><td>(Xu et al., 2018)(FNC-1)</td><td>0.55</td><td>0.15</td><td>0.73</td><td>0.48</td></tr><tr><td colspan=\"2\">Stance-BERT window (COVID-19) 0.75</td><td>0.03</td><td>0.58</td><td>0.45</td></tr></table>" |
| }, |
| "TABREF4": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>: Performance of Stance-BERT on the FNC-1 test</td></tr><tr><td>dataset and 200 matched tweet-fact pairs</td></tr></table>" |
| }, |
| "TABREF6": { |
| "text": "Correlation between the percentage of confirmed/deceased/recovered cases and the percentage of misinformation tweets. The number of recovered cases in U.K. after April 13th is missing from the data source.", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF8": { |
| "text": "Most frequent categories of misinformation tweets", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |