{
"paper_id": "2020",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T14:38:54.032009Z"
},
"title": "Collecting Verified COVID-19 Question Answer Pairs",
"authors": [
{
"first": "Adam",
"middle": [],
"last": "Poliak",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Johns Hopkins University",
"location": {}
},
"email": ""
},
{
"first": "Max",
"middle": [],
"last": "Fleming",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Cash",
"middle": [],
"last": "Costello",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Kenton",
"middle": [],
"last": "Murray",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Shivani",
"middle": [],
"last": "Pandya",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Darius",
"middle": [],
"last": "Irani",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Milind",
"middle": [],
"last": "Agarwal",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Udit",
"middle": [],
"last": "Sharma",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Shuo",
"middle": [],
"last": "Sun",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Nicola",
"middle": [],
"last": "Ivanov",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Mahsa",
"middle": [],
"last": "Yarmohammadi",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Lingxi",
"middle": [],
"last": "Shang",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Kaushik",
"middle": [],
"last": "Srinivasan",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Seolhwa",
"middle": [],
"last": "Lee",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Xu",
"middle": [],
"last": "Han",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Smisha",
"middle": [],
"last": "Agarwal",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Jo\u00e3o",
"middle": [],
"last": "Sedoc",
"suffix": "",
"affiliation": {},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "We release a dataset of over 2,100 COVID-19 related Frequently asked Question-Answer pairs scraped from over 40 trusted websites. We include an additional 24, 000 questions pulled from online sources that have been aligned by experts with existing answered questions from our dataset. This paper describes our efforts in collecting the dataset and summarizes the resulting data. Our dataset is automatically updated daily and available at https://github.com/JHU-COVID-QA/ scraping-qas. So far, this data has been used to develop a chatbot providing users information about COVID-19. We encourage others to build analytics and tools upon this dataset as well.",
"pdf_parse": {
"paper_id": "2020",
"_pdf_hash": "",
"abstract": [
{
"text": "We release a dataset of over 2,100 COVID-19 related Frequently asked Question-Answer pairs scraped from over 40 trusted websites. We include an additional 24, 000 questions pulled from online sources that have been aligned by experts with existing answered questions from our dataset. This paper describes our efforts in collecting the dataset and summarizes the resulting data. Our dataset is automatically updated daily and available at https://github.com/JHU-COVID-QA/ scraping-qas. So far, this data has been used to develop a chatbot providing users information about COVID-19. We encourage others to build analytics and tools upon this dataset as well.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "With the quick spread of COVID19, misinformation has rapidly spread. 1 Misinformation around the use of certain drugs for the prevention of Covid-19 has had fatal outcomes, and stigmatization guided by misinformation about certain communities as vectors of virus undermines the long-term welfare of our society. We are developing a natural language processing (NLP) backed-informational chatbot targeted at comprehensive COVID-19 information and misinformation. Users can interact with our chatbot on different platforms to access information about COVID-19, available care, and other topics of interest. 2 To aid in this effort, we aggregate factual information in the form of verified questions and answers to help answer frequently asked questions about the pandemic. We employ three main aggregation efforts in tandem: 1) generating high quality and accurate information from domain experts, i.e. Public Health researchers at Johns Hopkins University; 2) automatically scraping frequently asked questions and answers from online trusted sources, e.g. newspapers and government agencies; and 3) automatically ranking and manually aligning additional questions from social media with the scraped questions and answers in our dataset. This paper primarily describes our efforts to extract high quality content from trustworthy websites and domain experts. Our effort has resulted in a publicly available dataset that currently contains over 2,100 Questions and Answers from more than 40 webpages. The dataset is available at https://covid-19-infobot.org/data/. Since we are actively scraping more websites and rescrape all sites at least once a day, these numbers are updated daily. 3",
"cite_spans": [
{
"start": 605,
"end": 606,
"text": "2",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We create our publicly available dataset of over 2,100 question-answer pairs by aggregating FAQs from trusted news sources. 4 We choose websites to scrape based on three broad criteria: 1) the informativeness and trustworthiness of the website; 2) the ease of scraping frequently asked question-answer pairs from the website; and 3) the number of questions and answers on the website.",
"cite_spans": [
{
"start": 124,
"end": 125,
"text": "4",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Creating our FAQ Dataset",
"sec_num": "2"
},
{
"text": "We use a straightforward scraping process that enables undergraduate students to contribute to our efforts. We developed a python library for students to easily add scrapers to our project. As demonstrated in the example in Figure 1 , our library requires each question-answer (and metadata) to be stored as a simple dictionary. The library automatically adds this information to our set of question-answer pairs. Additionally, the library accordingly handles updating answers to questions in our dataset if a previously scraped website updates its information.",
"cite_spans": [],
"ref_spans": [
{
"start": 224,
"end": 232,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Creating our FAQ Dataset",
"sec_num": "2"
},
{
"text": "This has enabled students to efficiently join the project and contribute immediately. Further documentation is available at https://github.com/ JHU-COVID-QA/scraping-qas and we encourage others to join our efforts.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Creating our FAQ Dataset",
"sec_num": "2"
},
{
"text": "For each scraped question-answer pair, we extract relevant metadata for our chatbot and other NLP analytics. The metadata includes information about the source of each question-answer pair (we include both the source name and the URL) and the date when the question-answer was last scraped from or updated on the website. Additionally, if the information on the website is targeted for a specific geographic area, we include that in our metadata as well.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Metadata",
"sec_num": "2.1"
},
{
"text": "We leverage existing scrapers for collecting questions-answer pairs for COVID-19. 874 of our examples come from scrapers released by deepset. 5 Following deepset's lead, we open-source our scrapers as well.",
"cite_spans": [
{
"start": 142,
"end": 143,
"text": "5",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Leveraging existing scrapers",
"sec_num": "2.2"
},
{
"text": "As our understanding of COVID-19 rapidly evolves, trustworthy sources update the informa-5 https://github.com/deepset-ai/COVID-QA/ tion they release. Therefore, each day, we automatically re-run the web scrapers to find new information. This enables us to add new questionanswers or update answers to existing questions in our dataset.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Continuous scraping",
"sec_num": "2.3"
},
{
"text": "If a previously scraped question-answer is removed from a website, we remove that example from our dataset. 6 Questions and answers that we removed from our dataset are still available in our history since we archive each day's dataset. In turn, the quality of our dataset is constantly evolving and improving.",
"cite_spans": [
{
"start": 108,
"end": 109,
"text": "6",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Continuous scraping",
"sec_num": "2.3"
},
{
"text": "The described effort resulted in a dataset that is evolving daily. The June 15th version contains over 2,100 questions and answers scraped from 40 websites. We list the number of question-answer pairs extracted from each source in Table 1 . Our dataset contains some examples in different languages besides for English, owing to deepset scraping websites in multiple languages. Figure 2 plots the number of question-answer pairs in each of the five languages: English, German, Polish, Italian, and Swedish. Roughly 70% of our examples are in English. As we release more data, we will include further analysis of the growing dataset.",
"cite_spans": [],
"ref_spans": [
{
"start": 231,
"end": 238,
"text": "Table 1",
"ref_id": null
},
{
"start": 378,
"end": 386,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Data",
"sec_num": "3"
},
{
"text": "Websites might update or change how they store information. This is why the current version of our dataset contains just 1 example from the Delaware State Government webpage. The May 20th version of our dataset contains 22 examples from this website. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data",
"sec_num": "3"
},
{
"text": "Questions and Answers",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Manually Aligning Additional",
"sec_num": "4"
},
{
"text": "Since the internet contains many more questions that are not answered, we additionally collected questions and align them with the question-answer pairs in our dataset. We leverage information retrieval techniques to match these unanswered questions with questions in our dataset and then rely on domain experts to verify each aligned questionquestion-answer (QQA) pair. In this section, we provide details for each of these steps.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Manually Aligning Additional",
"sec_num": "4"
},
{
"text": "We downloaded 28 million tweets from the COVID-19 Twitter Dataset (Chen et al., 2020) , Qorona, 7 and CovidFaq 8 , extracted the questions from those resources, 9 sorted them by frequency, and discarded the questions that occurred less than four times. Then, we grouped semantically similar questions into 9, 200 clusters. Next, we extracted the centers of the clusters and, using a state-of-the-art sentence re-writer (Hu et al., 2019) , we generated three high quality paraphrases of each question. This resulted in a collection of over 27, 000 unanswered questions about COVID-19.",
"cite_spans": [
{
"start": 66,
"end": 85,
"text": "(Chen et al., 2020)",
"ref_id": "BIBREF0"
},
{
"start": 419,
"end": 436,
"text": "(Hu et al., 2019)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Online Question Extraction",
"sec_num": "4.1"
},
{
"text": "We worked with public health experts to align these unanswered questions with our verified question-7 https://github.com/allenai/Qorona 8 https://github.com/dialoguemd/ covidfaq 9 Corona and CovidFaq specifically contain questions. We extract questions from the Twitter dataset by determining whether a sentence from a tweet either ends with a question mark, or starts with a provided list of words (e.g., \"who\", \"when\", \"where\", etc). answer pairs (section 3). For each of these 27, 000 questions, we used a BM25 model (Robertson and Walker, 1994; Robertson et al., 1996) to determine the most similar answered questions in our dataset. 10 Following the EASL annotation protocol (Sakaguchi and Van Durme, 2018) , for each unanswered twitter question, we presented public health experts with the five most similar QA's from our dataset. Based on a formal protocol developed by a senior Public Health researcher on our team (Figure 4) , we asked the experts to determine, on a scale from 0 to 100, how relevant or similar the QA from our dataset is to the unanswered question.",
"cite_spans": [
{
"start": 520,
"end": 548,
"text": "(Robertson and Walker, 1994;",
"ref_id": "BIBREF4"
},
{
"start": 549,
"end": 572,
"text": "Robertson et al., 1996)",
"ref_id": "BIBREF3"
},
{
"start": 680,
"end": 711,
"text": "(Sakaguchi and Van Durme, 2018)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [
{
"start": 923,
"end": 933,
"text": "(Figure 4)",
"ref_id": null
}
],
"eq_spans": [],
"section": "Aligning Extracted Questions with Existing Questions and Answers",
"sec_num": "4.2"
},
{
"text": "For this annotation effort, we leveraged Turkle, an open-sourced, locally hosted clone of Amazon Mechanical Turk developed by the JHU Human Language Technology Center of Excellence. 11 Figure 5 and Figure 6 illustrate our annotation interface.",
"cite_spans": [],
"ref_spans": [
{
"start": 185,
"end": 193,
"text": "Figure 5",
"ref_id": null
},
{
"start": 198,
"end": 206,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Aligning Extracted Questions with Existing Questions and Answers",
"sec_num": "4.2"
},
{
"text": "As part of this protocol, expert annotators could indicate whether a question was not relevant to COVID-19 or whether an existing answer was no longer correct. We removed such labeled examples from our set. This effort results in 24, 240 annotated QQAs. Figure 3 plots the distribution of labels annotated for QQAs. Over 18, 000 examples were judged to be less than 1% relevant, indicating that the majority of the questions extracted from twitter are irrelevant to the answered questions in our dataset. These additional examples can be used to further train a chatbot to answer questions about COVID-19.",
"cite_spans": [],
"ref_spans": [
{
"start": 254,
"end": 262,
"text": "Figure 3",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Aligning Extracted Questions with Existing Questions and Answers",
"sec_num": "4.2"
},
{
"text": "We have presented our growing dataset of over 2,100 question-answers that has been created by scraping over 40 websites. We also discussed other data we collected and annotated that may be beneficial to others in the community as well. Our evolving dataset is complementary to other recent COVID-19 QA datasets, e.g. Tang ",
"cite_spans": [],
"ref_spans": [
{
"start": 317,
"end": 321,
"text": "Tang",
"ref_id": null
}
],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "https://www.newsguardtech.com/ coronavirus-misinformation-tracking-center/ 2 https://covid-19-infobot.org/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "The dataset's statistics described in this paper are based on a snapshot of the data as of June 25th, 2020, corresponding with https://github. com/JHU-COVID-QA/scraping-qas/tree/ a446c00c318e02cad5188cec359b9d649d8c49334 We additionally have over 300 question-answer pairs manually created by Public Health experts. We plan on including these in our publicly available dataset at a later date.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We assume that a website will remove information about COVID-19 that is no longer accurate.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We trained the BM25 model by using the answers that previously were manually aligned by experts with the candidate questions. We then calculated scores between the terms in the input question and terms in the candidate answers. We used the implementation in Elasticsearch and relied on the default parameters.11 https://github.com/hltcoe/turkle",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "We thank the reviewers for their insightful comments. This work was supported in part by DARPA KAIROS (FA8750-19-2-0034). The views and conclusions contained in this work are those of the authors and should not be interpreted as representing official policies or endorsements by DARPA or the U.S. Government.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
},
{
"text": " Table 1 : Number of question-answer pairs for each source in the dataset scraped on June 25th. Some of these sources contain more than one website. The bottom half represents the websources in our dataset that we extract using deepset's scrapers. ",
"cite_spans": [],
"ref_spans": [
{
"start": 1,
"end": 8,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "annex",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Covid-19: The first public coronavirus twitter dataset",
"authors": [
{
"first": "Emily",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Kristina",
"middle": [],
"last": "Lerman",
"suffix": ""
},
{
"first": "Emilio",
"middle": [],
"last": "Ferrara",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Emily Chen, Kristina Lerman, and Emilio Ferrara. 2020. Covid-19: The first public coronavirus twit- ter dataset.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Improved lexically constrained decoding for translation and monolingual rewriting",
"authors": [
{
"first": "J",
"middle": [
"Edward"
],
"last": "Hu",
"suffix": ""
},
{
"first": "Huda",
"middle": [],
"last": "Khayrallah",
"suffix": ""
},
{
"first": "Ryan",
"middle": [],
"last": "Culkin",
"suffix": ""
},
{
"first": "Patrick",
"middle": [],
"last": "Xia",
"suffix": ""
},
{
"first": "Tongfei",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Matt",
"middle": [],
"last": "Post",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Van Durme",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Annual Meeting of the North American Association of Computational Linguistics (NAACL)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. Edward Hu, Huda Khayrallah, Ryan Culkin, Patrick Xia, Tongfei Chen, Matt Post, and Benjamin Van Durme. 2019. Improved lexically constrained de- coding for translation and monolingual rewriting. In Proceedings of the Annual Meeting of the North American Association of Computational Linguistics (NAACL).",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Covid-qa: A question answer dataset for covid-19",
"authors": [
{
"first": "Timo",
"middle": [],
"last": "M\u00f6ller",
"suffix": ""
},
{
"first": "Anthony",
"middle": [],
"last": "Reina",
"suffix": ""
},
{
"first": "Raghavan",
"middle": [],
"last": "Jayakumar",
"suffix": ""
},
{
"first": "Malte",
"middle": [],
"last": "Pietsch",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Timo M\u00f6ller, Anthony Reina, Raghavan Jayakumar, and Malte Pietsch. 2020. Covid-qa: A question an- swer dataset for covid-19.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Okapi at trec-4. NIST special publication",
"authors": [
{
"first": "",
"middle": [],
"last": "Se Robertson",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Walker",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Beaulieu",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Gatford",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Payne",
"suffix": ""
}
],
"year": 1996,
"venue": "",
"volume": "",
"issue": "",
"pages": "73--96",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "SE Robertson, S Walker, MM Beaulieu, M Gatford, and A Payne. 1996. Okapi at trec-4. NIST special publication, (500236):73-96.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Some simple effective approximations to the 2-poisson model for probabilistic weighted retrieval",
"authors": [
{
"first": "E",
"middle": [],
"last": "Stephen",
"suffix": ""
},
{
"first": "Steve",
"middle": [],
"last": "Robertson",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Walker",
"suffix": ""
}
],
"year": 1994,
"venue": "SI-GIR'94",
"volume": "",
"issue": "",
"pages": "232--241",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stephen E Robertson and Steve Walker. 1994. Some simple effective approximations to the 2-poisson model for probabilistic weighted retrieval. In SI- GIR'94, pages 232-241. Springer.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Efficient online scalar annotation with bounded support",
"authors": [
{
"first": "Keisuke",
"middle": [],
"last": "Sakaguchi",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Van Durme",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "208--218",
"other_ids": {
"DOI": [
"10.18653/v1/P18-1020"
]
},
"num": null,
"urls": [],
"raw_text": "Keisuke Sakaguchi and Benjamin Van Durme. 2018. Efficient online scalar annotation with bounded sup- port. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 208-218, Melbourne, Australia. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Rapidly bootstrapping a question answering dataset for covid-19",
"authors": [
{
"first": "Raphael",
"middle": [],
"last": "Tang",
"suffix": ""
},
{
"first": "Rodrigo",
"middle": [],
"last": "Nogueira",
"suffix": ""
},
{
"first": "Edwin",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Nikhil",
"middle": [],
"last": "Gupta",
"suffix": ""
},
{
"first": "Phuong",
"middle": [],
"last": "Cam",
"suffix": ""
},
{
"first": "Kyunghyun",
"middle": [],
"last": "Cho",
"suffix": ""
},
{
"first": "Jimmy",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Raphael Tang, Rodrigo Nogueira, Edwin Zhang, Nikhil Gupta, Phuong Cam, Kyunghyun Cho, and Jimmy Lin. 2020. Rapidly bootstrapping a question answer- ing dataset for covid-19.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "What are people asking about covid-19? a question classification dataset",
"authors": [
{
"first": "Jerry",
"middle": [],
"last": "Wei",
"suffix": ""
},
{
"first": "Chengyu",
"middle": [],
"last": "Huang",
"suffix": ""
},
{
"first": "Soroush",
"middle": [],
"last": "Vosoughi",
"suffix": ""
},
{
"first": "Jason",
"middle": [],
"last": "Wei",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jerry Wei, Chengyu Huang, Soroush Vosoughi, and Ja- son Wei. 2020. What are people asking about covid- 19? a question classification dataset.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"type_str": "figure",
"uris": null,
"num": null,
"text": "Screenshot of our documentation describing the data and metadata stored for each scraped questionanswer pair."
},
"FIGREF1": {
"type_str": "figure",
"uris": null,
"num": null,
"text": "Number of question/answers for each language in our dataset."
},
"FIGREF2": {
"type_str": "figure",
"uris": null,
"num": null,
"text": "Histogram of number of QQAs (Y-axis) annotated with a score at most the corresponding x-axis. Over 17.5K examples are labeled between 0 and 1."
},
"FIGREF3": {
"type_str": "figure",
"uris": null,
"num": null,
"text": "et al. (2020)'s 124 question-article pairs, Wei et al. (2020) 1,690 questions and 403 answers, and M\u00f6ller et al. (2020)'s dataset."
}
}
}
}