| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:40:40.037980Z" |
| }, |
| "title": "Towards building a Robust Industry-scale Question Answering System", |
| "authors": [ |
| { |
| "first": "Rishav", |
| "middle": [], |
| "last": "Chakravarti", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research AI Yorktown Heights", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Ferritto", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research AI Yorktown Heights", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "aferritto@ibm.com" |
| }, |
| { |
| "first": "Bhavani", |
| "middle": [], |
| "last": "Iyer", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research AI Yorktown Heights", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "bsiyer@us.ibm.com" |
| }, |
| { |
| "first": "Lin", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research AI Yorktown Heights", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "panl@us.ibm.com" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Florian", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research AI Yorktown Heights", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "raduf@us.ibm.com" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research AI Yorktown Heights", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "roukos@us.ibm.com" |
| }, |
| { |
| "first": "Avirup", |
| "middle": [], |
| "last": "Sil", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research AI Yorktown Heights", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Marley", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research AI Yorktown Heights", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Christmas", |
| "middle": [], |
| "last": "Carol", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "IBM Research AI Yorktown Heights", |
| "location": { |
| "region": "NY" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Industry-scale NLP systems necessitate two features. 1. Robustness: \"zero-shot transfer learning\" (ZSTL) performance has to be commendable and 2. Efficiency: systems have to train efficiently and respond instantaneously. In this paper, we introduce the development of a production model called GAAMA (Go Ahead Ask Me Anything) which possess the above two characteristics. For robustness, it trains on the recently introduced Natural Questions (NQ) dataset. NQ poses additional challenges over older datasets like SQuAD: (a) QA systems need to read and comprehend an entire Wikipedia article rather than a small passage, and (b) NQ does not suffer from observation bias during construction, resulting in less lexical overlap between the question and the article. GAAMA consists of Attention-over-Attention, diversity among attention heads, hierarchical transfer learning, and synthetic data augmentation while being computationally inexpensive. Building on top of the powerful BERT QA model, GAAMA provides a \u223c2.0% absolute boost in F 1 over the industry-scale state-of-the-art (SOTA) system on NQ. Further, we show that GAAMA transfers zero-shot to unseen real life and important domains as it yields respectable performance on two benchmarks: the BioASQ and the newly introduced CovidQA datasets.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Industry-scale NLP systems necessitate two features. 1. Robustness: \"zero-shot transfer learning\" (ZSTL) performance has to be commendable and 2. Efficiency: systems have to train efficiently and respond instantaneously. In this paper, we introduce the development of a production model called GAAMA (Go Ahead Ask Me Anything) which possess the above two characteristics. For robustness, it trains on the recently introduced Natural Questions (NQ) dataset. NQ poses additional challenges over older datasets like SQuAD: (a) QA systems need to read and comprehend an entire Wikipedia article rather than a small passage, and (b) NQ does not suffer from observation bias during construction, resulting in less lexical overlap between the question and the article. GAAMA consists of Attention-over-Attention, diversity among attention heads, hierarchical transfer learning, and synthetic data augmentation while being computationally inexpensive. Building on top of the powerful BERT QA model, GAAMA provides a \u223c2.0% absolute boost in F 1 over the industry-scale state-of-the-art (SOTA) system on NQ. Further, we show that GAAMA transfers zero-shot to unseen real life and important domains as it yields respectable performance on two benchmarks: the BioASQ and the newly introduced CovidQA datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "A relatively new task in open domain question answering (QA) is machine reading comprehension (MRC), which aims to read and comprehend a given text and then answer questions based on it. Recent work on transfer learning, from large pre-trained language models like BERT and XLNet has practically solved SQuAD (Rajpurkar et al., 2016; Rajpurkar et al., 2018) , the most widely used MRC benchmark. This necessitates harder QA benchmarks for the field to advance. Additionally, SQuAD and other existing datasets like NarrativeQA (Ko\u010disk\u00fd et al., 2018) and HotpotQA suffer from observation bias: annotators had read the passages before creating their questions.", |
| "cite_spans": [ |
| { |
| "start": 309, |
| "end": 333, |
| "text": "(Rajpurkar et al., 2016;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 334, |
| "end": 357, |
| "text": "Rajpurkar et al., 2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 526, |
| "end": 548, |
| "text": "(Ko\u010disk\u00fd et al., 2018)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In industry research, there is an urgent demand to build a usable MRC QA system that not only provides very good performance on academic benchmarks but also real life industry applications (Tang et al., 2020) in a ZSTL environment. In this paper, to build such a system, we first focus on Natural Questions (NQ) (Kwiatkowski et al., 2019) : a MRC benchmark dataset over Wikipedia articles where questions (see Figure 1) were sampled from Google search logs. This key difference from past datasets eliminates annotator observation bias. Also, NQ requires systems to extract both a short (SA, one or more entities) and a long answer (LA, typically a paragraph that contains the short answer when both exist). The dataset shows human upper bounds of 76% and 87% on the short and long answer selection tasks respectively (for a \"super-annotator\" composed of 5 human annotators). The authors show that systems designed for past datasets perform poorly on NQ. We propose GAAMA that possesses several MRC technologies that are necessary to perform well on NQ and achieve significant boosts over another industry setting competitor system (Alberti et al., 2019a) pre-trained on a large language model (LM) and then over millions of synthetic examples. Specifically, GAAMA builds on top of a large pre-trained LM and focusses on two broad dimensions:", |
| "cite_spans": [ |
| { |
| "start": 189, |
| "end": 208, |
| "text": "(Tang et al., 2020)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 312, |
| "end": 338, |
| "text": "(Kwiatkowski et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1131, |
| "end": 1154, |
| "text": "(Alberti et al., 2019a)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 410, |
| "end": 419, |
| "text": "Figure 1)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1. Improved Attention: With the reduction of observation bias in NQ, we find a distinct lack of lexical and grammatical alignment between answer contexts and the questions. For example, here is a question to identify the date of an event from the SQuAD 2.0 dataset: According to business journalist Kimberly Amadeo, when did the first signs of decline in real estate occur? This question can be aligned almost perfectly with the text in the answering Wikipedia paragraph in order to extract the year 2006: Business journalist Kimberly Amadeo reports: \"The first signs of decline in residential real estate occurred in 2006.\" In contrast, as shown in Example 1 from Figure 1 , a question from NQ to identify the date of Marley's death requires parsing through a number of related sub clauses to extract the answer December 24, 1836 from the context. This need for improved alignment leads us to explore two additional attention mechanisms.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 665, |
| "end": 673, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Attention-over-Attention (AoA) (Cui et al., 2017) : on top of BERT's existing layer stack, we introduce a two-headed AoA layer which combines query-to-document and document-to-query attention.", |
| "cite_spans": [ |
| { |
| "start": 33, |
| "end": 51, |
| "text": "(Cui et al., 2017)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Attention Diversity (AD) Motivated by (Li et al., 2018) , we explore a mechanism that maximizes diversity among BERT attention heads. Intuitively, we want different attention heads to capture information from different semantic subspaces, which BERT currently does not enforce. Finally, we experiment with combining the two strategies, yielding a gain of \u223c1.5% for both short and long answers.", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 57, |
| "text": "(Li et al., 2018)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "2. Data Augmentation: Given the data hungry nature of BERT-based models, we explore three strategies for data augmentation (DA). Crowd-sourced DA introduces human annotated Q&A pairs from prior MRC datasets. Synthetic DA introduces large amounts of machine generated QA pairs, inspired by the prior successes of (Alberti et al., 2019a; Dong et al., 2019) . Unlike previous work, which predominantly relied on computationally expensive beam search decoding, we apply fast and diversity-promoting nucleus sampling (Holtzman et al., 2019) to generate 4M questions from a transformer-based question generator (Sultan et al., 2020) . Adversarial DA performs a novel sentence-order-shuffling to perturb the native NQ data so as to tackle the inherent positional bias in Wikipedia-based MRC as shown by (Min et al., 2019; Kwiatkowski et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 312, |
| "end": 335, |
| "text": "(Alberti et al., 2019a;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 336, |
| "end": 354, |
| "text": "Dong et al., 2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 512, |
| "end": 535, |
| "text": "(Holtzman et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 605, |
| "end": 626, |
| "text": "(Sultan et al., 2020)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 796, |
| "end": 814, |
| "text": "(Min et al., 2019;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 815, |
| "end": 840, |
| "text": "Kwiatkowski et al., 2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We find that, contrary to previous industry research SOTA (Alberti et al., 2019a) on NQ, it is not necessary to perform large scale synthetic DA. Instead we achieve better results with a well aligned Pre-Training (PT, a gain of 1.3-1.6%).", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 81, |
| "text": "(Alberti et al., 2019a)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most QA applications in an industry involve multiple domains e.g. Amazon Kendra 1 for Enterprise Search, Google Search, and IBM Watson Assistant 2 for Customer Service. Hence, there exists a need to develop one robust QA system that would work with ZSTL on a plethora of domains. Of course, one could further fine-tune the system on the new domain to achieve better performance. However, the process is rather expensive as it demands manual human annotation which in real world applications is very scarce . Hence, we explore GAAMA's ZSTL effectiveness on two publicly available benchmark bio-medical datasets: BioASQ (Tsatsaronis et al., 2015) and the newly introduced CovidQA (Tang et al., 2020) . The former is an annual shared task for QA over biomedical documents involving factoid questions. The latter is built on top of the CORD-19 corpus (Wang et al., 2020) consisting of questions asked by humans about the Covid-19 disease. The COVID-19 pandemic has caused an abundance of research to be published on a daily basis. Providing the capability to ask questions on research is vital for ensuring that important and recent information is not overlooked and available to everyone. GAAMA consistently delivers competitive performance when compared to baselines either trained on the target domain or zero-shot transferred to the target.", |
| "cite_spans": [ |
| { |
| "start": 618, |
| "end": 644, |
| "text": "(Tsatsaronis et al., 2015)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 678, |
| "end": 697, |
| "text": "(Tang et al., 2020)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 847, |
| "end": 866, |
| "text": "(Wang et al., 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Overall, our contributions can be summarized as follows: 1. We propose a novel system that investigates several improved attention and enhanced data augmentation strategies, 2. Outperforms the previous industry-scale QA system on NQ, 3. Provides ZSTL capabilities on two unseen domains and 4. Achieves competitive performance compared to the respective corresponding baselines.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most recent MRC systems either achieve SOTA by adding additional components on top of BERT (Devlin et al., 2019) such as syntax or perform attention fusion (Wang et al., 2018) without using BERT. However, we argue that additional attention mechanisms should be explored on top of BERT such as computing additional cross-attention between the question and the passage and maximizing the diversity among different attention heads in BERT. Our work is also generic enough to be applied on recently introduced transformer based language models such as ALBERT (Lan et al., 2019) and REFORMER (Kitaev et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 175, |
| "text": "(Wang et al., 2018)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 555, |
| "end": 573, |
| "text": "(Lan et al., 2019)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 587, |
| "end": 608, |
| "text": "(Kitaev et al., 2020)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Another common technique is DA (Zhang and Bansal, 2019) by artificially generating more questions to enhance the training data or in a MTL setup (Yatskar, 2018; Dhingra et al., 2018; . (Alberti et al., 2019a; Alberti et al., 2019b) combine models of question generation with answer extraction and filter results to ensure round-trip consistency to get the SOTA on NQ. Contrary to this, we explore several strategies for DA that either involve diverse question generation from a dynamic nucleus (Holtzman et al., 2019) of the probability distribution over question tokens or shuffling the existing dataset to produce adversarial examples.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 55, |
| "text": "(Zhang and Bansal, 2019)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 145, |
| "end": 160, |
| "text": "(Yatskar, 2018;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 161, |
| "end": 182, |
| "text": "Dhingra et al., 2018;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 185, |
| "end": 208, |
| "text": "(Alberti et al., 2019a;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 209, |
| "end": 231, |
| "text": "Alberti et al., 2019b)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 494, |
| "end": 517, |
| "text": "(Holtzman et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Recently Min et al., 2019) focus on \"open\" NQ, a modified version of the full NQ dataset for document retrieval QA that discards unanswerable questions. Contrary to that, we specifically focus on the full NQ dataset and believe there is room for improvement from a MRC research standpoint.", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 26, |
| "text": "Min et al., 2019)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this section, we first describe BERT QA , GAAMA's underlying QA model, and two additional attention layers on top of it. Figure 2 shows our overall model architecture with details explained below.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 124, |
| "end": 132, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Architecture", |
| "sec_num": "3" |
| }, |
| { |
| "text": "QA model: BERT QA Given a token sequence X = [x 1 , x 2 , . . . , x T ]: BERT, a deep Transformer (Vaswani et al., 2017) net- work, outputs a sequence of contextualized token representations H L = [h L 1 , h L 2 , . . . , h L T ]. h L 1 , . . . , h L T = BERT (x 1 , . . . , x T )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Underlying", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "BERT QA adds three dense layers followed by a softmax on top of BERT for answer extraction: ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Underlying", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "b = sof tmax(W 1 H L ), e = sof tmax(W 2 H L ), and a = sof tmax(W 3 h L [CLS] ) -where W 1 , W 2 \u2208 [CLS] Q Tokens C", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Underlying", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "R 1\u00d71024 , W 3 \u2208 R 5\u00d71024 , H L \u2208 R N \u00d71024 , and h L [CLS] \u2208 R 1024 . t b", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Underlying", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "and t e denote the probability of the t th token in the sequence being the answer beginning and end, respectively. These three layers are trained during the finetuning stage. The NQ task requires not only a prediction for short answer beginning/end offsets, but also a (containing) longer span of text that provides the necessary context for that short answer. Inspired by prior work from (Alberti et al., 2019b), we only optimize for short answer spans and then identify the bounds of the containing HTML span as the long answer prediction 3 . We use the hidden state of the [CLS] token to classify the answer type \u2208 [short, long, yes, no, null], so y a denotes the probability of the y th answer type being correct. Our loss function is the averaged cross entropy on the two answer pointers and the answer type classifier:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Underlying", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "L N Q = \u2212 1 3 T t=1 (1(b t ) log t b + 1(e t ) log t e ) + Y y=1 1(a y ) log y a \uf8f6 \uf8f8", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Underlying", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where 1(b) and 1(e) are one-hot vectors for the ground-truth beginning and end positions, and 1(a) for the ground-truth answer type. During decoding, the span over argmax of b and argmax of e is picked as the predicted short answer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Underlying", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In this section, we outline our investigation of the attention mechanisms on top of the above BERT QA model. Our main question: BERT already computes self-attention over the question and the passage in several layers-can we improve on top of that?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "3.2.1 Attention-over-Attention (AoA)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our first approach is AoA: originally designed (Cui et al., 2017) for cloze-style question answering, where a phrase in a short passage of text is removed in forming a question. We seek to explore whether AoA helps in a more traditional MRC setting. Let Q be a sequence of question tokens [q 1 , . . . , q m ], and C a sequence of context tokens [c 1 , . . . , c n ]. AoA first computes an attention matrix:", |
| "cite_spans": [ |
| { |
| "start": 47, |
| "end": 65, |
| "text": "(Cui et al., 2017)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "M = CQ T ,", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where C \u2208 R n\u00d7h , Q \u2208 R m\u00d7h , and M \u2208 R n\u00d7m . In our case, the hidden dimension is h = 1024. Next, it separately performs on M a column-wise softmax \u03b1 = sof tmax(M T ) and a row-wise softmax \u03b2 = sof tmax(M). Each row i of matrix \u03b1 represents the document-level attention regarding q i (queryto-document attention), and each row j of matrix \u03b2 represents the query-level attention regarding c j (document-to-query attention). To combine the two attentions, \u03b2 is first row-wise averaged:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u03b2 = 1 n n j=1 \u03b2 j (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The resulting vector can be viewed as the average importance of each q i with respect to C. This tokento-sequence attention encoded in AoA is a key difference from BERT attention. \u03b2 is then used to weigh the document-level attention \u03b1. s = \u03b1 T \u03b2 T", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(3) The final attention vector s \u2208 R N represents document-level attention weighted by the importance of query words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Since the output of AoA is a vector of document length, to use it for answer start and end prediction we add a two-headed AoA layer into the BERT QA model and this layer is trained together with the answer extraction layer during the finetuning stage. Concretely, the combined question and context hidden representation H L from BERT is first separated to H Q and H C 4 , followed by two linear projections of H Q and H C respectively to H Q i and H C i , i \u2208 {1, 2}:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H Q i = H Q W Q i ,", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "H C i = H C W C i , (5) where H Q , H Q i \u2208 R M \u00d71024 ; H C , H C i \u2208 R N \u00d71024 ; and W Q i , W C i \u2208 R 1024\u00d71024", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": ". Therefore, the AoA layer adds about 2.1 million parameters on top of BERT which already has 340 million. Next, we feed H C", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "1 and H Q 1 into the AoA calculation specified in Equations (1) -(3) to get the attention vector s 1 for head 1. The same procedure is applied to H Q 2 and H C 2 to get s 2 for head 2. Lastly, s 1 and s 2 are combined with b and e respectively via two weighted sum operations for answer extraction.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Strategies", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "It has been shown through ablation studies (Kovaleva et al., 2019; Michel et al., 2019 ) that removing BERT attention heads can achieve comparable or better performance on some tasks. Our objective is to find out if we can diversify the information captured and train a better BERT model by enforcing diversity among the attention heads.", |
| "cite_spans": [ |
| { |
| "start": 43, |
| "end": 66, |
| "text": "(Kovaleva et al., 2019;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 67, |
| "end": 86, |
| "text": "Michel et al., 2019", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Diversity (AD) layer", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "In a Transformer model, (Li et al., 2018 ) examine a few methods to enforce such diversity and see an improvement on machine translation tasks. Contrary to that we start with a pre-trained BERT model, take the attention output from scaled dot-product attention and compute the cosine similarity between all pairs of heads:", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 40, |
| "text": "(Li et al., 2018", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Diversity (AD) layer", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "D = Head i=1 Head j=1 O i \u2022 O j ||O i ||||O j || .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Diversity (AD) layer", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "We then average D for the per-token similarity and add it as an additional loss term. For each token, there are 16 + 15 + ... + 2 total similarity calculations, 16 being the number of heads in BERT QA . Figure 3 shows the modified structure of Multi-head Attention in the Transformer architecture. We apply this technique during finetuning on NQ and to the last layer of BERT only. It will be interesting to see how this additional training objective affects BERT pretraining, which we leave as future work.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 203, |
| "end": 211, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attention Diversity (AD) layer", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "Our models follow the now common approach of starting with the pre-trained BERT language model and then finetune over the NQ dataset with an additional QA sequence prediction layer as described in section 3.1. Note that unless we specify otherwise, we are referring to the pre-trained \"large\" version of BERT with Whole Word Masking (BERT W ). BERT W has the same model structure as the original BERT model, but masks whole words instead of word pieces for the Masked Language Model pre-training task and we empirically find this to be a better starting point for the NQ task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Training", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Model performance in MRC has benefited from training with labeled examples from human annotated or synthetic data augmentation from similar tasks. This includes the prior SOTA on NQ by (Alberti et al., 2019a) where 4 million synthetically generated QA pairs are introduced. In this paper, we similarly adapt and evaluate three different approaches for data augmentation: Crowd-sourced, Synthetic, and Adversarial. Crowd-sourced DA: We leverage the previously released SQuAD 2.0 MRC dataset that obtained \u223c130k crowd-sourced question, answer training pairs over Wikipedia paragraphs. Note that we present results using a \"pre-training\" (PT) strategy where we first train on the augmentation data and, finally, perform fine-tuning exclusively on the NQ domain. We also experimented with a multi-task-learning setup as in (Ruder et al., 2019; Xu et al., 2018) , but omit those experimental results for brevity since PT consistently proved to be a better augmentation strategy. Synthetic DA: We also pre-train a model on 4M automatically generated QA examples. The generation works as follows: similar to (Dong et al., 2019) , we first fine-tune a masked LM for question generation using SQuAD1.1 training examples-we choose RoBERTa for its extended LM pretraining. Then a SQuAD MRC model trained on ten predefined question types-e.g. what, how, when, and how many, as opposed to full-length questions-is used to identify potential answer phrases in NQ training passages. Finally, we use diversity-promoting nucleus sampling (Holtzman et al., 2019 ) with a nucleus mass of .95 to sample questions from these passage-answer pairs, which has been shown to yield better QA training examples than standard beam search (Sultan et al., 2020). Adversarial DA: Sentence Order Shuffling (SOS) The SOS strategy shuffles the ordering of sentences within paragraphs from the NQ training set. The strategy is based on an observation in the preliminary BERT QA model that predictions favored earlier rather than later text spans. As noted by (Kwiatkowski et al., 2019 ), this appears to reflect a natural bias in Wikipedia that earlier texts tend to be more informative for general questions (a default long answer classifier predicting the first paragraph gets a LA F1 of 27.8%). Hence, our perturbation of the sentence ordering is similar in spirit to the types of perturbations introduced by for SQuAD 2.0 based on observed biases in the dataset.", |
| "cite_spans": [ |
| { |
| "start": 185, |
| "end": 208, |
| "text": "(Alberti et al., 2019a)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 819, |
| "end": 839, |
| "text": "(Ruder et al., 2019;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 840, |
| "end": 856, |
| "text": "Xu et al., 2018)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1101, |
| "end": 1120, |
| "text": "(Dong et al., 2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1521, |
| "end": 1543, |
| "text": "(Holtzman et al., 2019", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 2024, |
| "end": 2049, |
| "text": "(Kwiatkowski et al., 2019", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Augmentation (DA)", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Source Domain We choose NQ as our source dataset. It provides 307,373 training queries, 7,830 development queries, and 7,842 test queries (with the test set only being accessible through a public leaderboard submission). For each question, crowd sourced annotators also provide start and end offsets for short answer spans 5 within the Wikipedia article, if available, as well as long answer spans (which is generally the most immediate HTML paragraph, table, or list span containing the short answer), if available. The dataset also forces models to make an attempt at \"knowing what they don't know\" (Rajpurkar et al., 2018) by requiring a confidence score with each prediction. For evaluation, we report the offset-based F1 overlap score. For additional details on the data and evaluation see (Kwiatkowski et al., 2019) . Target Domain To test GAAMA's ZSTL transfer capability, we choose two academic 6 benchmark datasets on a related domain: Bio-medical. The first one uses a subset of the questions and annotations from task 8b of the BioASQ competition (Tsatsaronis et al., 2015) . Specifically, we extract 1,266 factoid biomedical questions for which exact answers can be extracted from one of the PubMED abstracts marked as relevant by the annotators. We report the Factoid Mean Reciprocal Rank (MRR) as the evaluation metric. Secondly, we choose the very recent CovidQA (Tang et al., 2020) benchmark to illustrate GAAMA's performance on a globally important transfer learning dataset. This is a QA dataset specifically designed for COVID-19 and manually annotated from knowledge gathered from Kaggle's COVID-19 Open Research Dataset Challenge. It is the first publicly available QA resource on the pandemic intended as a stopgap measure for guiding research until more substantial evaluation resources become available. It consists of 124 question-article pairs (v0.1) and hence does not have sufficient examples for supervised machine learning. 
CovidQA evaluates the zero-shot transfer capabilities of existing models on topics specifically related to COVID-19. One difference of CovidQA from the other QA datasets we evaluate is that it requires systems to predict the correct sentence that answers the question. Hence we intuitively report the P@1, R@3, and MRR based on the official evaluation metric.", |
| "cite_spans": [ |
| { |
| "start": 601, |
| "end": 625, |
| "text": "(Rajpurkar et al., 2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 795, |
| "end": 821, |
| "text": "(Kwiatkowski et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1058, |
| "end": 1084, |
| "text": "(Tsatsaronis et al., 2015)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1378, |
| "end": 1397, |
| "text": "(Tang et al., 2020)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We compare GAAMA against three strong competitors from the industry research: 1) A hybrid of a decomposable attention model for Natural Language Inference (Parikh et al., 2016) and DrQA, a retrieve and rank QA model, which obtains commendable results on SQuAD.", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 176, |
| "text": "(Parikh et al., 2016)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Competitors", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "2) The NQ baseline system (Alberti et al., 2019b) and 3) The current industry SOTA on NQ (Alberti et al., 2019a) which utilizes 4 million synthetic examples as pre-training. Architecturally, the latter is similar to us but we propose more technical novelty in terms of both improved attention and data augmentation. We note there is very recent academic work (Zheng et al., 2020) which we omit as GAAMA outperforms them on short answers and more importantly we compare against large scale industry SOTA for the scope of this paper. Since their work is more academic, their model enjoys being computationally more expensive for accuracy than GAAMA as they involve computing graph attentions that are typically more difficult to be run in parallel if we want to do whole graph propagation (Veli\u010dkovi\u0107 et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 49, |
| "text": "(Alberti et al., 2019b)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 89, |
| "end": 112, |
| "text": "(Alberti et al., 2019a)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 359, |
| "end": 379, |
| "text": "(Zheng et al., 2020)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 787, |
| "end": 812, |
| "text": "(Veli\u010dkovi\u0107 et al., 2018)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Competitors", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "6 Results:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Competitors", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Attention Strategies: Both the AoA and AD strategies provide a meaningful (0.7 \u2212 0.9%) improvement over a baseline BERT W model as shown in Table 1 . Note that our baseline BERT W already achieves a stronger baseline than previously published SOTA by (Alberti et al., 2019a) by relying on the stronger whole-word-masking pre-training mechanism for the underlying BERT model. Combining both attention strategies with the SQUAD 2 PT yields the best single model performance, though the improvements are primarily on LA performance rather than SA. Exploring why only LA improves is left as part of our future work once we start with even larger, better pre-trained models. Data Augmentation: As seen in Table 2 , using a (well aligned) crowd-sourced dataset (SQuAD 2) for pre-training proves to be quite effective. It provides the largest data augmentation gain in SA F1, \u223c1.6%, as well as a \u223c1% gain in LA F1. Employing 4 million synthetic question answer pairs also provide similar gains in SA F1 and an even better gain (\u223c2.3%) in LA F1. From an efficiency perspective, however, SQuAD 2 PT only introduces 130K additional examples to the training process, whereas synthetic data augmentation requires training over 4M additional examples (on top of the training required for the data generator). We also find that it was unhelpful to combine SQUAD 2 PT with 4M synthetic examples for improving single model performance; so we evaluate our best performing model architectures only using the SQuAD 2 PT strategy.", |
| "cite_spans": [ |
| { |
| "start": 251, |
| "end": 274, |
| "text": "(Alberti et al., 2019a)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 140, |
| "end": 147, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 700, |
| "end": 707, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Competitors", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Short Answer F1 Long Answer F1 Human Performance Single human 57.5 73.4 Super-annotator 75.7 87.2 Prior Work (Industry Research on NQ) DecAtt + Doc Reader (Parikh et al., 2016) 31.4 54.8 BERTL w/ SQuAD 1.1 PT (Alberti et al., 2019b) 52.7 64.7 BERTL w/ 4M Synthetic (Alberti et al., 2019a) 55.1 65.9 This Work GAAMA: BERTW + AoA + AD + SQuAD 2 PT 57.0 68.6 Table 2 : Performance of various Data Augmentation strategies. SQuAD helps short but synthetic helps long answers.", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 176, |
| "text": "(Parikh et al., 2016)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 209, |
| "end": 232, |
| "text": "(Alberti et al., 2019b)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 265, |
| "end": 288, |
| "text": "(Alberti et al., 2019a)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 356, |
| "end": 363, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Competitors", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We create a random train (75%) and test split (25%) of the BioASQ 8b annotated questions in order to assess the performance of GAAMA with and without training. Comparison with more heavily fine-tuned prior art (Yoon et al., 2019) is left as part of future work and beyond the scope of this work as they focus on fine-tuned large language models e.g. BioBERT (Lee et al., 2020) with more extensive vocabularies. Note again, that our work focuses on minimizing these steps for new target domains. Hence, since our objective is not to keep retraining GAAMA for every new domain, we refrain from changing the underlying pre-trained LM. We observe that the GAAMA's ZSTL config performs competitively (0.56 lower on MRR) on BioASQ showing that there is hope of transferring models zero-shot to entirely unseen domains.", |
| "cite_spans": [ |
| { |
| "start": 210, |
| "end": 229, |
| "text": "(Yoon et al., 2019)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 358, |
| "end": 376, |
| "text": "(Lee et al., 2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "ZSTL Experiments", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "On CovidQA, we predict the sentence that contains our predicted answers. Table 6 shows the results. GAAMA performs quite competitively to a BioBERT baseline and outperforms it on all the three metrics. This amplifies the fact that it is not always necessary to start with a domain-specific LM. We note that GAAMA gets better P@1, slightly lower R@3, and the same MRR and hence it still gives a tough competition to a system trained on empirically a much better performing pre-trained LM than BERT: T5 (Raffel et al., 2019) . We also note that both the T5 and BioBERT baselines are trained specifically to do sentence classification whereas GAAMA performs reading comprehension to extract answer spans and we predict the sentence that contains the spans. So no new \"task-specific\" training is involved in this process. ", |
| "cite_spans": [ |
| { |
| "start": 501, |
| "end": 522, |
| "text": "(Raffel et al., 2019)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 73, |
| "end": 80, |
| "text": "Table 6", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "ZSTL Experiments", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Inference: Inference efficiency is a crucial requirement of industry-scale systems. We investigate the inference times of both base and large models; while large models are ideal for academic benchmarks, the faster inference times of base models can be worth the reduction in accuracy in industrial settings. Measurements are carried out using a random sample of examples from the NQ dev set with a Nvidia R Tesla R P100 GPU and 8 threads from an Intel R Xeon R E5-2690 16-core CPU. In order to decrease inference time, we simulate passage retrieval to send the model the most relevant passage by selecting the first correct top level candidate if there is one and the first (incorrect) top level candidate if there is not. We find in Table 4 that switching from base to large yields an 8.3% absolute increase in F1 in exchange for 1.3x to 2.8x increases in inference time. When running the model on a GPU these result in manageable 95th percentile inference times of less than a second; whereas on the CPU the 95th percentile times are multiple seconds. We conclude that either of these models could be deployed in production environments on GPU only. In future work we intend to explore network pruning or knowledge distillation techniques for potential speedups with the large model. Training: Efficient training is also an important component of industry-scale systems. To this end we consider both the number of model parameters and the amount of PT data. Our AoA implementation adds less than 1% to BERT W 's parameters and AD does not add any as it is implemented in the loss. Similarly, by using a well-aligned PT dataset (SQuAD 2.0) we are able to rival the performance of the much larger 4M synthetically generated corpus (Alberti et al., 2019a) with only 130K examples as seen in Table 2 .", |
| "cite_spans": [ |
| { |
| "start": 1732, |
| "end": 1755, |
| "text": "(Alberti et al., 2019a)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 735, |
| "end": 742, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 1791, |
| "end": 1798, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Efficiency", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "7 Analysis of GAAMA's Components Table 3 shows the ablation study of GAAMA's components. Note that our best model's performance on short answers (57.2) almost matches a single human performance 7 . When doing manual error analysis on a sample of the NQ dev set, we do observe patterns suggesting that each of GAAMA's components do bring different strengths over just the best final combination (BERT w + AoA + AD + SQuAD2 PT) e.g. the Wikipedia article for Salary Cap contains multiple sentences related to the query \"when did the nfl adopt a salary cap\":", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 33, |
| "end": 40, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Efficiency", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "The new Collective Bargaining Agreement (CBA) formulated in 2011 had an initial salary cap of $120 million...The cap was first introduced for the 1994 season and was initially $34.6 million. Both the cap and...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Efficiency", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "The later sentence contains the correct answer, 1994, since the question is asking for when the salary cap was initially adopted. The SOS augmented model correctly makes this prediction whereas our SQUAD 2 augmented models predict 2011 from the earlier sentence. There are also cases where the correct answer span appears in the middle or later part of a paragraph and, though our SQUAD 2 augmented models predict the spans correctly, they assign a lower score (relative to its optimal threshold) than the SOS augmented model. The position bias, therefore, appears to hurt the performance of the system in certain situations where location of the answer span relative to the paragraph is not a useful signal of correctness. On average, of course, the BERT W +SQUAD2 PT + AoA + AD configuration performs the best and manual error analysis indicates some ability to better attend to supporting evidence when it is further out from the correct answer span. For example, the correct answer in example 1 from figure 1 is December 24, 1836, for which the AoA + AD model correctly identifies the answer span despite the question's and context's lack of lexical and grammatical alignment, while the base BERT W models fail at extracting the date (instead predicting a span more closely associated with the keywords in the query such as seven years earlier).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Efficiency", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Although large pre-trained language models have shown super-human performance on benchmark datasets like SQuAD, we show that there is plenty of room to make improvements on top of BERT QA . Specifically, we outline prior strategies that do not work on a real benchmark consisting of \"natural questions\" showing the difficulty of the dataset and need for better algorithms. We introduce GAAMA and outline several strategies that are broadly classified under attention and data augmentation and show how effective it can be to attain competitive performance on NQ compared to other industry baselines. We also outline GAAMA's OOTB zero-shot transfer on two unseen datasets and show optimistic performance. Our future work will involve adding larger pre-trained language models like T5 and also exploring multi-lingual QA.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "https://www.onixnet.com/amazon-kendra 2 www.ibm.com/watson/assistant", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The candidate long answer HTML spans are provided as part of the preprocessed data for NQ.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Superscript L is dropped here for notation convenience; we use the last layer L = 24 from the BERT output.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "\u223c1% of the questions are annotated with boolean Yes/No instead of short answers. We leave it as future work to detect and generate answers for these types of queries.6 Note, we have tested GAAMA's ZSTL successful transfer on several in-house datasets which we cannot publish due to license restrictions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "(Kwiatkowski et al., 2019) notes that human performance was measured on a random sample of NQ dev.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the multilingual NLP team at IBM Research AI and the anonymous reviewers for their helpful suggestions and feedback.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgement", |
| "sec_num": "9" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Synthetic QA corpora generation with roundtrip consistency", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Alberti", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Andor", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Pitler", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Alberti, Daniel Andor, Emily Pitler, Jacob Devlin, and Michael Collins. 2019a. Synthetic QA corpora generation with roundtrip consistency. CoRR, abs/1906.05416.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A BERT baseline for the natural questions", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Alberti", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1--4", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1901.08634" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris Alberti, Kenton Lee, and Michael Collins. 2019b. A BERT baseline for the natural questions. arXiv preprint arXiv:1901.08634, pages 1-4.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "The TechQA Dataset", |
| "authors": [ |
| { |
| "first": "Vittorio", |
| "middle": [], |
| "last": "Castelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Rishav", |
| "middle": [], |
| "last": "Chakravarti", |
| "suffix": "" |
| }, |
| { |
| "first": "Saswati", |
| "middle": [], |
| "last": "Dana", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Ferritto", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Florian", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Franz", |
| "suffix": "" |
| }, |
| { |
| "first": "Dinesh", |
| "middle": [], |
| "last": "Garg", |
| "suffix": "" |
| }, |
| { |
| "first": "Dinesh", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Mccarley", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Mccawley", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vittorio Castelli, Rishav Chakravarti, Saswati Dana, Anthony Ferritto, Radu Florian, Martin Franz, Dinesh Garg, Dinesh Khandelwal, Scott McCarley, Mike McCawley, et al. 2020. The TechQA Dataset. Association for Computational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Reading wikipedia to answer open-domain questions", |
| "authors": [ |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Fisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1870--1879", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danqi Chen, Adam Fisch, Jason Weston, and Antoine Bordes. 2017. Reading wikipedia to answer open-domain questions. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1870-1879.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Attention-over-attention neural networks for reading comprehension", |
| "authors": [ |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Cui", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhipeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Guoping", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proc. of ACL", |
| "volume": "1", |
| "issue": "", |
| "pages": "593--602", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yiming Cui, Zhipeng Chen, Si Wei, Shijin Wang, Ting Liu, and Guoping Hu. 2017. Attention-over-attention neural networks for reading comprehension. In Proc. of ACL (Volume 1: Long Papers), pages 593-602. ACL, July.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirec- tional transformers for language understanding. In NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Simple and effective semi-supervised question answering", |
| "authors": [ |
| { |
| "first": "Bhuwan", |
| "middle": [], |
| "last": "Dhingra", |
| "suffix": "" |
| }, |
| { |
| "first": "Danish", |
| "middle": [], |
| "last": "Pruthi", |
| "suffix": "" |
| }, |
| { |
| "first": "Dheeraj", |
| "middle": [], |
| "last": "Rajagopal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bhuwan Dhingra, Danish Pruthi, and Dheeraj Rajagopal. 2018. Simple and effective semi-supervised question answering. CoRR, abs/1804.00720.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Unified language model pre-training for natural language understanding and generation", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenhui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Hsiao-Wuen", |
| "middle": [], |
| "last": "Hon", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NeurIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao- Wuen Hon. 2019. Unified language model pre-training for natural language understanding and generation. In NeurIPS.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "The curious case of neural text degeneration", |
| "authors": [ |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Holtzman", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Buys", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxwell", |
| "middle": [], |
| "last": "Forbes", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ari Holtzman, Jan Buys, Maxwell Forbes, and Yejin Choi. 2019. The curious case of neural text degeneration. arXiv preprint.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Reformer: The efficient transformer", |
| "authors": [ |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Kitaev", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Anselm", |
| "middle": [], |
| "last": "Levskaya", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikita Kitaev, \u0141ukasz Kaiser, and Anselm Levskaya. 2020. Reformer: The efficient transformer.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Revealing the dark secrets of bert", |
| "authors": [ |
| { |
| "first": "Olga", |
| "middle": [], |
| "last": "Kovaleva", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Romanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rumshisky", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "4356--4365", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Olga Kovaleva, Alexey Romanov, Anna Rogers, and Anna Rumshisky. 2019. Revealing the dark secrets of bert. In EMNLP, pages 4356-4365.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Natural Questions: a benchmark for question answering research. TACL", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennimaria", |
| "middle": [], |
| "last": "Palomaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Olivia", |
| "middle": [], |
| "last": "Redfield", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Alberti", |
| "suffix": "" |
| }, |
| { |
| "first": "Danielle", |
| "middle": [], |
| "last": "Epstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Kelcey", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Matthew Kelcey, Jacob Devlin, Kenton Lee, Kristina N. Toutanova, Llion Jones, Ming-Wei Chang, Andrew Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 2019. Natural Questions: a benchmark for question answering research. TACL.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Albert: A lite bert for self-supervised learning of language representations", |
| "authors": [ |
| { |
| "first": "Zhenzhong", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingda", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Piyush", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learning of language representations.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Latent retrieval for weakly supervised open domain question answering", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.00300" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. 2019. Latent retrieval for weakly supervised open domain question answering. arXiv preprint arXiv:1906.00300.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
| "authors": [ |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Wonjin", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Sungdong", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghyeon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunkyu", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Chan", |
| "middle": [ |
| "Ho" |
| ], |
| "last": "So", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaewoo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Bioinformatics", |
| "volume": "36", |
| "issue": "4", |
| "pages": "1234--1240", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2020. Biobert: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics, 36(4):1234-1240.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Multi-head attention with disagreement regularization", |
| "authors": [ |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaopeng", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| }, |
| { |
| "first": "Baosong", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "R" |
| ], |
| "last": "Lyu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2897--2903", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jian Li, Zhaopeng Tu, Baosong Yang, Michael R Lyu, and Tong Zhang. 2018. Multi-head attention with disagree- ment regularization. In EMNLP, pages 2897-2903.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "RoBERTa: A robustly optimized BERT pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. CoRR, abs/1907.11692.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Are sixteen heads really better than one?", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Michel", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Graham", |
| "middle": [], |
| "last": "Neubig", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "14014--14024", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Michel, Omer Levy, and Graham Neubig. 2019. Are sixteen heads really better than one? In Advances in Neural Information Processing Systems 32, pages 14014-14024. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A discrete hard em approach for weakly supervised question answering", |
| "authors": [ |
| { |
| "first": "Sewon", |
| "middle": [], |
| "last": "Min", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sewon Min, Danqi Chen, Hannaneh Hajishirzi, and Luke Zettlemoyer. 2019. A discrete hard em approach for weakly supervised question answering. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A decomposable attention model for natural language inference", |
| "authors": [ |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Oscar", |
| "middle": [], |
| "last": "T\u00e4ckstr\u00f6m", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankur Parikh, Oscar T\u00e4ckstr\u00f6m, Dipanjan Das, and Jakob Uszkoreit. 2016. A decomposable attention model for natural language inference. EMNLP.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharan", |
| "middle": [], |
| "last": "Narang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Matena", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanqi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter J", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1910.10683" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2019. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "SQuAD: 100,000+ questions for machine comprehension of text", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Konstantin", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. EMNLP.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Know what you don't know: Unanswerable questions for SQuAD", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1806.03822" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswerable questions for SQuAD. arXiv preprint arXiv:1806.03822.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Transfer learning in natural language processing", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Swabha", |
| "middle": [], |
| "last": "Swayamdipta", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proc. of NAACL: Tutorials", |
| "volume": "", |
| "issue": "", |
| "pages": "15--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Ruder, Matthew E. Peters, Swabha Swayamdipta, and Thomas Wolf. 2019. Transfer learning in natural language processing. In Proc. of NAACL: Tutorials, pages 15-18, Minneapolis, Minnesota, June. ACL.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "On the importance of diversity in question generation for QA", |
| "authors": [ |
| { |
| "first": "Md Arafat", |
| "middle": [], |
| "last": "Sultan", |
| "suffix": "" |
| }, |
| { |
| "first": "Shubham", |
| "middle": [], |
| "last": "Chandel", |
| "suffix": "" |
| }, |
| { |
| "first": "Ram\u00f3n", |
| "middle": [ |
| "Fernandez" |
| ], |
| "last": "Astudillo", |
| "suffix": "" |
| }, |
| { |
| "first": "Vittorio", |
| "middle": [], |
| "last": "Castelli", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5651--5656", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Md Arafat Sultan, Shubham Chandel, Ram\u00f3n Fernandez Astudillo, and Vittorio Castelli. 2020. On the importance of diversity in question generation for QA. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5651-5656, Online, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Rapidly Bootstrapping a Question Answering Dataset for COVID-19", |
| "authors": [ |
| { |
| "first": "Raphael", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rodrigo", |
| "middle": [], |
| "last": "Nogueira", |
| "suffix": "" |
| }, |
| { |
| "first": "Edwin", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Phuong", |
| "middle": [], |
| "last": "Cam", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Raphael Tang, Rodrigo Nogueira, Edwin Zhang, Nikhil Gupta, Phuong Cam, Kyunghyun Cho, and Jimmy Lin. 2020. Rapidly Bootstrapping a Question Answering Dataset for COVID-19.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "An overview of the bioasq large-scale biomedical semantic indexing and question answering competition", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Tsatsaronis", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgios", |
| "middle": [], |
| "last": "Balikas", |
| "suffix": "" |
| }, |
| { |
| "first": "Prodromos", |
| "middle": [], |
| "last": "Malakasiotis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ioannis", |
| "middle": [], |
| "last": "Partalas", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Zschunke", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "R" |
| ], |
| "last": "Alvers", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Weissenborn", |
| "suffix": "" |
| }, |
| { |
| "first": "Anastasia", |
| "middle": [], |
| "last": "Krithara", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergios", |
| "middle": [], |
| "last": "Petridis", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimitris", |
| "middle": [], |
| "last": "Polychronopoulos", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "BMC bioinformatics", |
| "volume": "16", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Tsatsaronis, Georgios Balikas, Prodromos Malakasiotis, Ioannis Partalas, Matthias Zschunke, Michael R Alvers, Dirk Weissenborn, Anastasia Krithara, Sergios Petridis, Dimitris Polychronopoulos, et al. 2015. An overview of the bioasq large-scale biomedical semantic indexing and question answering competition. BMC bioinformatics, 16(1):138.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems, pages 5998-6008. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Graph attention networks", |
| "authors": [ |
| { |
| "first": "Petar", |
| "middle": [], |
| "last": "Veli\u010dkovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillem", |
| "middle": [], |
| "last": "Cucurull", |
| "suffix": "" |
| }, |
| { |
| "first": "Arantxa", |
| "middle": [], |
| "last": "Casanova", |
| "suffix": "" |
| }, |
| { |
| "first": "Adriana", |
| "middle": [], |
| "last": "Romero", |
| "suffix": "" |
| }, |
| { |
| "first": "Pietro", |
| "middle": [], |
| "last": "Li\u00f2", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Li\u00f2, and Yoshua Bengio. 2018. Graph attention networks. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Multi-granularity hierarchical attention fusion networks for reading comprehension and question answering", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Wang, Ming Yan, and Chen Wu. 2018. Multi-granularity hierarchical attention fusion networks for reading comprehension and question answering. ACL.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Multi-task learning for machine reading comprehension", |
| "authors": [ |
| { |
| "first": "Yichong", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yelong", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingjing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yichong Xu, Xiaodong Liu, Yelong Shen, Jingjing Liu, and Jianfeng Gao. 2018. Multi-task learning for machine reading comprehension. CoRR, abs/1809.06963.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "HotpotQA: A dataset for diverse, explainable multi-hop question answering", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "" |
| }, |
| { |
| "first": "Saizheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "W" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1809.09600" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William W Cohen, Ruslan Salakhutdinov, and Christo- pher D Manning. 2018. HotpotQA: A dataset for diverse, explainable multi-hop question answering. arXiv preprint arXiv:1809.09600.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "XLNet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [ |
| "G" |
| ], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime G. Carbonell, Ruslan Salakhutdinov, and Quoc V. Le. 2019. XLNet: Generalized autoregressive pretraining for language understanding. CoRR, abs/1906.08237.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "A qualitative comparison of CoQA, SQuAD 2.0 and QuAC", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Yatskar", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Yatskar. 2018. A qualitative comparison of CoQA, SQuAD 2.0 and QuAC. CoRR, abs/1809.10735.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Pre-trained language model for biomedical question answering", |
| "authors": [ |
| { |
| "first": "Wonjin", |
| "middle": [], |
| "last": "Yoon", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinhyuk", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Donghyeon", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Minbyul", |
| "middle": [], |
| "last": "Jeong", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaewoo", |
| "middle": [], |
| "last": "Kang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Joint European Conference on Machine Learning and Knowledge Discovery in Databases", |
| "volume": "", |
| "issue": "", |
| "pages": "727--740", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wonjin Yoon, Jinhyuk Lee, Donghyeon Kim, Minbyul Jeong, and Jaewoo Kang. 2019. Pre-trained language model for biomedical question answering. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pages 727-740. Springer.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Addressing semantic drift in question generation for semi-supervised question answering", |
| "authors": [ |
| { |
| "first": "Shiyue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shiyue Zhang and Mohit Bansal. 2019. Addressing semantic drift in question generation for semi-supervised question answering. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "SG-Net: Syntax-guided machine reading comprehension", |
| "authors": [ |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuwei", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Junru", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Sufeng", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1908.05147" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuosheng Zhang, Yuwei Wu, Junru Zhou, Sufeng Duan, and Hai Zhao. 2019. SG-Net: Syntax-guided machine reading comprehension. arXiv preprint arXiv:1908.05147.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Document modeling with graph attention networks for multi-grained machine reading comprehension", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Haoyang", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yaobo", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| }, |
| { |
| "first": "Wanxiang", |
| "middle": [], |
| "last": "Che", |
| "suffix": "" |
| }, |
| { |
| "first": "Daxin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Zheng, Haoyang Wen, Yaobo Liang, Nan Duan, Wanxiang Che, Daxin Jiang, Ming Zhou, and Ting Liu. 2020. Document modeling with graph attention networks for multi-grained machine reading comprehension. ACL.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Ensemble BERT with data augmentation and linguistic knowledge on SQuAD", |
| "authors": [ |
| { |
| "first": "Wen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Xianzhe", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wen Zhou, Xianzhe Zhang, and Hang Jiang. 2019. Ensemble BERT with data augmentation and linguistic knowledge on SQuAD 2.0.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Examples of questions in the NQ dataset. Example 1 contains the short answer in the long answer whereas Example 2 has none.", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Our Attention Diversity Mechanism", |
| "uris": null |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>Data Augmentation on BERT</td><td colspan=\"2\">Short Answer F1 Long Answer F1</td></tr><tr><td>BERTW + SOS</td><td>55.8</td><td>66.7</td></tr><tr><td>BERTW + 1M Synthetic</td><td>56.6</td><td>67.9</td></tr><tr><td>BERTW + 4M Synthetic</td><td>56.9</td><td>68.3</td></tr><tr><td>BERTW + SQuAD 2 PT</td><td>57.0</td><td>67.3</td></tr><tr><td>BERTW + SQuAD 2 PT + 4M Synthetic</td><td>56.8</td><td>67.6</td></tr></table>", |
| "text": "Comparison of GAAMA vs prior work in the industry. GAAMA clearly outperforms the competitors in both short and long answer F1." |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td colspan=\"2\">Model F 1</td><td>T G 50</td><td>T G 95</td><td>T C 50</td><td>T C 95</td></tr><tr><td>Base</td><td colspan=\"5\">42.5 0.05 0.49 0.53 2.32</td></tr><tr><td>Large</td><td colspan=\"5\">50.8 0.10 0.66 1.51 6.00</td></tr></table>", |
| "text": "Ablation study of GAAMA's various components." |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>GAAMA Configs</td><td>Factoid MRR</td></tr><tr><td>Trained on target domain</td><td>24.93</td></tr><tr><td>ZSTL</td><td>24.37</td></tr></table>", |
| "text": "F1 and inference times for BERT base and large models running on GPU and CPU for a subset of the NQ dev set. T D K is the K-th percentile inference in seconds when running on device D (GPU or CPU)." |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td/><td>P@1 R@3 MRR</td></tr><tr><td colspan=\"2\">Prior Work (Tang et al., 2020)</td></tr><tr><td colspan=\"2\">BioBERT + MS MARCO 0.194 0.313 0.312</td></tr><tr><td>T5 + MS MARCO</td><td>0.282 0.404 0.415</td></tr><tr><td>This Work</td><td/></tr><tr><td>GAAMA (ZSTL)</td><td>0.306 0.377 0.414</td></tr></table>", |
| "text": "Results on our test split of the BioASQ 8b dataset. GAAMA with ZSTL is highly competitive." |
| }, |
| "TABREF6": { |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "text": "ZSTL performance of GAAMA vs. the prior work on the CovidQA dataset." |
| } |
| } |
| } |
| } |