| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T09:42:22.238136Z" |
| }, |
| "title": "Retrieval Data Augmentation Informed by Downstream Question Answering Performance", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Ferguson", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Allen Institute for AI", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Dasigi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Allen Institute for AI", |
| "location": {} |
| }, |
| "email": "pradeepd@allenai.org" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Allen Institute for AI", |
| "location": {} |
| }, |
| "email": "tushark@allenai.org" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Allen Institute for AI", |
| "location": {} |
| }, |
| "email": "hannaneh@cs.washington.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Training retrieval models to fetch contexts for Question Answering (QA) over large corpora requires labeling relevant passages in those corpora. Since obtaining exhaustive manual annotations of all relevant passages is not feasible, prior work uses text overlap heuristics to find passages that are likely to contain the answer, but this is not feasible when the task requires deeper reasoning and answers are not extractable spans (e.g.: multi-hop, discrete reasoning). We address this issue by identifying relevant passages based on whether they are useful for a trained QA model to arrive at the correct answers, and develop a search process guided by the QA model's loss. Our experiments show that this approach enables identifying relevant context for unseen data greater than 90% of the time on the IIRC dataset and generalizes better to the end QA task than those trained on just the gold retrieval data on IIRC and QASC datasets.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Training retrieval models to fetch contexts for Question Answering (QA) over large corpora requires labeling relevant passages in those corpora. Since obtaining exhaustive manual annotations of all relevant passages is not feasible, prior work uses text overlap heuristics to find passages that are likely to contain the answer, but this is not feasible when the task requires deeper reasoning and answers are not extractable spans (e.g.: multi-hop, discrete reasoning). We address this issue by identifying relevant passages based on whether they are useful for a trained QA model to arrive at the correct answers, and develop a search process guided by the QA model's loss. Our experiments show that this approach enables identifying relevant context for unseen data greater than 90% of the time on the IIRC dataset and generalizes better to the end QA task than those trained on just the gold retrieval data on IIRC and QASC datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Answering questions over a large text corpus typically requires retrieving information relevant to the question from the corpus, which is then used by a Question Answering (QA) model to arrive at the answer. Recent work (Guu et al., 2020; Lewis et al., 2020; Ni et al., 2020) relies on retrieval models that learn dense representations of questions and retrieval candidates (Karpukhin et al., 2020; Khattab and Zaharia, 2020) trained separately or jointly with the QA model. These learned retrieval models are more effective than those that use simple word overlap signals (Robertson and Zaragoza, 2009; Chen et al., 2017) , but they require the positive retrieval targets for each question labeled. It is often difficult, if not impossible, to exhaustively label all the facts relevant to answering a question in a large corpus of text. Consequently, even when the datasets provide retrieval labels, it is often the case that there exist alternative paths to the answer that", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 238, |
| "text": "(Guu et al., 2020;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 239, |
| "end": 258, |
| "text": "Lewis et al., 2020;", |
| "ref_id": null |
| }, |
| { |
| "start": 259, |
| "end": 275, |
| "text": "Ni et al., 2020)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 374, |
| "end": 398, |
| "text": "(Karpukhin et al., 2020;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 399, |
| "end": 425, |
| "text": "Khattab and Zaharia, 2020)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 573, |
| "end": 603, |
| "text": "(Robertson and Zaragoza, 2009;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 604, |
| "end": 622, |
| "text": "Chen et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The digestive system breaks food into nutrients.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Gold", |
| "sec_num": null |
| }, |
| { |
| "text": "The digestive system breaks food down into what? a) meals b) fats c) fuel", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Q:", |
| "sec_num": null |
| }, |
| { |
| "text": "d) strength \u2026", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Q:", |
| "sec_num": null |
| }, |
| { |
| "text": "Nutrients are fuel for your body.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Q:", |
| "sec_num": null |
| }, |
| { |
| "text": "Carbohydrate breaks down into glucose in the digestive system.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alternate Fact 1", |
| "sec_num": null |
| }, |
| { |
| "text": "All carbohydrate foods become glucose, fuel for the body.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alternate Fact 2", |
| "sec_num": null |
| }, |
| { |
| "text": "After a meal the digestive system breaks some food down into glucose.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alternate Fact 2", |
| "sec_num": null |
| }, |
| { |
| "text": "Glucose, a simple sugar, is the body's main fuel.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alternate Fact 2", |
| "sec_num": null |
| }, |
| { |
| "text": "Properly digested food is our body's fuel.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alternate Fact 2", |
| "sec_num": null |
| }, |
| { |
| "text": "Food supplies fuel in the form of nutrients. are not labeled (Jhamtani and Clark, 2020) , an example of which is shown in Figure 1 . The common heuristic of considering all contexts that contain mentions of the answer span (Clark and Gardner, 2018; Lee et al., 2019a) does not work when the QA task is not extractive (e.g.: when the answers are binary or require some numerical computation).", |
| "cite_spans": [ |
| { |
| "start": 61, |
| "end": 87, |
| "text": "(Jhamtani and Clark, 2020)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 223, |
| "end": 248, |
| "text": "(Clark and Gardner, 2018;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 249, |
| "end": 267, |
| "text": "Lee et al., 2019a)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 122, |
| "end": 130, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Alternate Fact 2", |
| "sec_num": null |
| }, |
| { |
| "text": "We propose to address this issue by augmenting the set of labeled retrieval targets with additional candidates that are not labeled as positive, but still provide sufficient information to answer the corresponding questions. Given question-answer pairs, and a QA model trained to maximize the likelihood of the correct answers conditioned on the labeled retrieval targets and the questions, we search for alternative contexts that also make the correct answers likely. Concretely, our search process finds those contexts not labeled as gold, that minimize the loss of the QA model. We consider these contexts as alternative retrieval targets, and train the retrieval model with the combination of these alternative contexts and the gold labeled contexts as positives. Our method is particularly effective for non-extractive QA tasks since it does not rely on answer-span overlaps.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Alternate Fact 2", |
| "sec_num": null |
| }, |
| { |
| "text": "We evaluate our approach on two multi-hop QA tasks, IIRC (Ferguson et al., 2020) and QASC (Khot et al., 2019) , and show that our search for relevant contexts guided by the performance of the QA model correctly identifies a relevant context 91% of the time on IIRC and 84% of the time on QASC (Table 2a ). Augmenting the retrieval training data with the results from our search process increases recall on unseen questions, leading to an improvement in the downstream QA performance by 0.5 F 1 points on IIRC and 2.1 accuracy points on QASC (Section 3.2).", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 80, |
| "text": "(Ferguson et al., 2020)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 90, |
| "end": 109, |
| "text": "(Khot et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 293, |
| "end": 302, |
| "text": "(Table 2a", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Alternate Fact 2", |
| "sec_num": null |
| }, |
| { |
| "text": "Overview and Problem Our approach uses the standard two-step pipeline for open-domain QA seen in prior work. We first run a retrieval model that takes as input a question, q, and a large corpus of passages, C, and outputs a small subset of those passages, c \u2282 C, that contains sufficient information to answer the question. This subset is then passed to the second step: the QA model. This model takes as input the same question, q, and subset of passages, c, from the first step, and outputs an answer, a. Depending on the data, this answer can take many forms, such as a span from the context, a number, yes/no, or none of these if the question is unanswerable.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "2" |
| }, |
| { |
| "text": "For each question, there may be many valid sets of context passages, where each set 1 contains all the information necessary to answer the question. We refer to individual sets as c * i , and the superset of all such sets as c * = {c * 1 . . . c * n }. As seen in Figure 1 , these different context sets may express different reasoning paths reaching the answer, or they may contain different ways of expressing the same reasoning path. However, most datasets just contain annotations of one such set per question, c * i . Our goal is to use these annotations to identify alternate, unannotated, relevant context,c \u2208 c * \\ {c * i }, for each question. These additional contexts is used to augment the retrieval training data. Approach The goal of the retrieval model is to identify context that maximizes the probability of the correct answer when given to the QA model. When supervised data, c * i , is available, this is achieved by training the retrieval model to predict the input that the QA model is trained on i.e., \u03b8 r = arg max \u03b8 P (c * i |q, \u03b8), and \u03b8 q = arg max \u03b8 P (a|q, c * i , \u03b8), where the retriever and the QA models are parameterized by \u03b8 r and \u03b8 q . We refer to this initial QA model as the base QA model. When supervised data is not available, we can identify the retrieved contexts\u0109, by searching over the corpus for the contexts that maximize the probability of the correct answer under the base QA model:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 264, |
| "end": 272, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "c = arg max c\u2282C P (a|q, c, \u03b8 q )", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Method", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Based on this, for each question, we search over the corpus for the top k contexts,\u0109 1 . . .\u0109 k , and add them as additional data augmentation when training a new retrieval model:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u03b8 r = arg max \u03b8 P (c * i |q, \u03b8) + k j=1 P (\u0109 j |q, \u03b8) (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Lastly, we train a final QA model using the gold context, including the results of this new retrieval model to incorporate the updated training and make it more robust to noise:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "c r = arg max c\u2208C P (c|q,\u03b8 r ) \u03b8 q = arg max \u03b8 P (a|q, {c * i , c r }, \u03b8)", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Method", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Labeling sets of facts Because we apply our approach to datasets containing questions that require multiple facts to answer, we need to label sets of facts, not individual ones. For this reason, we train our base QA models conditioned on sets of facts, and while both labeling new contexts with the base QA model, and retrieving contexts, we use beam search to output sets of facts. In order to prevent the base QA model from memorizing the gold contexts, we use a 10-fold cross-labeling approach. 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We show the effect of our approach on two multihop QA datasets: IIRC (Ferguson et al., 2020) and QASC (Khot et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 69, |
| "end": 92, |
| "text": "(Ferguson et al., 2020)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 102, |
| "end": 121, |
| "text": "(Khot et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "3" |
| }, |
| { |
| "text": "IIRC is a multi-hop QA open QA dataset, consisting of a mix of yes/no questions, span selection questions, unanswerable questions, and questions requiring discrete reasoning such as arithmetic or counting. Each question is associated with a paragraph, and requires both information from that paragraph, as well as information from one or more pages linked to from within that paragraph.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Setup", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "QASC is a multiple-choice, multi-hop QA dataset constructed from a corpus of 17M facts. Each question is written by composing two facts from the corpus, and includes eight answer choices. eQASC (Jhamtani and Clark, 2020) includes a more exhaustive annotation of relevant contexts for QASC questions and enables a more accurate evaluation of retrieval performance on QASC.", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 220, |
| "text": "(Jhamtani and Clark, 2020)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Setup", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Evaluation We report recall@10 and the final QA performance results that provide a more reliable evaluation of the retrieval performance. For eQASC, we use mean-average precision (MAP) of the positive examples. Implementation Details Following prior work on IIRC (Ni et al., 2020) , we adopt a pipeline approach consisting of three steps: link selection using RoBERTa-base, retrieval, and answer selection using NumNet++ (Ran et al., 2019). For QASC, we initially filter the corpus using the two-step BM25 described in (Khot et al., 2019) , selecting the top 1000 pairs of facts per answer choice. Similar to IIRC, we then select the top 10 pairs using a RoBERTa-base bi-encoder. Final QA model separately scores each answer choice using another RoBERTa-base model, and computes a softmax to get the final distribution over the choices.", |
| "cite_spans": [ |
| { |
| "start": 263, |
| "end": 280, |
| "text": "(Ni et al., 2020)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 519, |
| "end": 538, |
| "text": "(Khot et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and Setup", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We compare our approach of identifying additional relevant context using QA loss with other retrieval baselines and alternate augmentation methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparisons and Results", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We use the top results from BM25 in lieu of training a supervised model with the annotated data. This is a commonly used heuristic when no retrieval annotations are available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BM25:", |
| "sec_num": null |
| }, |
| { |
| "text": "Sup A Models are trained using just the annotated training data with no additional data provided.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "BM25:", |
| "sec_num": null |
| }, |
| { |
| "text": "We augment the annotated training data with the top results from querying the corpus using BM25 with the question and answer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sup A+BM25", |
| "sec_num": null |
| }, |
| { |
| "text": "Sup A+R We augment the annotated training data with the top retrieval results conditioned on the question and correct answer. As in the QA-loss labeling approach, we use a 10-fold labeling procedure to prevent memorizing the annotated context. Prior Work -71.9 -50.6 - Table 1 : Comparison of different retrieval models. R@10 and MAP are direct evaluations of retrieval performance, Acc is the performance of the final QA model trained given retrieval results. For IIRC, prior work is the state-of-the-art model (Ni et al., 2020) that uses the same QA model as our work. For QASC, prior work is RoBERTa-base model that uses the same model size as ours and is trained and evaluated on the same data used by (Khashabi et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 512, |
| "end": 529, |
| "text": "(Ni et al., 2020)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 706, |
| "end": 729, |
| "text": "(Khashabi et al., 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 269, |
| "end": 276, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sup A+BM25", |
| "sec_num": null |
| }, |
| { |
| "text": "Main Results Table 1 compares our approach, Sup A+QA , with the baselines and prior work. 3 Our approach results in improved performance on both datasets with a larger improvement on QASC over the baseline compared to IIRC. This is likely due to the fact that QASC has a much larger number of alternate contexts per question compared to IIRC (discussed below in oracle analysis). We generally see a correlation between retrieval recall of the gold annotations, performance on eQASC, and downstream accuracy, indicating that providing more accurate context to the downstream model does help with QA performance. We manually labeled the accuracy of the top result for 100 questions for each approach (results in table 2a). We can see that using the QA model to label data significantly outperforms the other two approaches. In table 2b we also further break down the accuracy based on the different types of questions in IIRC. Our approach works well on Binary and Numeric questions, where the span heuristic cannot be applied. Our approach also outperforms the it on Span Selection questions, where the answer is a span from the context. Although the heuristic can be applied on these questions, it often returns false positives. Our approach struggles with Span Compare questions, as discussed in more detail in Error Analysis below. Oracle Analysis Figure 2c shows an oracle study of the same 100 questions from the previous section to determine how many alternate contexts were available in each dataset. For IIRC, we considered all sentences from the gold articles, and for QASC we considered the top twenty sentences according to BM25. QASC has a much higher ceiling for this form of data augmentation, as can be seen by the fact that 70% of questions have multiple relevant contexts, compared to IIRC where many questions have only a single context. 
Additionally, many of the questions in IIRC with exactly 2 contexts share a similar structure, seen in the third example in Figure 2 . Although our approach is often able to identify this alternate context, using it to augment the data does not add much new information. Error Analysis Figure 2 shows examples of problems our approach encounters in IIRC. The first question requires the model to count occurrences of an event, but the QA model instead selects context containing a textual expression of the answer. The second question is a span compare example. The model has to identify context containing attributes of two entities mentioned in the original paragraph, but takes a shortcut and and only selects context for the correct answer.", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 91, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 13, |
| "end": 20, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1350, |
| "end": 1359, |
| "text": "Figure 2c", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1979, |
| "end": 1987, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 2141, |
| "end": 2149, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sup A+BM25", |
| "sec_num": null |
| }, |
| { |
| "text": "Most similar to our work are recent approaches using weak supervision for learning to retrieve for QA, using only questions and answers. Lee et al. (2019b) pretrain a retrieval model using an inverse cloze task. Zhao et al. (2021) more recently pro-posed to iteratively improve a retrieval model using hard-EM. Both approaches filter the data using the answer span heuristic. This heuristic breaks down on multi-hop questions, as well as questions that are not answerable by spans, such as true/false or discrete reasoning questions. Izacard and Grave (2021) and Yang and Seo (2021) propose using knowledge distillation to incorporate QA information into a supervised retriever, and while assuming access to retrieval annotations, Ni et al. (2020) jointly learn retrieval and QA by marginalizing over potential contexts. All three of these approaches require encoding all potential contexts together with the question, whereas ours does not have that requirement, making ours more memory-efficient.", |
| "cite_spans": [ |
| { |
| "start": 212, |
| "end": 230, |
| "text": "Zhao et al. (2021)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 534, |
| "end": 558, |
| "text": "Izacard and Grave (2021)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 563, |
| "end": 582, |
| "text": "Yang and Seo (2021)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 731, |
| "end": 747, |
| "text": "Ni et al. (2020)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "4" |
| }, |
| { |
| "text": "This work shows that using the loss of a QA model trained on a partial set of labeled contexts to search for alternative contexts for retrieval is an effective method for augmenting the retriever's training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our results present a more label-efficient training scheme for building supervised retrievers for QA. They also suggest that creators of datasets for open QA tasks that require supervised retrievers can better allocate their annotation budgets by obtaining retrieval labels for a small set of questions while maximizing the number of question-answer annotations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We apply our approach to datasets containing questions that require multiple facts to answer, so we label sets of facts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We train ten models, each on 90% of the data, and use them to label the remaining 10%.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The state-of-the-art model(Khashabi et al., 2020) for QASC uses roughly 100x more parameters than us (with the results 89.6), but the same model with a comparable size as ours is significantly worse, 50.8. Therefore, we use the best-performing model that has the same size as ours.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Reading wikipedia to answer opendomain questions", |
| "authors": [ |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Fisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danqi Chen, Adam Fisch, Jason Weston, and Antoine Bordes. 2017. Reading wikipedia to answer open- domain questions. In ACL.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Simple and effective multi-paragraph reading comprehension", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "845--855", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1078" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Clark and Matt Gardner. 2018. Simple and effective multi-paragraph reading comprehension. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 845-855, Melbourne, Australia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Iirc: A dataset of incomplete information reading comprehension questions", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Ferguson", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Dasigi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Ferguson, Matt Gardner, Hannaneh Hajishirzi, Tushar Khot, and Pradeep Dasigi. 2020. Iirc: A dataset of incomplete information reading compre- hension questions. In ACL.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Retrieval augmented language model pre-training", |
| "authors": [ |
| { |
| "first": "Kelvin", |
| "middle": [], |
| "last": "Guu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Zora", |
| "middle": [], |
| "last": "Tung", |
| "suffix": "" |
| }, |
| { |
| "first": "Panupong", |
| "middle": [], |
| "last": "Pasupat", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingwei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 37th International Conference on Machine Learning", |
| "volume": "119", |
| "issue": "", |
| "pages": "3929--3938", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 3929-3938. PMLR.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Distilling knowledge from reader to retriever for question answering", |
| "authors": [ |
| { |
| "first": "Gautier", |
| "middle": [], |
| "last": "Izacard", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gautier Izacard and Edouard Grave. 2021. Distilling knowledge from reader to retriever for question an- swering. In ICLR.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Learning to explain: Datasets and models for identifying valid reasoning chains in multihop question-answering", |
| "authors": [ |
| { |
| "first": "Harsh", |
| "middle": [], |
| "last": "Jhamtani", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Harsh Jhamtani and Peter Clark. 2020. Learning to explain: Datasets and models for identifying valid reasoning chains in multihop question-answering. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Dense passage retrieval for open-domain question answering", |
| "authors": [ |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Karpukhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Barlas", |
| "middle": [], |
| "last": "Oguz", |
| "suffix": "" |
| }, |
| { |
| "first": "Sewon", |
| "middle": [], |
| "last": "Min", |
| "suffix": "" |
| }, |
| { |
| "first": "Ledell", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wen-tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vladimir Karpukhin, Barlas Oguz, Sewon Min, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen tau Yih. 2020. Dense passage retrieval for open-domain ques- tion answering. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Unifiedqa: Crossing format boundaries with a single qa system", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Khashabi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sewon", |
| "middle": [], |
| "last": "Min", |
| "suffix": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Oyvind", |
| "middle": [], |
| "last": "Tafjord", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Khashabi, Sewon Min, Tushar Khot, Ashish Sab- harwal, Oyvind Tafjord, Peter Clark, and Hannaneh Hajishirzi. 2020. Unifiedqa: Crossing format bound- aries with a single qa system. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Colbert: Efficient and effective passage search via contextualized late interaction over bert", |
| "authors": [ |
| { |
| "first": "Omar", |
| "middle": [], |
| "last": "Khattab", |
| "suffix": "" |
| }, |
| { |
| "first": "Matei", |
| "middle": [], |
| "last": "Zaharia", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omar Khattab and Matei Zaharia. 2020. Colbert: Effi- cient and effective passage search via contextualized late interaction over bert. In SIGIR.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Qasc: A dataset for question answering via sentence composition", |
| "authors": [ |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Guerquin", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tushar Khot, Peter Clark, Michael Guerquin, Petre Jansen, and Shish Sabharwal. 2019. Qasc: A dataset for question answering via sentence composition. In AAAI.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Latent retrieval for weakly supervised open domain question answering", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6086--6096", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1612" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. 2019a. Latent retrieval for weakly supervised open domain question answering. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics, pages 6086-6096, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Latent retrieval for weakly supervised open domain question answering", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. 2019b. Latent retrieval for weakly supervised open domain question answering. In ACL.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Retrieval-augmented generation for knowledgeintensive nlp tasks", |
| "authors": [ |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Ethan", |
| "middle": [], |
| "last": "Perez", |
| "suffix": "" |
| }, |
| { |
| "first": "Aleksandara", |
| "middle": [], |
| "last": "Piktus", |
| "suffix": "" |
| }, |
| { |
| "first": "Fabio", |
| "middle": [], |
| "last": "Petroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Karpukhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Hein- rich Kuttler, Mike Lewis, Wen tau Yih, Tim Rock- t\u00e4schel, Sebastian Riedel, and Douwe Kiela. 2020. Retrieval-augmented generation for knowledge- intensive nlp tasks. ArXiv, abs/2005.11401.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Mitigating false-negative contexts in multi-document question answering with retrieval marginalization", |
| "authors": [ |
| { |
| "first": "Ansong", |
| "middle": [], |
| "last": "Ni", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Pradeep", |
| "middle": [], |
| "last": "Dasigi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2103.12235" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ansong Ni, Matt Gardner, and Pradeep Dasigi. 2020. Mitigating false-negative contexts in multi-document question answering with retrieval marginalization. In arXiv preprint arXiv:2103.12235.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "NumNet: Machine reading comprehension with numerical reasoning", |
| "authors": [ |
| { |
| "first": "Qiu", |
| "middle": [], |
| "last": "Ran", |
| "suffix": "" |
| }, |
| { |
| "first": "Yankai", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qiu Ran, Yankai Lin, Peng Li, Jie Zhou, and Zhiyuan Liu. 2019. NumNet: Machine reading comprehen- sion with numerical reasoning. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "The probabilistic relevance framework: Bm25 and beyond", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Robertson", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Zaragoza", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Foundations and Trends in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Robertson and Hugo Zaragoza. 2009. The prob- abilistic relevance framework: Bm25 and beyond. In Foundations and Trends in Information Retrieval.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Is retriever merely an approximator of reader?", |
| "authors": [ |
| { |
| "first": "Sohee", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Minjoon", |
| "middle": [], |
| "last": "Seo", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2010.10999" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sohee Yang and Minjoon Seo. 2021. Is retriever merely an approximator of reader? In arXiv preprint arXiv:2010.10999.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Distantly-supervised evidence retrieval enables question answering without evidence annotation", |
| "authors": [ |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Chenyan", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "III" |
| } |
| ], |
| "year": 2021, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen Zhao, Chenyan Xiong, Jordan Boyd-Graber, and Hal Daum\u00e9 III. 2021. Distantly-supervised evidence retrieval enables question answerwing without evi- dence annotation. In EMNLP.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Retrieval annotations (gold) are often incomplete, only providing one of many relevant contexts. Alternative contexts can provide different views of the same information, providing more robust training data." |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Example errors of our approach in IIRC. Relevant context is highlighted in green, and irrelevant context is in red." |
| }, |
| "TABREF2": { |
| "text": "", |
| "num": null, |
| "content": "<table><tr><td colspan=\"5\">: (a) Manual analysis Accuracy of different approaches based on manual analysis on 100 examples for different context labeling approaches, (b) comparing span-selection retrieval baseline with our approach for different question types, and (c) Comparison of the number of relevant contexts in each dataset.</td></tr><tr><td colspan=\"4\">Q: How many championships had Biela won? A: 10</td></tr><tr><td colspan=\"2\">Main context \u2026 started his career in 1988 replacing Audi Vice Champion Frank Biela \u2026</td><td colspan=\"2\">Gold His greatest achievements include winning: 1991 \u2026 1993 \u2026</td><td>QA-loss Biela comfortably won the title \u2026 being classified in the top ten \u2026</td><td>BM25 After winning the ALMS series\u2026</td></tr><tr><td colspan=\"5\">Q: Which play was published first? A: A Midsummer Night's Dream</td></tr><tr><td>Main context</td><td>Gold</td><td/><td>QA-loss</td><td>BM25</td></tr><tr><td>\u2026 performed in productions</td><td colspan=\"2\">written between 1599/1602.</td><td colspan=\"2\">Set in Denmark, the play depicts Prince Hamlet\u2026</td><td>Shakespeare in the Arb has published\u2026</td></tr><tr><td>of Hamlet and A Midsummer</td><td/><td/><td/></tr><tr><td>Night's Dream \u2026</td><td colspan=\"2\">written in 1595/1596.</td><td colspan=\"2\">Usually dated 1595 or early 1596.</td><td>To die, to sleep, is that all?</td></tr><tr><td colspan=\"3\">Q: What year did the war begin? A: 1756</td><td/></tr><tr><td>Main context</td><td>Gold</td><td/><td>QA-loss</td><td>BM25</td></tr><tr><td>\u2026 and was expanded during</td><td colspan=\"2\">The Seven Years' War \u2026 fought</td><td colspan=\"2\">It is called the Seven Years' War</td><td>Pitt was the head of the government</td></tr><tr><td>the Seven Years' War \u2026</td><td colspan=\"2\">between 1756 and 1763</td><td>(1756 -1763).</td><td>from 1756 to 1761, and\u2026</td></tr></table>", |
| "type_str": "table", |
| "html": null |
| } |
| } |
| } |
| } |