| { |
| "paper_id": "N19-1030", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:56:51.451228Z" |
| }, |
| "title": "Learning to Attend On Essential Terms: An Enhanced Retriever-Reader Model for Open-domain Question Answering", |
| "authors": [ |
| { |
| "first": "Jianmo", |
| "middle": [], |
| "last": "Ni", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "addrLine": "San Diego" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chenguang", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Microsoft Speech and Dialogue Research Group", |
| "institution": "", |
| "location": {} |
| }, |
| "email": "chezhu@microsoft.com" |
| }, |
| { |
| "first": "Weizhu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Dynamics", |
| "location": { |
| "postCode": "365 AI" |
| } |
| }, |
| "email": "wzchen@microsoft.com" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "McAuley", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of California", |
| "location": { |
| "addrLine": "San Diego" |
| } |
| }, |
| "email": "jmcauley@ucsd.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Open-domain question answering remains a challenging task as it requires models that are capable of understanding questions and answers, collecting useful information, and reasoning over evidence. Previous work typically formulates this task as a reading comprehension or entailment problem given evidence retrieved from search engines. However, existing techniques struggle to retrieve indirectly related evidence when no directly related evidence is provided, especially for complex questions where it is hard to parse precisely what the question asks. In this paper we propose a retriever-reader model that learns to attend on essential terms during the question answering process. We build (1) an essential term selector which first identifies the most important words in a question, then reformulates the query and searches for related evidence; and (2) an enhanced reader that distinguishes between essential terms and distracting words to predict the answer. We evaluate our model on multiple open-domain multiple-choice QA datasets, notably performing at the level of the state-of-the-art on the AI2 Reasoning Challenge (ARC) dataset. * Most of the work was done during internship at Microsoft, Redmond.", |
| "pdf_parse": { |
| "paper_id": "N19-1030", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Open-domain question answering remains a challenging task as it requires models that are capable of understanding questions and answers, collecting useful information, and reasoning over evidence. Previous work typically formulates this task as a reading comprehension or entailment problem given evidence retrieved from search engines. However, existing techniques struggle to retrieve indirectly related evidence when no directly related evidence is provided, especially for complex questions where it is hard to parse precisely what the question asks. In this paper we propose a retriever-reader model that learns to attend on essential terms during the question answering process. We build (1) an essential term selector which first identifies the most important words in a question, then reformulates the query and searches for related evidence; and (2) an enhanced reader that distinguishes between essential terms and distracting words to predict the answer. We evaluate our model on multiple open-domain multiple-choice QA datasets, notably performing at the level of the state-of-the-art on the AI2 Reasoning Challenge (ARC) dataset. * Most of the work was done during internship at Microsoft, Redmond.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Open-domain question answering (QA) has been extensively studied in recent years. Many existing works have followed the 'search-and-answer' strategy and achieved strong performance (Chen et al., 2017; Kwon et al., 2018; Wang et al., 2018b) spanning multiple QA datasets such as TriviaQA (Joshi et al., 2017) , SQuAD (Rajpurkar et al., 2016) , MS-Macro (Nguyen et al., 2016) , ARC ", |
| "cite_spans": [ |
| { |
| "start": 181, |
| "end": 200, |
| "text": "(Chen et al., 2017;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 201, |
| "end": 219, |
| "text": "Kwon et al., 2018;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 220, |
| "end": 239, |
| "text": "Wang et al., 2018b)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 287, |
| "end": 307, |
| "text": "(Joshi et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 316, |
| "end": 340, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 352, |
| "end": 373, |
| "text": "(Nguyen et al., 2016)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "However, open-domain QA tasks become inherently more difficult when (1) dealing with questions with little available evidence; (2) solving questions where the answer type is free-form text (e.g. multiple-choice) rather than a span among existing passages (i.e., 'answer span'); or when (3) the need arises to understand long and complex questions and reason over multiple passages, rather than simple text matching. As a result, it is essential to incorporate commonsense knowledge or to improve retrieval capability to better capture partially related evidence (Chen et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 562, |
| "end": 581, |
| "text": "(Chen et al., 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "among others.", |
| "sec_num": null |
| }, |
| { |
| "text": "As shown in Table 1 , the TriviaQA, SQuAD, and MS-Macro datasets all provide passages within which the correct answer is guaranteed to exist. However, this assumption ignores the difficulty of retrieving question-related evidence from a large volume of open-domain resources, especially when considering complex questions which require reasoning or commonsense knowledge. On the other hand, ARC does not provide passages known to contain the correct answer. Instead, the task of identifying relevant passages is left to the solver. However, questions in ARC have multiple answer choices that provide indirect information that can help solve the question. As such an effective model needs to account for relations among passages, questions, and answer choices. Real-world datasets such as Amazon-QA (a corpus of user queries from Amazon) (McAuley and Yang, 2016) also exhibit the same challenge, i.e., the need to surface related evidence from which to extract or summarize an answer. Figure 1 shows an example of a question in the ARC dataset and demonstrates the difficulties in retrieval and reading comprehension. As shown for Choice 1 (C1), a simple concatenation of the 1 For SQuAD and TriviaQA, since the questions are paired with span-type answers, it is convenient to obtain ranking supervision where retrieved passages are relevant via distant supervision; however free-form questions in ARC and Amazon-QA result in a lack of supervision which makes the problem more difficult. For MS-Macro, the dataset is designed to annotate relevant passages though it has free-form answers. ARC \u2248 7K Amazon-QA (McAuley and Yang, 2016) \u2248 1.48M", |
| "cite_spans": [ |
| { |
| "start": 837, |
| "end": 861, |
| "text": "(McAuley and Yang, 2016)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1607, |
| "end": 1631, |
| "text": "(McAuley and Yang, 2016)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 984, |
| "end": 992, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "among others.", |
| "sec_num": null |
| }, |
| { |
| "text": "SQuAD (Rajpurkar et al., 2016) \u2248 100K TriviaQA (Joshi et al., 2017) \u2248 650K MS-Macro (Nguyen et al., 2016) \u2248 1M Table 1 : Differences among popular QA datasets.", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 30, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 47, |
| "end": 67, |
| "text": "(Joshi et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 84, |
| "end": 105, |
| "text": "(Nguyen et al., 2016)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 111, |
| "end": 118, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "among others.", |
| "sec_num": null |
| }, |
| { |
| "text": "Query1: Mercury , the planet nearest to the Sun , has extreme surface temperatures , ranging from 465 C in sunlight to -180 C in darkness .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "among others.", |
| "sec_num": null |
| }, |
| { |
| "text": "Why is there such a large range of temperatures on Mercury? The planet is too small to hold heat.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "among others.", |
| "sec_num": null |
| }, |
| { |
| "text": "Q: Mercury , the planet nearest to the Sun , has extreme surface temperatures , ranging from 465 C in sunlight to -180 C in darkness . Why is there such a large range of temperatures on Mercury?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "among others.", |
| "sec_num": null |
| }, |
| { |
| "text": "C1: The planet is too small to hold heat. C4: The planet lacks an atmosphere to hold heat .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "among others.", |
| "sec_num": null |
| }, |
| { |
| "text": "S1: The lack of atmosphere also contributes to the planet 's wild temperature extremes . S2: Mercury is the closest planet to the sun and has a thin atmosphere, no air pressure and an extremely high temperature. \u2026 S1: Other planets such as Mercury has extreme hot and cold temperatures . S2: The planet Mercury is too small and has too little gravity to hold onto an atmosphere. \u2026 question and the answer choice is not a reliable query and is of little help when trying to find supporting evidence to answer the question (e.g. we might retrieve sentences similar to the question or the answer choice, but would struggle to find evidence explaining why the answer choice is correct). On the other hand, a reformulated query consisting of essential terms in the question and Choice 4 can help retrieve evidence explaining why Choice 4 is a correct answer. To achieve this, the model needs to (1) ensure that the retrieved evidence supports the fact mentioned in both the question and the answer choices and (2) capture this information and predict the correct answer. To address these difficulties, we propose an essential-term-aware Retriever-Reader (ET-RR) model that learns to attend on essential terms during retrieval and reading. Specifically, we develop a two-stage method with an essential term selector followed by an attention-enhanced reader.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Query1 = Q+C1", |
| "sec_num": null |
| }, |
| { |
| "text": "Essential term selector. ET-Net is a recurrent neural network that seeks to understand the question and select essential terms, i.e., key words, from the question. We frame this problem as a classification task for each word in the question. These essential terms are then concatenated with each answer choice and fed into a retrieval engine to obtain related evidence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MRC", |
| "sec_num": null |
| }, |
| { |
| "text": "Attention-Enhanced Reader.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MRC", |
| "sec_num": null |
| }, |
| { |
| "text": "Our neural reader takes the triples (question, answer choice, retrieved passage) as input. The reader consists of a sequence of language understanding layers: an input layer, attention layer, sequence modeling layer, fusion layer, and an output layer. The attention and fusion layers help the model to obtain a refined representation of one text sequence based on the understanding of another, e.g. a passage representation based on an understanding of the question. We further add a choice-interaction module to handle the semantic relations and differences between answer choices. Experiments show that this can further improve the model's accuracy. We evaluate our model on the ARC Challenge dataset, where our model achieves an accuracy of 36.61% on the test set, and outperformed all leaderboard solutions at the time of writing (Sep. 2018). To compare with other benchmark datasets, we adapt RACE (Lai et al., 2017) and MCScript (Ostermann et al., 2018) to the open domain setting by removing their supervision in the form of relevant passages. We also consider a large-scale real-world open-domain dataset, Amazon-QA, to evaluate our model's scalability and to compare against standard benchmarks designed for the open-domain setting. Experiments on these three datasets show that ET-RR outperforms baseline models by a large margin. We conduct multiple ablation studies to show the effectiveness of each component of our model. Finally, we perform in-depth error analysis to explore the model's limitations.", |
| "cite_spans": [ |
| { |
| "start": 903, |
| "end": 921, |
| "text": "(Lai et al., 2017)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 935, |
| "end": 959, |
| "text": "(Ostermann et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "MRC", |
| "sec_num": null |
| }, |
| { |
| "text": "There has recently been growing interest in building better retrievers for open-domain QA. Wang et al. (2018b) proposed a Reinforced Ranker-Reader model that ranks retrieved evidence and assigns different weights to evidence prior to processing by the reader. Min et al. (2018) demonstrated that for several popular MRC datasets (e.g. SQuAD, TriviaQA) most questions can be answered using only a few sentences rather than the entire document. Motivated by this observation, they built a sentence selector to gather this potential evidence for use by the reader model. Nishida et al. (2018) developed a multi-task learning (MTL) method for a retriever and reader in order to obtain a strong retriever that considers certain passages including the answer text as positive samples during training. The proposed MTL framework is still limited to scenarios where it is feasible to discover whether the passages contain the answer span. Although these works have achieved progress on open-domain QA by improving the ranking or selection of given evidence, few have focused on the scenario where the model needs to start by searching for the evidence itself.", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 110, |
| "text": "Wang et al. (2018b)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 260, |
| "end": 277, |
| "text": "Min et al. (2018)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Scientific Question Answering (SQA) is a representative open-domain task that requires capability in both retrieval and reading comprehension. In this paper, we study question answering on the AI2 Reasoning Challenge (ARC) scientific QA dataset. This dataset contains multiple-choice scientific questions from 3rd to 9th grade standardized tests and a large corpus of relevant information gathered from search engines. The dataset is partitioned into \"Challenge\" and \"Easy\" sets. The challenge set consists of questions that cannot be answered correctly by either of the solvers based on Pointwise Mutual Information (PMI) or Information Retrieval (IR).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Existing models tend to achieve only slightly better and sometimes even worse performance than random guessing, which shows that existing models are not well suited to this kind of QA task. Jansen et al. (2017) first developed a rule-based focus word extractor to identify essential terms in the question and answer candidates. The extracted terms are used to aggregate a list of potential answer justifications for each answer candidate. Experiments shown that focus words are beneficial for SQA on a subset of the ARC dataset. Khashabi et al. (2017) also worked on the problem of finding essential terms in a question for solving SQA problems. They published a dataset containing over 2,200 science questions annotated with essential terms and train multiple classifiers on it. Similarly, we leverage this dataset to build an essential term selector using a neural network-based algorithm. More recently, Boratko et al. 2018developed a labeling interface to obtain high quality labels for the ARC dataset. One finding is that human annotators tend to retrieve better evidence after they reformulate the search queries which are originally constructed by a simple concatenation of question and answer choice. By feeding the evidence obtained by human-reformulated queries into a pre-trained MRC model (i.e., DrQA (Chen et al., 2017) ) they achieved an accuracy increase of 42% on a subset of 47 questions. This shows the potential for a \"human-like\" retriever to boost performance on this task.", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 210, |
| "text": "Jansen et al. (2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 529, |
| "end": 551, |
| "text": "Khashabi et al. (2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1314, |
| "end": 1333, |
| "text": "(Chen et al., 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Query reformulation has been shown to be effective in information retrieval (Lavrenko and Croft, 2001 ). Nogueira and Cho (2017) modeled the query reformulation task as a binary term selection problem (i.e., whether to choose the term in the original query and the documents retrieved using the original query). The selected terms are then concatenated to form the new query. Instead of choosing relevant words, Buck et al. (2018) proposed a sequence-to-sequence model to generate new queries. Das et al. (2019) proposed Multistep Retriever-Reader which explores an iterative retrieve-and-read strategy for open-domain question answering. It formulates the query reformulation problem in the embedding space where the vector representation of the question is changed to improve the performance. Since there is no supervision for training the query reformulator, all these methods use reinforcement learning to maximize the task-specific metrics (e.g. Recall for paragraph ranking, F1 and Exact Matching for span-based MRC). Different from these works, we train the query reformulator using an annotated dataset as supervision and then apply the output to a separate reader model. We leave the exploration of training our model end-to-end using reinforcement learning as future work.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 101, |
| "text": "(Lavrenko and Croft, 2001", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 412, |
| "end": 430, |
| "text": "Buck et al. (2018)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 494, |
| "end": 511, |
| "text": "Das et al. (2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this section, we introduce the essential-termaware retriever-reader model (ET-RR). As shown in Figure 2 , we build a term selector to discover which terms are essential in a question. The selected terms are then used to formulate a more efficient query enabling the retriever to obtain related evidence. The retrieved evidence is then fed to the reader to predict the final answer.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 98, |
| "end": 106, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For a question with q words", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Q = {w Q t } q t=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "along with its N answer choices", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "C = {C n } N n=1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where C n = {w C t } c t=1 , the essential-term selector chooses a subset of essential terms E \u2282 Q, which are then concatenated with each C n to formulate a query. The query for each answer choice, E + C n , is sent to the retriever (e.g. Elastic Search 2 ), and the top K retrieved sentences based on the scores returned by the retriever are then concatenated into the evidence passage P n = {w P t } K t=1 . Next, given these text sequences Q, C, and P = {P n } N n=1 , the reader will determine a matching score for each triple {Q, C n , P n }. The answer choice C n * with the highest score is selected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We first introduce the reader model in Section 3.1 and then the essential term selector in Section 3.2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To simplify notation, we ignore the subscript n denoting the answer choice until the final output layer. In the input layer, all text inputs-the question, answer choices, and passages, i.e., retrieved evidence-are converted into embedded representations. Similar to Wang (2018), we consider the following components for each word: Word Embedding. Pre-trained GloVe word embedding with dimensionality d w = 300. Part-of-Speech Embedding and Named-Entity Embedding. The part-of-speech tags and named entities for each word are mapped to embeddings with dimension 16.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Layer", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "Relation Embedding. A relation between each word in P and any word in Q or C is mapped to an embedding with dimension 10. In the case that multiple relations exist, we select one uniformly at random. The relation is obtained by querying ConceptNet (Speer et al., 2017) . Feature Embeddings. Three handcrafted features are used to enhance the word representations: (1) Word Match; if a word or its lemma of P exists in Q or C, then this feature is 1 (0 otherwise). (2) Word Frequency; a logarithmic term frequency is calculated for each word. (3) Essential Term; for the i-th word in Q, this feature, denoted as w e i , is 1 if the word is an essential term (0 otherwise). Let w e = [w e 1 , w e 2 , . . . , w eq ] denote the essential term vector.", |
| "cite_spans": [ |
| { |
| "start": 248, |
| "end": 268, |
| "text": "(Speer et al., 2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Layer", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "For Q, C, P, all of these components are concatenated to obtain the final word representations", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Layer", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "W Q \u2208 R q\u00d7d Q , W C \u2208 R c\u00d7d C , W P \u2208 R p\u00d7d P , where d Q , d C , d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Layer", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "P are the final word dimensions of Q, C, and P.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Layer", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "As shown in Figure 2 , after obtaining word-level embeddings, attention is added to enhance word representations. Given two word embedding sequences W U , W V , word-level attention is calculated as:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attention Layer", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "M U V = W U U \u2022 (W V V) M U V = softmax(M U V ) W V U = M U V \u2022 (W V V),", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Attention Layer", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Layer", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "U \u2208 R d U \u00d7dw and V \u2208 R d V \u00d7dw", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Layer", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "are two matrices that convert word embedding sequences to dimension d w , and M U V contains dot products between each word in W U and W V , and softmax is applied on M U V row-wise. Three types of attention are calculated using Equation (1):(1) question-aware passage representation W Q P \u2208 R p\u00d7dw ; (2) question-aware choice representation W Q C \u2208 R c\u00d7dw ; and (3) passage-aware choice representation W P C \u2208 R c\u00d7dw .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attention Layer", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "To model the contextual dependency of each text sequence, we use BiLSTMs to process the word representations obtained from the input layer and attention layer: where H q \u2208 R q\u00d7l , H c \u2208 R c\u00d7l , and H p \u2208 R p\u00d7l are the hidden states of the BiLSTMs, ';' is feature-wise concatenation, and l is the size of the hidden states.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sequence Modeling Layer", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H q = BiLSTM[W Q ] H c = BiLSTM[W C ; W P C ; W Q C ] H p = BiLSTM[W P ; W Q P ],", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Sequence Modeling Layer", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "We further convert each question and answer choice into a single vector: q \u2208 R l and c \u2208 R l :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fusion Layer", |
| "sec_num": "3.1.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b1 q = softmax([H q ; w e ] \u2022 w sq ), q = H q \u03b1 q \u03b1 c = softmax(H c \u2022 w sc ), c = H c \u03b1 c ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Fusion Layer", |
| "sec_num": "3.1.4" |
| }, |
| { |
| "text": "where the essential-term feature w e from Section 3.1.1 is concatenated with H q , and w sq and w sc are learned parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fusion Layer", |
| "sec_num": "3.1.4" |
| }, |
| { |
| "text": "Finally, a bilinear sequence matching is calculated between H p and q to obtain a question-aware passage representation, which is used as the final passage representation:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fusion Layer", |
| "sec_num": "3.1.4" |
| }, |
| { |
| "text": "\u03b1 p = softmax(H p \u2022 q); p = H p \u03b1 p . (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fusion Layer", |
| "sec_num": "3.1.4" |
| }, |
| { |
| "text": "When a QA task provides multiple choices for selection, the relationship between the choices can provide useful information to answer the question. Therefore, we integrate a choice interaction layer to handle the semantic correlation between multiple answer choices. Given the hidden state H cn of choice c n and H c i of other choices c i , \u2200i \u2260 n, we calculate the differences between the hidden states and apply max-pooling over the differences:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Choice Interaction", |
| "sec_num": "3.1.5" |
| }, |
| { |
| "text": "c inter = Maxpool(H cn \u2212 1 N \u2212 1 i \u2260 n H c i ), (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Choice Interaction", |
| "sec_num": "3.1.5" |
| }, |
| { |
| "text": "where N is the total number of answer choices. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Choice Interaction", |
| "sec_num": "3.1.5" |
| }, |
| { |
| "text": "For each tuple {q, p n , c n } N n=1 , two scores are calculated by matching (1) the passage and answer choice and (2) question and answer choice. We use a bilinear form for both matchings. Finally, a softmax function is applied over N answer choices to determine the best answer choice:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Output Layer", |
| "sec_num": "3.1.6" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "s pc n = p n W pc c final n ; s qc n = qW qc c final n s = softmax(s pc ) + softmax(s qc ),", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Output Layer", |
| "sec_num": "3.1.6" |
| }, |
| { |
| "text": "where s pc n , s qc n are the scores for answer choice 1 \u2264 n \u2264 N ; s pc , s qc are score vectors for all N choices; and s contains the final scores for each answer choice. During training, we use a crossentropy loss.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Output Layer", |
| "sec_num": "3.1.6" |
| }, |
| { |
| "text": "Essential terms are key words in a question that are crucial in helping a retriever obtain related evidence. Given a question Q and N answer choices C 1 , . . . , C N , the goal is to predict a binary variable y i for each word Q i in the question Q, where y i = 1 if Q i is an essential term and 0 otherwise. To address this problem, we build a neural model, ET-Net, which has the same design as the reader model for the input layer, attention layer, and sequence modeling layer to obtain the hidden state H q for question Q.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Essential Term Selector", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In detail, we take the question Q and the concatenation C of all N answer choices as input to", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Essential Term Selector", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "If an object is attracted to a magnet, the object is most likely made of (A) wood (B) plastic (C) cardboard (D) metal # annotators 5 Annotation If,0; an,0; object,3; is,0; attracted,5; to,0; a,0; magnet,,5; the,0; object,1; is,0; most,0; likely,0; made,2; of,0 ET-Net. Q and C first go through an input layer to convert to the embedded word representation, and then word-level attention is calculated to obtain a choice-aware question representation W C Q as in Equation (1). We concatenate the word representation and word-level attention representation of the question and feed it into the sequence modeling layer:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "H q = BiLSTM[W Q ; W C Q ].", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Question", |
| "sec_num": null |
| }, |
| { |
| "text": "As shown in Figure 2 , the hidden states obtained from the attention layer are then concatenated with the embedded representations of Q and fed into a projection layer to obtain the prediction vector y \u2208 R q for all words in the question:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Question", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y = [H q ; W f Q ] \u2022 w s ,", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Question", |
| "sec_num": null |
| }, |
| { |
| "text": "where w s contains the learned parameters, and W f Q is the concatenation of the POS embedding, NER embedding, relation embedding, and feature embedding from Section 3.1.1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question", |
| "sec_num": null |
| }, |
| { |
| "text": "After obtaining the prediction for each word, we use a binary cross-entropy loss to train the model. During evaluation, we take words with y i greater than 0.5 as essential terms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question", |
| "sec_num": null |
| }, |
| { |
| "text": "In this section, we first discuss the performance of the essential term selector, ET-Net, on a public dataset. We then discuss the performance of the whole retriever-reader pipeline, ET-RR, on multiple open-domain datasets.For both the ET-Net and ET-RR models, we use 96-dimensional hidden states and 1-layer BiLSTMs in the sequence modeling layer. A dropout rate of 0.4 is applied for the embedding layer and the BiLSTMs' output layer. We use adamax (Kingma and Ba, 2014) with a learning rate of 0.02 and batch size of 32. The model is trained for 100 epochs. Our code is released at https://github.com/ nijianmo/arc-etrr-code.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We use the public dataset from Khashabi et al. (2017) which contains 2,223 annotated questions, each accompanied by four answer choices. Table 2 gives an example of an annotated question. As shown, the dataset is annotated for binary classification. For each word in the question, the data measures whether the word is an \"essential\" term according to 5 annotators. We then split the dataset into training, development, and test sets using an 8:1:1 ratio and select the model that performs best on the development set. Table 3 shows the performance of our essential term selector and baseline models from Khashabi et al. (2017) . The second best model (ET Classifier) is an SVM-based model from Khashabi et al. (2017) requiring over 100 handcrafted features. As shown, our ET-Net achieves a comparable result with ET Classifier in terms of the F1 Score. Table 4 shows example predictions made by ET-Net. As shown, ET-Net is capable of selecting most ground-truth essential terms. It rejects certain words such as \"organisms\" which have a high TF-IDF in the corpus but are not relevant to answering a particular question. This shows its ability to discover essential terms according to the context of the question.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 53, |
| "text": "Khashabi et al. (2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 605, |
| "end": 627, |
| "text": "Khashabi et al. (2017)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 695, |
| "end": 717, |
| "text": "Khashabi et al. (2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 137, |
| "end": 144, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 519, |
| "end": 526, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 854, |
| "end": 861, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance on Essential Term Selection", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Multiple-choice QA", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance on Open-domain", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We train and evaluate our proposed pipeline method ET-RR on four open-domain multiplechoice QA datasets. All datasets are associated with a sentence-level corpus. Detailed statistics are shown in Table 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 196, |
| "end": 203, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance on Open-domain", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 ARC : We consider the 'Challenge' set in the ARC dataset and use the provided corpus during retrieval.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance on Open-domain", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u2022 RACE-Open: We adapted the RACE dataset (Lai et al., 2017) to the open-domain setting. Originally, each question in RACE comes", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 59, |
| "text": "(Lai et al., 2017)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance on Open-domain", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Which unit of measurement can be used to describe the length of a desk ? One way animal usually respond to a sudden drop in temperature is by Organisms require energy to survive. Which of the following processes provides energy to the body ? According to the article we know it is to prevent the forests from slowly disappearing.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Example questions", |
| "sec_num": null |
| }, |
| { |
| "text": "Amazon-QA For anyone with small ears, do these fit comfortably or do they feel like they are always going to fall out, not in correctly, etc. Does it remove easily and does it leave any sticky residue behind? thanks in advance. with a specific passage. To enable passage retrieval, we concatenate all passages into a corpus with sentence deduplication. 3", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Example questions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 MCScript-Open: The MCScript (Ostermann et al., 2018) dataset is also adapted to the open-domain setting. Again we concatenate all passages to build the corpus. 4", |
| "cite_spans": [ |
| { |
| "start": 30, |
| "end": 54, |
| "text": "(Ostermann et al., 2018)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Example questions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 Amazon-QA: The Amazon-QA dataset (McAuley and Yang, 2016) is an opendomain QA dataset covering over one million questions across multiple product categories. Each question is associated with a free-form answer. We adapt it into a 2-way multiple-choice setting by randomly sampling an answer from other questions as an answer distractor. We split all product reviews at the sentence-level to build the corpus. We consider three categories from the complete dataset in our experiments.", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 59, |
| "text": "(McAuley and Yang, 2016)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Example questions", |
| "sec_num": null |
| }, |
| { |
| "text": "In the experiments, ET-RR uses ET-Net to choose essential terms in the question. Table 6 shows example predictions on these target datasets. Then it generates a query for each of the N answer choices by concatenating essential terms and the answer choice. For each query, ET-RR obtains the top K sentences returned by the retriever and considers these sentences as a passage for the reader. We set K = 10 for all experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Example questions", |
| "sec_num": null |
| }, |
| { |
| "text": "We compare ET-RR with existing retrieve-andread methods on both datasets. As shown in Table 7, on the ARC dataset, ET-RR outperforms all previous models without using pre-trained models and achieves a relative 8.1% improvement over the second best BiLSTM Max-out method (Mihaylov et al., 2018) . Recently, finetuning on pretrained models has shown great improvement over a wide range of NLP tasks. Sun et al. (2019) proposed a 'Reading Strategies' method to finetune the pre-trained model OpenAI GPT, a language model trained on the BookCorpus dataset (Radford, 2018). They trained Reading Strategies on the RACE dataset to obtain more auxiliary knowledge and then finetune that model on the ARC corpus. Table 8 demonstrates the performance comparison of ET-RR and Reading Strategies on ARC. As shown, though Reading Strategies trained on both ARC and RACE dataset outperforms ET-RR, ET-RR outperforms Reading Strategies using only the ARC dataset at training time.", |
| "cite_spans": [ |
| { |
| "start": 270, |
| "end": 293, |
| "text": "(Mihaylov et al., 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 398, |
| "end": 415, |
| "text": "Sun et al. (2019)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 704, |
| "end": 711, |
| "text": "Table 8", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Example questions", |
| "sec_num": null |
| }, |
| { |
| "text": "On the RACE-Open and MCScript-Open datasets, ET-RR achieves a relative improvement of 24.6% and 10.5% on the test set compared with the second best method IR solver respectively. We also evaluate on multiple categories of the Amazon-QA dataset. As shown in Table 9 , ET-RR increases the accuracy by 10.33% on average compared to the state-of-the-art model Moqa (McAuley and Yang, 2016) . We also compare ET-RR with ET-RR (Concat), which is a variant of our proposed model that concatenates the question and choice as a query for each choice. Among all datasets, ET-RR outperforms ET-RR (concat) consistently which shows the effectiveness of our essential-term-aware retriever.", |
| "cite_spans": [ |
| { |
| "start": 361, |
| "end": 385, |
| "text": "(McAuley and Yang, 2016)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 257, |
| "end": 264, |
| "text": "Table 9", |
| "ref_id": "TABREF10" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Example questions", |
| "sec_num": null |
| }, |
| { |
| "text": "We investigate how each component contributes to model performance. Performance of reader. Our reader alone can be applied on MRC tasks using the given passages. Here, we evaluate our reader on the original RACE dataset to compare with other MRC models as shown in Table 10 . As shown, the recently proposed Reading Strategies and OpenAI GPT models, that finetune generative pre-trained models achieve the highest scores. Among nonpre-trained models, our reader outperforms other baselines: Bi-attn (MRU) (Tay et al., 2018) and Hierarchical Co-Matching (Wang et al., 2018a ) by a relative improvement of 3.8%.", |
| "cite_spans": [ |
| { |
| "start": 505, |
| "end": 523, |
| "text": "(Tay et al., 2018)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 553, |
| "end": 572, |
| "text": "(Wang et al., 2018a", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 265, |
| "end": 273, |
| "text": "Table 10", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation study", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Attention components. Table 11 demonstrates how the attention components contribute to the performance of ET-RR. As shown, ET-RR with all attention components performs the best on the ARC test set. The performance of ET-RR without passage-question attention drops the most significantly out of all the components. It is worth noting that the choice interaction layer gives a further 0.24% boost on test accuracy.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 22, |
| "end": 30, |
| "text": "Table 11", |
| "ref_id": "TABREF13" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation study", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Essential term selection. To understand the contribution of our essential-term selector, we compare ET-RR with two variants: (1) ET-RR (Concat) and (2) ET-RR (TF-IDF). For ET-RR (TF-IDF), we calculate the TF-IDF scores and take words with the top 30% of TF-IDF scores in the question to concatenate with each answer choice as a query. 5 Table 12 shows an ablation study comparing different query formulation methods and amounts of retrieved evidence K. As shown, with the essential term selector ET-Net, the model consistently outperforms other baselines, given different numbers of retrievals K. Performance for all models is best when K = 10. Furthermore, only using TF-IDF to select essential terms in a question is not effective. When K = 10, the ET-RR (TF-IDF) method performs even worse than ET-RR (Concat). This illustrates the challenges in understanding what is essential in a question. Though ET-RR consistently outperforms ET-RR (TF-IDF), the improvement is relatively modest on the Test set (around 1.4%). A similar outcome has been reported in Jansen et al. (2017) ; Khashabi et al. (2017) where essential term extraction methods have shown around 2%-4% gain compared with TF-IDF models and struggle to obtain further improvement on SQA tasks. This consensus might show the discrepancy of essential terms between human and machine (i.e., the essential terms obtained using a human annotated dataset might not be helpful in a machine inference model). Another reason might be the current retrieval method does not effectively use these essential terms and the performance highly depends on the dataset. Note that the ET-RR outperforms ET-RR (TF-IDF) by around 4% on the Dev set. Therefore, how to develop well-formed single or even multi-hop queries using these terms are worth studying in the future. Table 13 shows two major types of error, where the correct answer choice is in bold and the predicted answer choice is in italics. 
Retrieved supporting evidence but failed to reason over it. For the first question, there exists evidence that can justify the answer candidate (C). However, the model chooses (D) which has more words overlapping with its evidence. This shows that the model still lacks the reasoning capability to solve complex questions. Failed to retrieve supporting evidence. For the second question, the retrieved evidence of both the correct answer (D) and the prediction (B) is not helpful to solve the question. Queries such as 'what determines the year of a planet' are needed to acquire the knowledge for solving this question.", |
| "cite_spans": [ |
| { |
| "start": 1057, |
| "end": 1077, |
| "text": "Jansen et al. (2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1080, |
| "end": 1102, |
| "text": "Khashabi et al. (2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 337, |
| "end": 345, |
| "text": "Table 12", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 1814, |
| "end": 1822, |
| "text": "Table 13", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation study", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The elements carbon, hydrogen, and oxygen are parts of many different compounds. Which explains why these three elements can make so many different compounds? (A) They can be solids, liquids, or gases. (B) They come in different sizes and shapes. (C) They combine in different numbers and ratios. * There are many different types of compounds because atoms of elements combine in many different ways (and in different whole number ratios) to form different compounds. (D) They can be a proton, a neutron, or an electron. * Atoms of different elements have a different number of protons, neutrons, and electrons.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Which planet in the solar system has the longest year? (A) The planet closest to the Sun. (B) The planet with the longest day. * The planet with the longest day is Venus; a day on Venus takes 243 Earth days. (C) The planet with the most moons. (D) The planet farthest from the Sun. * The last planet discovered in our solar system is farthest away from the sun. This poses further challenges to design a retriever that can rewrite such queries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We present a new retriever-reader model (ET-RR) for open-domain QA. Our pipeline has the following contributions: (1) we built an essential term selector (ET-Net) which helps the model understand which words are essential in a question leading to more effective search queries when retrieving related evidence; (2) we developed an attentionenhanced reader with attention and fusion among passages, questions, and candidate answers. Experimental results show that ET-RR outperforms existing QA models on open-domain multiplechoice datasets as ARC Challenge, RACE-Open, MCScript-Open and Amazon-QA. We also perform in-depth error analysis to show the limitations of current work. For future work, we plan to explore the directions of (1) constructing multihop query and (2) developing end-to-end retrieverreader model via reinforcement learning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "https://www.elastic.co/products/elasticsearch", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "As short questions might not contain any words which can relate the question to any specific passage or sentence, we only keep questions with more than 15 words.4 We keep questions with more than 10 words rather than 15 words to ensure that there is sufficient data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "According to the annotated dataset, around 30% of the terms in each question are labelled as essential.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank Jade Huang for proofreading the paper, Liang Wang and Daniel Khashabi for sharing code and the annotated dataset with us. We thank all the reviewers for their constructive suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "A systematic classification of knowledge, reasoning, and context within the arc dataset", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Boratko", |
| "suffix": "" |
| }, |
| { |
| "first": "Harshit", |
| "middle": [], |
| "last": "Padigela", |
| "suffix": "" |
| }, |
| { |
| "first": "Divyendra", |
| "middle": [], |
| "last": "Mikkilineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Pritish", |
| "middle": [], |
| "last": "Yuvraj", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajarshi", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mc-Callum", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Achille", |
| "middle": [], |
| "last": "Fokoue", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavan", |
| "middle": [], |
| "last": "Kapanipathi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Mattei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Musa", |
| "suffix": "" |
| }, |
| { |
| "first": "Kartik", |
| "middle": [], |
| "last": "Talamadupula", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Witbrock", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "1st Workshop on Machine Reading for Question Answering", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Boratko, Harshit Padigela, Divyendra Mikki- lineni, Pritish Yuvraj, Rajarshi Das, Andrew Mc- Callum, Maria Chang, Achille Fokoue, Pavan Ka- panipathi, Nicholas Mattei, Ryan Musa, Kartik Ta- lamadupula, and Michael Witbrock. 2018. A sys- tematic classification of knowledge, reasoning, and context within the arc dataset. In 1st Workshop on Machine Reading for Question Answering.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Ask the right questions: Active question reformulation with reinforcement learning", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Buck", |
| "suffix": "" |
| }, |
| { |
| "first": "Jannis", |
| "middle": [], |
| "last": "Bulian", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimiliano", |
| "middle": [], |
| "last": "Ciaramita", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Gesmundo", |
| "suffix": "" |
| }, |
| { |
| "first": "Neil", |
| "middle": [], |
| "last": "Houlsby", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [], |
| "last": "Gajewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Buck, Jannis Bulian, Massimiliano Cia- ramita, Andrea Gesmundo, Neil Houlsby, Wojciech Gajewski, and Wei Wang. 2018. Ask the right ques- tions: Active question reformulation with reinforce- ment learning. In ICLR.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Reading wikipedia to answer opendomain questions", |
| "authors": [ |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Fisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danqi Chen, Adam Fisch, Jason Weston, and Antoine Bordes. 2017. Reading wikipedia to answer open- domain questions. In ACL.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Think you have solved question answering? try arc, the ai2 reasoning challenge", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Isaac", |
| "middle": [], |
| "last": "Cowhey", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Carissa", |
| "middle": [], |
| "last": "Schoenick", |
| "suffix": "" |
| }, |
| { |
| "first": "Oyvind", |
| "middle": [], |
| "last": "Tafjord", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. 2018. Think you have solved question answering? try arc, the ai2 reasoning challenge. CoRR, abs/1803.05457.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Multi-step retrieverreader interaction for scalable open-domain question answering", |
| "authors": [ |
| { |
| "first": "Rajarshi", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Shehzaad", |
| "middle": [], |
| "last": "Dhuliawala", |
| "suffix": "" |
| }, |
| { |
| "first": "Manzil", |
| "middle": [], |
| "last": "Zaheer", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rajarshi Das, Shehzaad Dhuliawala, Manzil Zaheer, and Andrew McCallum. 2019. Multi-step retriever- reader interaction for scalable open-domain question answering. In ICLR.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Framing qa as building and ranking intersentence answer justifications", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Sharp", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Computational Linguistics", |
| "volume": "43", |
| "issue": "", |
| "pages": "407--449", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Jansen, Rebecca Sharp, Mihai Surdeanu, and Pe- ter Clark. 2017. Framing qa as building and ranking intersentence answer justifications. Computational Linguistics, 43:407-449.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension", |
| "authors": [ |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunsol", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "S" |
| ], |
| "last": "Weld", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mandar Joshi, Eunsol Choi, Daniel S. Weld, and Luke S. Zettlemoyer. 2017. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. In ACL.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Learning what is essential in questions", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Khashabi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Khashabi, Tushar Khot, Ashish Sabharwal, and Dan Roth. 2017. Learning what is essential in ques- tions. In CoNLL.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. CoRR, abs/1412.6980.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Controlling information aggregation for complex question answering", |
| "authors": [ |
| { |
| "first": "Heeyoung", |
| "middle": [], |
| "last": "Kwon", |
| "suffix": "" |
| }, |
| { |
| "first": "Harsh", |
| "middle": [], |
| "last": "Trivedi", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Jansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "Niranjan", |
| "middle": [], |
| "last": "Balasubramanian", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ECIR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heeyoung Kwon, Harsh Trivedi, Peter Jansen, Mi- hai Surdeanu, and Niranjan Balasubramanian. 2018. Controlling information aggregation for complex question answering. In ECIR.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Race: Large-scale reading comprehension dataset from examinations", |
| "authors": [ |
| { |
| "first": "Guokun", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Qizhe", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanxiao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [ |
| "H" |
| ], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard H. Hovy. 2017. Race: Large-scale read- ing comprehension dataset from examinations. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Relevancebased language models", |
| "authors": [ |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Lavrenko", |
| "suffix": "" |
| }, |
| { |
| "first": "W. Bruce", |
| "middle": [], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "SIGIR Forum", |
| "volume": "51", |
| "issue": "", |
| "pages": "260--267", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor Lavrenko and W. Bruce Croft. 2001. Relevance- based language models. SIGIR Forum, 51:260-267.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Addressing complex and subjective product-related queries with customer reviews", |
| "authors": [ |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Mcauley", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julian McAuley and Alex Yang. 2016. Addressing complex and subjective product-related queries with customer reviews. In WWW.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Can a suit of armor conduct electricity? a new dataset for open book question answering", |
| "authors": [ |
| { |
| "first": "Todor", |
| "middle": [], |
| "last": "Mihaylov", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Tushar", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sabharwal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Todor Mihaylov, Peter Clark, Tushar Khot, and Ashish Sabharwal. 2018. Can a suit of armor conduct elec- tricity? a new dataset for open book question an- swering. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Efficient and robust question answering from minimal context over documents", |
| "authors": [ |
| { |
| "first": "Sewon", |
| "middle": [], |
| "last": "Min", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sewon Min, Victor Zhong, Richard Socher, and Caim- ing Xiong. 2018. Efficient and robust question an- swering from minimal context over documents. In ACL.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Ms marco: A human generated machine reading comprehension dataset", |
| "authors": [ |
| { |
| "first": "Tri", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mir", |
| "middle": [], |
| "last": "Rosenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Xia", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Saurabh", |
| "middle": [], |
| "last": "Tiwary", |
| "suffix": "" |
| }, |
| { |
| "first": "Rangan", |
| "middle": [], |
| "last": "Majumder", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tri Nguyen, Mir Rosenberg, Xia Song, Jianfeng Gao, Saurabh Tiwary, Rangan Majumder, and Li Deng. 2016. Ms marco: A human generated machine read- ing comprehension dataset. CoRR, abs/1611.09268.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Retrieve-andread: Multi-task learning of information retrieval and reading comprehension", |
| "authors": [ |
| { |
| "first": "Kyosuke", |
| "middle": [], |
| "last": "Nishida", |
| "suffix": "" |
| }, |
| { |
| "first": "Itsumi", |
| "middle": [], |
| "last": "Saito", |
| "suffix": "" |
| }, |
| { |
| "first": "Atsushi", |
| "middle": [], |
| "last": "Otsuka", |
| "suffix": "" |
| }, |
| { |
| "first": "Hisako", |
| "middle": [], |
| "last": "Asano", |
| "suffix": "" |
| }, |
| { |
| "first": "Junji", |
| "middle": [], |
| "last": "Tomita", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyosuke Nishida, Itsumi Saito, Atsushi Otsuka, Hisako Asano, and Junji Tomita. 2018. Retrieve-and- read: Multi-task learning of information retrieval and reading comprehension. In CIKM.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Taskoriented query reformulation with reinforcement learning", |
| "authors": [ |
| { |
| "first": "Rodrigo", |
| "middle": [], |
| "last": "Nogueira", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rodrigo Nogueira and Kyunghyun Cho. 2017. Task- oriented query reformulation with reinforcement learning. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Semeval-2018 task 11: Machine comprehension using commonsense knowledge", |
| "authors": [ |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Ostermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashutosh", |
| "middle": [], |
| "last": "Modi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "SemEval@NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon Ostermann, Michael Roth, Ashutosh Modi, Ste- fan Thater, and Manfred Pinkal. 2018. Semeval- 2018 task 11: Machine comprehension using com- monsense knowledge. In SemEval@NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Improving language understanding by generative pre-training", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Technical report", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford. 2018. Improving language understand- ing by generative pre-training. In Technical report.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Squad: 100, 000+ questions for machine comprehension of text", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Konstantin", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100, 000+ questions for machine comprehension of text. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Conceptnet 5.5: An open multilingual graph of general knowledge", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Speer", |
| "suffix": "" |
| }, |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Chin", |
| "suffix": "" |
| }, |
| { |
| "first": "Catherine", |
| "middle": [], |
| "last": "Havasi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Speer, Joshua Chin, and Catherine Havasi. 2017. Conceptnet 5.5: An open multilingual graph of general knowledge. In AAAI.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Improving machine reading comprehension with general reading strategies", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Dian", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Dong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Cardie", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Sun, Dian Yu, Dong Yu, and Claire Cardie. 2019. Improving machine reading comprehension with general reading strategies. In NAACL.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Multi-range reasoning for machine comprehension", |
| "authors": [ |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Tay", |
| "suffix": "" |
| }, |
| { |
| "first": "Anh", |
| "middle": [], |
| "last": "Luu", |
| "suffix": "" |
| }, |
| { |
| "first": "Siu Cheung", |
| "middle": [], |
| "last": "Tuan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hui", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yi Tay, Luu Anh Tuan, and Siu Cheung Hui. 2018. Multi-range reasoning for machine comprehension. CoRR, abs/1803.09074.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Yuanfudao at semeval-2018 task 11: Three-way attention and relational knowledge for commonsense machine comprehension", |
| "authors": [ |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "SemEval@NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liang Wang. 2018. Yuanfudao at semeval-2018 task 11: Three-way attention and relational knowledge for commonsense machine comprehension. In SemEval@NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A co-matching model for multi-choice reading comprehension", |
| "authors": [ |
| { |
| "first": "Shuohang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiyu", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuohang Wang, Mo Yu, Shiyu Chang, and Jing Jiang. 2018a. A co-matching model for multi-choice read- ing comprehension. In ACL.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "R3: Reinforced ranker-reader for open-domain question answering", |
| "authors": [ |
| { |
| "first": "Shuohang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoxiao", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiguo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Klinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shiyu", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerald", |
| "middle": [], |
| "last": "Tesauro", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuohang Wang, Mo Yu, Xiaoxiao Guo, Zhiguo Wang, Tim Klinger, Wei Zhang, Shiyu Chang, Gerald Tesauro, Bowen Zhou, and Jing Jiang. 2018b. R3: Reinforced ranker-reader for open-domain question answering. In AAAI.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Example of the retrieve-and-read process to solve open-domain questions. Words related with the question are in bold; and words related with C1 and C4 are in italics." |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Here, c inter characterizes the differences between an answer choice c n and other answer choices. The final representation of an answer choice is updated by concatenating the self-attentive answer choice vector and inter-choice representation as c final = [c; c inter ]." |
| }, |
| "TABREF2": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Model</td><td colspan=\"2\">Precision Recall</td><td>F1</td></tr><tr><td>MaxPMI</td><td>0.88</td><td>0.65</td><td>0.75</td></tr><tr><td>SumPMI</td><td>0.88</td><td>0.65</td><td>0.75</td></tr><tr><td>PropSurf</td><td>0.68</td><td>0.64</td><td>0.66</td></tr><tr><td>PropLem</td><td>0.76</td><td>0.64</td><td>0.69</td></tr><tr><td>ET Classifier</td><td>0.91</td><td>0.71</td><td>0.80</td></tr><tr><td>ET-Net</td><td>0.74</td><td>0.90</td><td>0.81</td></tr></table>", |
| "text": "Example of essential term data." |
| }, |
| "TABREF3": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "Performance of different selectors." |
| }, |
| "TABREF4": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Dataset</td><td>Train</td><td>Dev</td><td colspan=\"2\">Test Corpus</td></tr><tr><td>ARC</td><td>1,119</td><td colspan=\"2\">299 1,172</td><td>1.46M</td></tr><tr><td>RACE-Open</td><td>9,531</td><td>473</td><td>528</td><td>0.52M</td></tr><tr><td>MCScript-Open</td><td>1,036</td><td>156</td><td>319</td><td>24.2K</td></tr><tr><td>Amazon-Patio</td><td colspan=\"3\">36,587 4,531 4,515</td><td>2.55M</td></tr><tr><td>Amazon-Auto</td><td colspan=\"3\">49,643 6,205 6,206</td><td>7.32M</td></tr><tr><td>Amazon-Cell</td><td colspan=\"3\">40,842 5,105 5,106</td><td>1.86M</td></tr></table>", |
| "text": "Examples of essential term prediction (in questions) by ET-Net. True positives are marked bold and underlined while false positives are only underlined." |
| }, |
| "TABREF5": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>:</td><td>Statistics on ARC, RACE-Open,</td></tr><tr><td colspan=\"2\">MCScript-Open and Amazon-QA. Corpus size is</td></tr><tr><td colspan=\"2\">the number of sentences.</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF6": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "" |
| }, |
| "TABREF8": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td colspan=\"2\">Training Corpus model</td><td>ARC</td></tr><tr><td>ARC</td><td colspan=\"2\">Reading Strategies 35.0 ET-RR 36.6</td></tr><tr><td>ARC+RACE</td><td colspan=\"2\">Reading Strategies 40.7</td></tr></table>", |
| "text": "Accuracy on multiple-choice selection on ARC, RACE-Open and MCScript-Open." |
| }, |
| "TABREF9": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Model</td><td colspan=\"3\">Amazon Amazon Amazon</td></tr><tr><td/><td>-Patio</td><td>-Auto</td><td>-Cell</td></tr><tr><td>IR solver</td><td>72.80</td><td>73.60</td><td>70.50</td></tr><tr><td>Moqa</td><td>84.80</td><td>86.30</td><td>88.60</td></tr><tr><td>ET-RR (Concat)</td><td>96.19</td><td>95.21</td><td>93.26</td></tr><tr><td>ET-RR</td><td>96.61</td><td>95.96</td><td>93.81</td></tr></table>", |
| "text": "Comparisons of ET-RR and Reading Strategies on ARC." |
| }, |
| "TABREF10": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td colspan=\"3\">: Accuracy on multiple-choice selection on</td></tr><tr><td colspan=\"2\">three product categoris of Amazon-QA.</td><td/></tr><tr><td>Pre-trained</td><td>model</td><td>RACE</td></tr><tr><td/><td>Reading Strategies</td><td>63.8</td></tr><tr><td/><td>OpenAI GPT</td><td>59.0</td></tr><tr><td/><td>ET-RR (reader)</td><td>52.3</td></tr><tr><td/><td>Bi-attn (MRU)</td><td>50.4</td></tr><tr><td/><td>Hier. Co-Matching</td><td>50.4</td></tr></table>", |
| "text": "" |
| }, |
| "TABREF11": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "Experimental results for reader on RACE." |
| }, |
| "TABREF13": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "Ablation test on attention components of ET-RR on ARC. '-' denotes the ablated feature." |
| }, |
| "TABREF14": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Model</td><td colspan=\"2\">ET-RR (Concat)</td><td colspan=\"2\">ET-RR (TF-IDF)</td><td colspan=\"2\">ET-RR</td></tr><tr><td colspan=\"2\">Top K Dev</td><td>Test</td><td>Dev</td><td>Test</td><td>Dev</td><td>Test</td></tr><tr><td>5</td><td>39.26</td><td/><td/><td/><td/></tr></table>", |
| "text": "33.36 39.93 34.73 39.93 35.59 10 38.93 35.33 39.43 35.24 43.96 36.61 20 41.28 34.56 38.59 33.88 42.28 35.67" |
| }, |
| "TABREF15": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "Comparison of query formulation methods and amounts of retrieved evidence (i.e., top K) on the ARC dataset, in terms of percentage accuracy." |
| }, |
| "TABREF16": { |
| "num": null, |
| "type_str": "table", |
| "html": null, |
| "content": "<table/>", |
| "text": "Examples where ET-RR fails on ARC. The retrieved evidence for each answer candidate is marked by *." |
| } |
| } |
| } |
| } |