| { |
| "paper_id": "K18-1042", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:09:22.457664Z" |
| }, |
| "title": "Linguistically-based Deep Unstructured Question Answering", |
| "authors": [ |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Aghaebrahimian", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Charles University", |
| "location": { |
| "postCode": "11800", |
| "settlement": "Praha 1", |
| "country": "Czech Republic" |
| } |
| }, |
| "email": "ebrahimian@ufal.mff.cuni.cz" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper, we propose a new linguisticallybased approach to answering non-factoid open-domain questions from unstructured data. First, we elaborate on an architecture for textual encoding based on which we introduce a deep end-to-end neural model. This architecture benefits from a bilateral attention mechanism which helps the model to focus on a question and the answer sentence at the same time for phrasal answer extraction. Second, we feed the output of a constituency parser into the model directly and integrate linguistic constituents into the network to help it concentrate on chunks of an answer rather than on its single words for generating more natural output. By optimizing this architecture, we managed to obtain near-to-human-performance results and competitive to a state-of-the-art system on SQuAD and MS-MARCO datasets respectively.", |
| "pdf_parse": { |
| "paper_id": "K18-1042", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper, we propose a new linguisticallybased approach to answering non-factoid open-domain questions from unstructured data. First, we elaborate on an architecture for textual encoding based on which we introduce a deep end-to-end neural model. This architecture benefits from a bilateral attention mechanism which helps the model to focus on a question and the answer sentence at the same time for phrasal answer extraction. Second, we feed the output of a constituency parser into the model directly and integrate linguistic constituents into the network to help it concentrate on chunks of an answer rather than on its single words for generating more natural output. By optimizing this architecture, we managed to obtain near-to-human-performance results and competitive to a state-of-the-art system on SQuAD and MS-MARCO datasets respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Reading, comprehending and reasoning over texts and answering a question about them (i.e. Question Answering) is a fundamental aspect of computational intelligence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Question Answering (QA), as a measure of intelligence, has been even suggested to replace Turing test (Clark and Etzioni, 2016) .", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 127, |
| "text": "(Clark and Etzioni, 2016)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The development of large datasets of QA in recent years (Hermann et al., 2015; Hill et al., 2015; Rajpurkar et al., 2016) advanced the field especially for two significant branches of QA namely factoid (Aghaebrahimian and Jur\u010d\u00ed\u010dek, 2016a,b) and non-factoid QA 1 (Rajpurkar et al., 2016) . Non-factoid QA or QA over unstructured data is a somewhat new challenge in open-domain QA. A non-factoid QA system answers questions by reading and comprehending a context. The context in which we assume the answer is mentioned may have different granularities from a single sentence or paragraph to larger units of text. A QA system is supposed to extract a phrase answer from the provided paragraph or sentence depending on its granularity level.", |
| "cite_spans": [ |
| { |
| "start": 56, |
| "end": 78, |
| "text": "(Hermann et al., 2015;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 79, |
| "end": 97, |
| "text": "Hill et al., 2015;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 98, |
| "end": 121, |
| "text": "Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 202, |
| "end": 240, |
| "text": "(Aghaebrahimian and Jur\u010d\u00ed\u010dek, 2016a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 262, |
| "end": 286, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The context for answering questions is usually extracted using an Information Retrieval (IR) technique. Then, a QA system should extract the best answer sentence. There are many studies about extracting answer sentences including but not limited to (He et al., 2015; Yih et al., 2013; Yu et al., 2016; Rao et al., 2016; Aghaebrahimian, 2017a) .", |
| "cite_spans": [ |
| { |
| "start": 249, |
| "end": 266, |
| "text": "(He et al., 2015;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 267, |
| "end": 284, |
| "text": "Yih et al., 2013;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 285, |
| "end": 301, |
| "text": "Yu et al., 2016;", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 302, |
| "end": 319, |
| "text": "Rao et al., 2016;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 320, |
| "end": 342, |
| "text": "Aghaebrahimian, 2017a)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Extracting the final or shortest possible answer from a set of candidate answer sentences is addressed in many studies as well (Zhang et al., 2017; Gong and Bowman, 2017; Shen et al., 2016; Weissenborn et al., 2017a) . Instead of reasoning over and making inference on linguistic symbols (i.e., words or characters), almost all of these models use a neural architecture to encode contexts and questions into a vector representation and to reason over them.", |
| "cite_spans": [ |
| { |
| "start": 127, |
| "end": 147, |
| "text": "(Zhang et al., 2017;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 148, |
| "end": 170, |
| "text": "Gong and Bowman, 2017;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 171, |
| "end": 189, |
| "text": "Shen et al., 2016;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 190, |
| "end": 216, |
| "text": "Weissenborn et al., 2017a)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A typical pattern in most of the current models is the use of a variant of uni-or bi-directional attention schemes (question to context and viceversa) to encode the semantic content of questions' words with a focus on their context's words (Seo et al., 2016; Xiong et al., 2016; Weissenborn et al., 2017b; Wang et al., 2017) . Compared to these models the novelty of our work is in explicitly conducting attention over both the context and the question for each candidate constituent 2 answer (every constituent", |
| "cite_spans": [ |
| { |
| "start": 240, |
| "end": 258, |
| "text": "(Seo et al., 2016;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 259, |
| "end": 278, |
| "text": "Xiong et al., 2016;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 279, |
| "end": 305, |
| "text": "Weissenborn et al., 2017b;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 306, |
| "end": 324, |
| "text": "Wang et al., 2017)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Training set Development set NP 59 % 62 % ROOT 8 % 6 % NNP 5 % 4 % NN 4 % 2 % JJ 3 % 1 % VP 3 % 4 % CD 3 % 2 % PP 2 % 4 % S 2 % 2 % others 11 %(each < 2%) 13 %(each < 2%) . To see the full list of available constituency types in the dataset, please refer to Appendix A.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 25, |
| "end": 166, |
| "text": "set NP 59 % 62 % ROOT 8 % 6 % NNP 5 % 4 % NN 4 % 2 % JJ 3 % 1 % VP 3 % 4 % CD 3 % 2 % PP 2 % 4 % S 2 % 2 % others", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Constituents Type", |
| "sec_num": null |
| }, |
| { |
| "text": "in the context). The fact that this is better than attending only to the question words is investigated and proved according to the results reported in Section 7.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constituents Type", |
| "sec_num": null |
| }, |
| { |
| "text": "Another observation is that a majority of recent studies are purely based on data science where one can barely see a linguistic intuition towards the problem. We show that a pure linguistic intuition could help neural reasoning and attention mechanisms to achieve quantitatively and qualitatively better results in QA.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constituents Type", |
| "sec_num": null |
| }, |
| { |
| "text": "By analyzing a human-generated QA dataset called SQuAD (Rajpurkar et al., 2016) , we realized that people tend to answer questions in units called constituents (Table 1) . We expect an answer to a question to be a valid constituent otherwise it would probably not be grammatical.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 79, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 160, |
| "end": 169, |
| "text": "(Table 1)", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Constituents Type", |
| "sec_num": null |
| }, |
| { |
| "text": "Constituents and Constituency relations are the bases of Phrase Structure Grammar first proposed by Noam Chomsky (Chomsky, 1957) . Phrase Structure Grammar and many of its variants including Government and Binding theory (Chomsky, 1993) or Generalized and Head-driven Phrase Structure Grammar (Gazdar et al., 1994; Pollard and Sag, 1994) define hierarchical binary relations between the constituents of a text, and hence help to realize an exact and natural answer boundary for answer extraction.", |
| "cite_spans": [ |
| { |
| "start": 113, |
| "end": 128, |
| "text": "(Chomsky, 1957)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 221, |
| "end": 236, |
| "text": "(Chomsky, 1993)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 293, |
| "end": 314, |
| "text": "(Gazdar et al., 1994;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 315, |
| "end": 337, |
| "text": "Pollard and Sag, 1994)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constituents Type", |
| "sec_num": null |
| }, |
| { |
| "text": "Having these two points in mind and inspired by attentive pooling networks by Santos et al. (2016), we designed an attentive bilateral model and trained it on the constituents of questions and answers. We attempted to use some information from the parser, so to go beyond a simple wordbased or vector-based representations. The results obtained by the model are near to human performance on SQuAD dataset and competitive to a state-of-the-art system on MS-MARCO dataset. The contributions of our work are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constituents Type", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 A bilateral linguistically-based attention model for Question Answering", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constituents Type", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 Integrating linguistic constituents into a DNN architecture for the QA task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constituents Type", |
| "sec_num": null |
| }, |
| { |
| "text": "In the next section, we review some of recent QA systems with a focus on unstructured QA. In Section 3, we briefly explain the constituency types. Then in Section 4, we discuss the details of our system architecture. In Section 5, we talk about the datasets and the way we prepared them for training. In Sections 6 and 7, we describe the training details and present the results of our experiments. Finally, we explain some ablation studies and error analysis in Section 8 before we conclude in Section 9.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Constituents Type", |
| "sec_num": null |
| }, |
| { |
| "text": "In recent years, QA has been largely benefited from the development of Deep Neural Network (DNN) architectures largely in the form of Convolution Neural Networks (CNN) (LeCun et al., 1998) or Recurrent Neural Networks (RNN) (Elman, 1990 ). QA systems based on semantic parsing (Clarke et al., 2010; Kwiatkowski et al., 2010) , IR-based systems (Yao and Durme, 2014) , cloze-type (Kadlec et al., 2016; Hermann et al., 2015) , factoid (Aghaebrahimian and Jur\u010d\u00ed\u010dek, 2016b; and non-factoid systems (Aghaebrahimian, 2017a; Rajpurkar et al., 2016) are some of the QA variants that have been improved by DNNs. Among all of these varieties, factoid and non-factoid are two most widely studied branches of QA systems.", |
| "cite_spans": [ |
| { |
| "start": 168, |
| "end": 188, |
| "text": "(LeCun et al., 1998)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 224, |
| "end": 236, |
| "text": "(Elman, 1990", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 277, |
| "end": 298, |
| "text": "(Clarke et al., 2010;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 299, |
| "end": 324, |
| "text": "Kwiatkowski et al., 2010)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 344, |
| "end": 365, |
| "text": "(Yao and Durme, 2014)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 379, |
| "end": 400, |
| "text": "(Kadlec et al., 2016;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 401, |
| "end": 422, |
| "text": "Hermann et al., 2015)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 433, |
| "end": 469, |
| "text": "(Aghaebrahimian and Jur\u010d\u00ed\u010dek, 2016b;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 494, |
| "end": 517, |
| "text": "(Aghaebrahimian, 2017a;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 518, |
| "end": 541, |
| "text": "Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In Factoid QA like air traffic information systems (ATIS) or dialogue systems, we answer the questions by extracting an entity from a structured database like relational databases or knowledge graphs. In contrast, in non-factoid systems, answers are extracted mostly from unstructured data like Wikipedia.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Unstructured QA in recent years has been studied with a few distinguishable different settings such as answer selection, answer trigging, and answers extraction. In answer selection (Aghaebrahimian, 2017a; Yu et al., 2016) and answer trigging (Jurczyk et al., 2016) the goal is to find the best answer sentence given each question. These answer sentences may be non-existent in the provided context for answer triggering. In answer extraction (Shen and Klakow, 2006; Sultan et al., 2016) , we extract a chunk of a sentence as the shortest possible answer.", |
| "cite_spans": [ |
| { |
| "start": 206, |
| "end": 222, |
| "text": "Yu et al., 2016)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 243, |
| "end": 265, |
| "text": "(Jurczyk et al., 2016)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 443, |
| "end": 466, |
| "text": "(Shen and Klakow, 2006;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 467, |
| "end": 487, |
| "text": "Sultan et al., 2016)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Answer selection can be used as a measure of machine comprehension (Kadlec et al., 2016; Hermann et al., 2015) . In this setting, a typical QA system reads a text and then answers either multiple-answer (cloze-type) or free-text questions. Cloze-type answers are limited to multiple distinct entities (usually 4 or 5) while a span of words answers free-text questions.", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 88, |
| "text": "(Kadlec et al., 2016;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 89, |
| "end": 110, |
| "text": "Hermann et al., 2015)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The performance of cloze-type systems is a good indication of machine comprehension. However, in QA systems for a real-life application like in dialogue systems or scientists' assistants, the answers, their boundaries and their types (e.g., proper noun, adjective or noun phrase) are not known in advance, and it makes this type of QA more challenging. In this setting, free-text QA or QA over unstructured data (Aghaebrahimian, 2017b; Rajpurkar et al., 2016; Cui et al., 2016) is advocated where answers are spans of multiple consecutive words in large repositories of textual data like Wikipedia.", |
| "cite_spans": [ |
| { |
| "start": 412, |
| "end": 435, |
| "text": "(Aghaebrahimian, 2017b;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 436, |
| "end": 459, |
| "text": "Rajpurkar et al., 2016;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 460, |
| "end": 477, |
| "text": "Cui et al., 2016)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Many successful studies have been performed for free-text (i.e., phrase) answer extraction from SQuAD since its release in 2016. Almost all of these models benefited from a form of DNN architecture and a majority of them integrated a kind of the attention mechanism. Some of these studies integrated attention to predicting the physical location of answers (Xiong et al., 2016; Cui et al., 2016; Hu et al., 2017; Seo et al., 2016) . Others made an effort to find a match between queries and their contexts (Cui et al., 2016) or to compute a global distribution over the tokens in the context given a query (Wang and Jiang, 2016) . Still, some other models integrated other mechanisms like memory networks (Pan et al., 2017) or reinforcement learning (Shen et al., 2016) to enhance their attention performance.", |
| "cite_spans": [ |
| { |
| "start": 357, |
| "end": 377, |
| "text": "(Xiong et al., 2016;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 378, |
| "end": 395, |
| "text": "Cui et al., 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 396, |
| "end": 412, |
| "text": "Hu et al., 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 413, |
| "end": 430, |
| "text": "Seo et al., 2016)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 506, |
| "end": 524, |
| "text": "(Cui et al., 2016)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 606, |
| "end": 628, |
| "text": "(Wang and Jiang, 2016)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 705, |
| "end": 723, |
| "text": "(Pan et al., 2017)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 750, |
| "end": 769, |
| "text": "(Shen et al., 2016)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To add to these efforts, we intend to propose a new perspective on using attention and to enhance it by using linguistic constituents as a linguistically motivated feature.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There is hardly a universal agreement upon the definition of the term 'constituent'. In general, a constituent is an inseparable unit that can appear in different places of a sentence. Instead of defining what a constituent is, linguists define a set of experiments such as replacement or expansion to distinguish between constituents and nonconstituents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linguistic Constituents", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For instance, let's consider the sentence 'Plans for the relay were announced on April 26, 2007, in Beijing, China.' We can replace or expand some of its constituents and rephrase the sentence as 'on April 26, 2007, plans for the relay were announced , in Beijing, China.' or 'Plans for the great and important relay were announced on April 26, 2007, in Beijing, China.' while we are sure that these rephrases are not only both syntactically and semantically correct but also convey the same meaning as the original sentence. Some of the earliest works which tried to integrate more linguistic structures into QA are (Zhang et al., 2017; Xie and Eric, 2017) . Using TreeLSTM, Zhang et al. (2017) tried to integrate linguistic structure into QA implicitly. At the prediction step, they used pointer network (Vinyals et al., 2017) to detect the beginning and the end of answer chunks. In contrast, (Xie and Eric, 2017) explicitly modeled candidate answers as sequences of constituents by encoding individual constituents using a chain oftrees LSTM (CT-LSTM) and tree-guided attention mechanism. However, their formulation of constituents is more complicated than ours and as we will see, a direct use of constituents as answer chunk is much less complicated and yields better results.", |
| "cite_spans": [ |
| { |
| "start": 617, |
| "end": 637, |
| "text": "(Zhang et al., 2017;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 638, |
| "end": 657, |
| "text": "Xie and Eric, 2017)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 806, |
| "end": 828, |
| "text": "(Vinyals et al., 2017)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linguistic Constituents", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this section, we describe how to represent questions, sentences and answers in vector space in Subsection 4.1 and then we train the vectors in Subsection 4.2 using a specific loss function and distance measure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Architecture", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Our goal is to extract constituent answers by loading their vector representations with the semantic content of their question and their containing answer sentence. To achieve this end, we integrated a bilateral attention mechanism into our model which lets us estimate a joint vector representation between answers when they are attending to questions' constituents and when they are attending to sentences' constituents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To encode the semantic information in questions and sentences, we used a simple encoding unit (see Equations 1 to 6 and Figure 1 ). In this unit, W k \u2208 R |V | are words in one-hot vector representations where k th element of each vector is one and others are 0. V are all vocabularies in training questions and answers. E \u2208 R |V |\u00d7de is the embedding matrix and d e is the embedding dimension. The product of the multiplication in Equation 1 is the word embeddings in which each cell W i,t is the word in time step t in sample i. W i,t is the input of forward and backward RNN cells in Equations 2 and 4.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 120, |
| "end": 128, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "As RNN cell, we used Long Short-Term Memory architecture (LSTM) (Hochreiter and Schmidhuber, 1997) . Pan et al. (2017) and Hu et al. (2017) show that bi-directional LSTM architectures provide more accurate representations of textual data. The common practice to form a bidirectional LSTM is to concatenate the last vectors in forward and backward LSTMs. Instead, we used a stepwise max pooling (SWMP) mechanism which takes the most important vectors from forward and backward LSTMs in Equations 3 and 5 and concatenate them in Equations 6.", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 98, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 101, |
| "end": 118, |
| "text": "Pan et al. (2017)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 123, |
| "end": 139, |
| "text": "Hu et al. (2017)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "W i,t = E W k (1) \u2212\u2192 enc i,t = LST M ( \u2212\u2192 enc i,t\u22121 , W i,t ) (2) \u2212\u2192 enc i = SW M P ( \u2212\u2192 enc i,t ) (3) \u2190\u2212 enc i,t = LST M ( \u2190\u2212 enc i,t+1 , W i,t ) (4) \u2190\u2212 enc i = SW M P ( \u2190\u2212 enc i,t ) (5) enc i = [ \u2212\u2192 enc i ; \u2190\u2212 enc i ]", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Using our encoding unit we encode questions and sentences and then concatenate the resulted vectors to generate a joint representation of questions and their answer sentences in Equation 7.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "enc QS i = [enc Q i ; enc S i ]", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In the next step, we need to encode the constituent answers. Our answer encoding unit has two modules, one with attention on questions enc Q i (equations 8-15) and the other with attention on sentences enc S i (equations 16-23). In both modules, we used an architecture similar to the one in the encoding unit with an additional attention unit.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In the answer encoding unit, again the input to LSTM cells are word embeddings generated by lookup table W A i,t . Two attention layers in this unit receive the output sequences of the forward and backward LSTM cells and focus once on questions and once on sentences. This is done using an attention mechanism similar to the one proposed by Santos et al. (2016) .", |
| "cite_spans": [ |
| { |
| "start": 341, |
| "end": 361, |
| "text": "Santos et al. (2016)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In the end, the vectors generated by these two modules are concatenated in h", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "A QS i = [h A Q i ; h A S", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "i ] 3 to form a general attentive representation of constituents with respect to their corresponding questions and sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "At training time, we try to learn the vector representations of questions, sentences, and their constituents jointly. However, we like to learn the vectors in a way that leads to a small distance between questions and their true constituents and a long distance between them and their false constituents. For this purpose, for each pair of question and sentence, we compute one true answer A + QS and some false answer A \u2212 QS vectors. We generated these vectors by passing a correct constituent A + and a random wrong constituent A \u2212 through question-attentive (see equations 8-15) and sentence-attentive (see equations 16-23) modules and by concatenating the outputs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Representation Learning", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In this section, we train our model. Given a question and the constituents associated with its answer sentence, the model generates a score for each constituent. The score is an estimate of how similar the constituent to the gold answer is. Taking the argmax over the scores, the model returns the id of its true predicted constituent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "To train the model, we need to compute the distance between questions and their true constituents and to contrast it with the distance between questions and their false constituents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "There are various measures of distance or similarity between two vectors each with its own merits. Feng et al. (2015) did an exhaustive study on different distance measures for text classification and proposed some new measures including the Geometric mean of Euclidean and Sigmoid Dot product (GESD) (Formula 1) which outperformed other measures in their study.", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 117, |
| "text": "Feng et al. (2015)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We integrated GESD in our work to estimate Figure 1 : The encoding unit. The Embedding lookup uses pre-trained Glove word vectors (Pennington et al., 2014) and updates them through training. The output is the concatenation of max-pooled vectors of LSTM encoders. the distance between questions and their true and false constituents. GESD linearly combines two other measures called L2-norm and inner product. L2-norm is the forward-line semantic distance between two sentences and inner product measures the angle between two sentence vectors.", |
| "cite_spans": [ |
| { |
| "start": 130, |
| "end": 155, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 43, |
| "end": 51, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "DIS(Q, A) = 1 1+exp(\u2212(Q.A)) * 1 1+||Q\u2212A||", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Formula 1: The distance between Question (Q) and Answer (A) vectors. Now everything is ready to train the model. The overall system architecture is illustrated in Figure 3 . We use Hinge loss function (Formula 2) to estimate the loss on each question-answer combination. Hinge function increases the loss with the distance between a question and its true constituents while decreases it with the distance between a question and its false constituents. In Equation 2, enc QS is the joint vector representation of questions and their answer sentences, h", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 163, |
| "end": 171, |
| "text": "Figure 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "A \u2212 QS i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "is the vector of false answers, h A + QS is the vector of true answers. Finally, m is the margin between positive and negative answers. It makes a trade-off between the mistakes in positive and negative classes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "L = \\sum_i \\max(0, m + DIS(enc_{QS_i}, h^{A^+}_{QS_i}) - DIS(enc_{QS_i}, h^{A^-}_{QS_i}))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Formula 2: Hinge function. m is the margin, A \u2212 QS are false and A + QS are true answers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The Stanford Question Answering Dataset (SQuAD) (Rajpurkar et al., 2016) is a dataset for sentence-level (i.e. answer selection) and word-level (i.e. answer extraction) QA. It includes 107,785 question-answer pairs synthesized by crowd workers on 536 Wikipedia articles. The dataset is randomly shuffled and divided into training (80%), development (10%) and test (10%) sets. Due to its large number of questions compared to previous datasets (Hirschman et al., 1999; Richardson et al., 2013) , it is considered a good testbed for data-intensive QA methods. The answers in SQuAD are categorized into ten types including Person, Date, Location, etc (Rajpurkar et al., 2016) . However, there are no statistics available on the constituent type of each answer. To control the vocabulary size we needed to eliminate redundant numeric values, but at the same time, we wanted to parse the contents, and we needed to keep the semantic values of numeric tokens. Hence to preprocess the questions and sentences in the dataset, we removed all nonalphanumeric characters from all contents and then replaced numeric values with '9'. Then we used CoreNLP tool to tokenize and to perform constituency parsing on the contents. After extracting constituents from the tree of sentences and comparing them with gold answers, we realized that 72% of the answers are constituents. Other 21% of the answers had slight divergences from a constituent, like lacking or having a determiner or punctuation mark which were eventually going to be disregarded in the official evaluation script. The remaining 7% was a combination of two smaller constituents or a part of a larger one. In the training set, to use constituents as answers, we replaced non-matching answers with the smallest and most similar constituents. Since at the evaluation time, we needed the gold answers and not their replaced constituents, we did not change the answers in the development set. 
We extracted a total number of 48 different constituents types including both terminal and nonterminal ones from SQuAD. The percentage of each constituent type in training and development sets are presented in Table 1 . The figures for development set are computed only based on exact match answers. We used SQuAD's development set for testing the system and reporting the results. To prepare the dataset for training and evaluating our system we used a state-of-the-art answer sentence selection system (Aghaebrahimian, 2017a) to extract the best answer sentences. The system provides us the best sentence with 94.4 % accuracy given each question. After pre-processing the sentence as explained above, we extracted its constituents and trained the model using the correct constituents as true and other constituents as negative samples. At test time, we used the same procedure to extract the constituents, but we used gold answers as they are without substituting non-matching answerconstituents. Then, we added other constituents as negative samples. For evaluation purpose, we used SQuAD's official evaluation script which computes the exact match and the F1 score. The exact match is the percentage of predictions which exactly match the gold answer and the F1 (Macro-averaged) score is the average overlap between the prediction and ground truth answer while treating them both as bags of tokens, and computing their F1. To experiment our model furthermore, we used MS-MARCO dataset (Nguyen et al., 2015) . As a machine comprehension dataset, MS-MARCO has two fundamental differences with SQuAD. Every question in MS-MARCO has several passages from which the best answer should be retrieved. Moreover, the answers in MS-MARCO are not necessarily sub-spans of the provided contexts so that BLEU and ROUGE are used as the metrics in the official tool of MS-MARCO evaluation. 
During training we used the highest BLEU scored constituent as the answer and in the evaluation, we computed the BLEU and ROUGE scores of the constituents selected by the system. As the results in Table 7 show, our system obtained competitive results to another state-of-the-art system trained on the same dataset.", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 72, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 443, |
| "end": 467, |
| "text": "(Hirschman et al., 1999;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 468, |
| "end": 492, |
| "text": "Richardson et al., 2013)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 648, |
| "end": 672, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 2443, |
| "end": 2466, |
| "text": "(Aghaebrahimian, 2017a)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 3428, |
| "end": 3449, |
| "text": "(Nguyen et al., 2015)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 2149, |
| "end": 2156, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 4015, |
| "end": 4022, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "5" |
| }, |
| { |
| "text": "To evaluate our new architecture and to see how integrating linguistic constituents affects its performance, we set up two settings. We designed one setting for evaluating the effect of using constituents instead of words (constituent-base vs. word-base) and another to evaluate the effect of using an attention mechanism on top of vector training modules (uni- vs. bi-attention). Therefore, we conducted four experiments on both datasets, or eight experiments in total.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In the constituent-base setting, we generated the training and test data as described in Section 5. In the word-base setting, however, we replaced constituents with the tokens in answer sentences for both train and test sets and trained our model to compute two scores for initial and final positions of answer chunks. In the constituent-base setting at test time, we directly used the predicted constituent as the final answer. In the word-base setting, however, we got the final answer using the highest-scored words for initial and final positions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We also investigated the effect of bilateral attention on the model performance. In the bi-attention model, we used the model as described in Section 4. In the uni-attention model, we eliminated the attention on sentences and only used the module for attention on questions. For training our model, we used 300-dimensional pre-trained Glove word vectors (Pennington et al., 2014) to generate the embedding matrix and kept the embedding matrix updated through training. We used 128-dimensional LSTMs for all recurrent networks and used 'Adam' with parameters learning rate=0.001, \u03b2 1 = 0.9, \u03b2 2 = 0.999 for optimization. We set batch size to 32 and dropout rate to 0.5 for all LSTMs and embedding layers. We performed the accuracy check only on the first best answer.", |
| "cite_spans": [ |
| { |
| "start": 354, |
| "end": 379, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment", |
| "sec_num": "6" |
| }, |
| { |
| "text": "The results of our experiments are summarized in Table 2 . By contrasting the results of uni-/biattention and word/constituent-base models, we can see that the proposed bi-attention mechanism with linguistic constituents integrated into it makes a significant improvement on answer extraction. Another interesting observation is that the Exact Match metric benefits from restriction to constituents as answers. Concerning the MS-MARCO dataset, the results are competitive to a state-of-the-art system tested on the same dataset (Wang et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 528, |
| "end": 547, |
| "text": "(Wang et al., 2017)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 49, |
| "end": 56, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Result", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this section, we analyze the SQuAD concerning answer distribution over different query types. Table 1 shows that the NP type constituents are the most prominent type among all other answers. However, to investigate the importance of other types in overall system performance, we performed an ablation study where we studied the influence of each constituent type on overall accuracy. The results are presented in Figure 4 . We also studied how much the model succeeded in retrieving answers from each type. The results are presented in the same figure. This table also shows how often does the method succeed in cases where the correct answer is, in fact, a constituent span. As seen in Figure 4 , the answers are mostly singular noun phrases after which with a significant difference are proper nouns, verb phrases, and prepo-SQuAD Development set MS-MARCO Evaluation set Exact-match(%) F1(%) BLEU ROUGE Logistic Regression (Rajpurkar et al., 2016) 40.00 % 51.00 % --Uni-Attention Word-base ( (Zhang et al., 2017) 69.10 % 78.38 % --BIDAF (Seo et al., 2016) 72.6 % 80.7 % --CCNN (Xie and Eric, 2017) 74.1 % 82.6 % --R-net (Wang et al., 2017) 75.60 % 82.80 % 42.2 42.9 Bi-Attention Constituency-base (this work) 80.72 % 83.25 % 42.1 42.7 Human Performance (Rajpurkar et al., 2016) 82.30 % 91.22 % -- Figure 4 : Blue lines are the contribution of each type in the overall system performance using the best model and at the convergence time. Red bars represent the performance of the model in retrieving each constituent type when the model is converged. Performance is expressed in the exact match metric (%). As a guide to how to read the chars, the first blue line for NP type says that 50% of all correctly extracted answers by the system are NP type-answers. The red line of the same type says that our system managed to retrieve about 87% of all NP-type answers in the dataset. sitional phrases. We can also see how the model performed for each constituent. 
It seems that extracting cardinal numbers is much easier for the model than retrieving roots or full sentences. An analysis of the errors shows that false answer sentences, non-constituent answers, parsing errors, overlapping constituents and unknown words are the primary reasons for the mistakes made by our system. The sentence selection process accounted for about six percent of the incorrect answers. The next primary reason for making mistakes is the constituents which contain other smaller constituents. While in all cases we extract the smallest constituent, in about three percent of overlapping constituents the longer ones are the correct answer. Parsing errors where the constituents are not retrieved correctly and unknown words where the embeddings are not trained properly are responsible for another four percent of the errors. Finally, non-constituent answers led to around eight percent false answers in the system output.",
| "cite_spans": [ |
| { |
| "start": 928, |
| "end": 952, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 995, |
| "end": 996, |
| "text": "(", |
| "ref_id": null |
| }, |
| { |
| "start": 997, |
| "end": 1017, |
| "text": "(Zhang et al., 2017)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 1042, |
| "end": 1060, |
| "text": "(Seo et al., 2016)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1082, |
| "end": 1102, |
| "text": "(Xie and Eric, 2017)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 1125, |
| "end": 1144, |
| "text": "(Wang et al., 2017)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 1258, |
| "end": 1282, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 97, |
| "end": 104, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 416, |
| "end": 424, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 690, |
| "end": 698, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 1302, |
| "end": 1310, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation and Error Analysis", |
| "sec_num": "8" |
| }, |
| { |
| "text": "We described a new linguistically-based end-to-end DNN for Question Answering from unstructured data. This model is a neural formulation in which linguistic constituents are explicitly modeled. It operates an LSTM over the constituents and uses the resulting hidden states to attend both to question and to the encompassing context sentence, thereby enriching the constituents' representation with both. The use of constituents instead of an arbitrary string of words in answers improves the system performance in three ways. First, it increases the precision of the system. By looking at the small gap between the F1 and the exact match metrics in our system and comparing it to those of the other systems, we can see that the ratio of exact-match answers in our system is higher than that of the other ones. Second, it helps an answer to look more like a human-generated one. Considering prediction and ground truth as bags of tokens, the F1 (Macro-averaged) metric computes the average overlap between the prediction and ground truth answer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "9" |
| }, |
| { |
| "text": "While a predicted answer may have a full overlap with the ground truth and hence gain a high F1 score, due to the irrelevant words it contains, it poses an incoherent answer to users. The larger the gap between exact match and F1 measures, the more inappropriate words appear in answers. This is primarily an essential factor in the overall quality of dialogue QA systems where users expect to receive a natural and human-generated-like answer. Last but not least, imposing constraints on the candidate space limits errors and makes the system more efficient by decreasing the search space and weeding out non-relevant answers. In the future, we plan to integrate dependency relations into the model by designing a larger model and evaluating it on other QA datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "9" |
| }, |
| { |
| "text": "While the answer to a non-factoid question is a chunk of one or more adjacent words, the answer to a factoid question is only an entity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "From now on, and for the sake of brevity, by constituents we mean linguistic constituents as they are referred to in Phrase Structure Grammar (Chomsky, 1957).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "All concatenations are performed on the last layer (i.e. data dimensions).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This research was partially funded by the Ministry of Education, Youth and Sports of the Czech Republic (project LM2015071), by Charles University SVV project number 260 453 and GAUK 207-10/250098 of Charles University in Prague.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| }, |
| { |
| "text": " ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Full Constituent types", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Constrained deep answer sentence selection", |
| "authors": [ |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Aghaebrahimian", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 20th International Conference on Text, Speech, and Dialogue (TSD)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ahmad Aghaebrahimian. 2017a. Constrained deep an- swer sentence selection. In Proceedings of the 20th In- ternational Conference on Text, Speech, and Dialogue (TSD).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Hybrid Deep Open-Domain Question Answering", |
| "authors": [ |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Aghaebrahimian", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 8th Language and Technology Conference (LTC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ahmad Aghaebrahimian. 2017b. Hybrid Deep Open- Domain Question Answering. In Proceedings of the 8th Language and Technology Conference (LTC).", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Constraint-Based Open-Domain Question Answering Using Knowledge Graph Search", |
| "authors": [ |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Aghaebrahimian", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Jur\u010d\u00ed\u010dek", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 19th International Conference on Text, Speech and Dialogue (TSD), LNAI 9924", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ahmad Aghaebrahimian and Filip Jur\u010d\u00ed\u010dek. 2016a. Constraint-Based Open-Domain Question Answering Using Knowledge Graph Search. In Proceedings of the 19th International Conference on Text, Speech and Di- alogue (TSD), LNAI 9924.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Open-domain Factoid Question Answering via Knowledge Graph Search", |
| "authors": [ |
| { |
| "first": "Ahmad", |
| "middle": [], |
| "last": "Aghaebrahimian", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Jur\u010d\u00ed\u010dek", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Workshop on Human-Computer Question Answering, The North American Chapter of the Association for Computational Linguistics (NAACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ahmad Aghaebrahimian and Filip Jur\u010d\u00ed\u010dek. 2016b. Open-domain Factoid Question Answering via Knowl- edge Graph Search. In Proceedings of the Workshop on Human-Computer Question Answering, The North American Chapter of the Association for Computa- tional Linguistics (NAACL).", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Large-scale simple question answering with memory networks", |
| "authors": [ |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Usunier", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.02075" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antoine Bordes, Nicolas Usunier, Sumit Chopra, and Jason Weston. 2015. Large-scale simple question answering with memory networks. arXiv preprint arXiv:1506.02075.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Reading wikipedia to answer open-domain questions", |
| "authors": [ |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Fisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1704.00051" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danqi Chen, Adam Fisch, Jason Weston, and Antoine Bordes. 2017. Reading wikipedia to answer open-do- main questions. In arXiv:1704.00051, 2017a.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Syntactic structures. The Hague", |
| "authors": [ |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Chomsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1957, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noam Chomsky. 1957. Syntactic structures. The Hague, Paris: Mouton.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Lectures on Government and Binding: The Pisa Lectures", |
| "authors": [ |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Chomsky", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noam Chomsky. 1993. Lectures on Government and Binding: The Pisa Lectures. Mouton de Gruyter.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "My computer is an honor student but how intelligent is it? standardized tests as a measure of ai", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Clark and Oren Etzioni. 2016. My computer is an honor student but how intelligent is it? standardized tests as a measure of ai. AI Magazine.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Driving semantic parsing from the worlds response", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Clarke", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Goldwasser", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [ |
| "Roth" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Conference on Computational Natural Language Learning (CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Clarke, Dan Goldwasser, Ming-Wei Chang, and Dan Roth. 2010. Driving semantic parsing from the worlds response. In Proceedings of the Conference on Computational Natural Language Learning (CoNLL).", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Attention-overattention neural networks for reading comprehension", |
| "authors": [ |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Cui", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhipeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Shijin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Guoping", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1607.04423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yiming Cui, Zhipeng Chen, Si Wei, Shijin Wang, Ting Liu, and Guoping Hu. 2016. Attention-over- attention neural networks for reading comprehension. arXiv:1607.04423.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Finding structure in time", |
| "authors": [ |
| { |
| "first": "Jeffry", |
| "middle": [ |
| "L" |
| ], |
| "last": "Elman", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Cognitive Science", |
| "volume": "14", |
| "issue": "2", |
| "pages": "179--211", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffry L. Elman. 1990. Finding structure in time. Cog- nitive Science, 14(2):179-211.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Applying deep learning to answer selection: a study and an open task", |
| "authors": [ |
| { |
| "first": "Minwei", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "R" |
| ], |
| "last": "Glass", |
| "suffix": "" |
| }, |
| { |
| "first": "Lidan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of IEEE ASRU Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minwei Feng, Bing Xiang, Michael R. Glass, Lidan Wang, and Bowen Zhou. 2015. Applying deep learn- ing to answer selection: a study and an open task. In Proceedings of IEEE ASRU Workshop.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Generalized Phrase Structure Grammar", |
| "authors": [ |
| { |
| "first": "Gerald", |
| "middle": [], |
| "last": "Gazdar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ewan", |
| "middle": [ |
| "H" |
| ], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "K" |
| ], |
| "last": "Pullum", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [ |
| "A" |
| ], |
| "last": "Sag", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gerald Gazdar, Ewan H. Klein, Geoffrey K. Pullum, and Ivan A. Sag. 1994. Generalized Phrase Structure Grammar. Blackwell, Oxford.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Ruminating reader: Reasoning with gated multi-hop attention", |
| "authors": [ |
| { |
| "first": "Yichen", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Samuel R Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1704.07415" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yichen Gong and Samuel R Bowman. 2017. Ruminat- ing reader: Reasoning with gated multi-hop attention. arXiv:1704.07415.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Multiperspective sentence similarity modeling with convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing(EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hua He, Kevin Gimpel, and Jimmy Lin. 2015. Multi- perspective sentence similarity modeling with convolu- tional neural networks. In Proceedings of the Confer- ence on Empirical Methods in Natural Language Pro- cessing(EMNLP).", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Pairwise word interaction modeling with deep neural networks for semantic similarity measurement", |
| "authors": [ |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of The North American Chapter of the Association for Computational Linguistics (NAACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hua He and Jimmy Lin. 2016. Pairwise word interac- tion modeling with deep neural networks for seman- tic similarity measurement. In Proceedings of The North American Chapter of the Association for Com- putational Linguistics (NAACL).", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Teaching machines to read and comprehend", |
| "authors": [ |
| { |
| "first": "Karl", |
| "middle": [], |
| "last": "Moritz Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Kocisky", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Lasse", |
| "middle": [], |
| "last": "Espeholt", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Kay", |
| "suffix": "" |
| }, |
| { |
| "first": "Mustafa", |
| "middle": [], |
| "last": "Suleyman", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Su- leyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. In Proceedings of Advances in Neural Information Processing Systems.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "The goldilocks principle: Reading children's books with explicit memory representations", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1511.02301" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Hill, Antoine Bordes, Sumit Chopra, and Ja- son Weston. 2015. The goldilocks principle: Reading children's books with explicit memory representations. arXiv :1511.02301.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Deep read: A reading comprehension system", |
| "authors": [ |
| { |
| "first": "Lynette", |
| "middle": [], |
| "last": "Hirschman", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Light", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Breck", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Burger", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proceedings of Association for Computational Linguistics (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lynette Hirschman, Marc Light, Eric Breck, and John D. Burger. 1999. Deep read: A reading com- prehension system. In Proceedings of Association for Computational Linguistics (ACL).", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "Jurgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Comput", |
| "volume": "9", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and Jurgen Schmidhuber. 1997. Long short-term memory. Neural Comput., 9(8).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Reinforced mnemonic reader for machine comprehension", |
| "authors": [ |
| { |
| "first": "Minghao", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuxing", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1705.02798" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minghao Hu, Yuxing Peng, and Xipeng Qiu. 2017. Reinforced mnemonic reader for machine comprehen- sion. arXiv:1705.02798.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Selqa: A new benchmark for selection-based question answering", |
| "authors": [ |
| { |
| "first": "Tomasz", |
| "middle": [], |
| "last": "Jurczyk", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Jinho", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 28th International Conference on Tools with Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomasz Jurczyk, Michael Zhai, and Jinho D. Choi. 2016. Selqa: A new benchmark for selection-based question answering. In Proceedings of the 28th In- ternational Conference on Tools with Artificial Intel- ligence.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Text understanding with the attention sum reader network", |
| "authors": [ |
| { |
| "first": "Rudolf", |
| "middle": [], |
| "last": "Kadlec", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Schmid", |
| "suffix": "" |
| }, |
| { |
| "first": "Ondrej", |
| "middle": [], |
| "last": "Bajgar", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rudolf Kadlec, Martin Schmid, Ondrej Bajgar, and Jan Kleindienst. 2016. Text understanding with the atten- tion sum reader network. In Proceedings of the Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Inducing probabilistic ccg grammars from logical form with higher-order unification", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Luke Zettlemoyer, Sharon Goldwa- ter, and Mark Steedman. 2010. Inducing probabilistic ccg grammars from logical form with higher-order uni- fication. In Proceedings of the Conference on Empiri- cal Methods in Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Gradient-based learning applied to document recognition", |
| "authors": [ |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| }, |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Haffner", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of the IEEE", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yann LeCun, Leon Bottou, Yoshua Bengio, and Patrick Haffner. 1998. Gradient-based learning applied to doc- ument recognition. In Proceedings of the IEEE.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The Stanford CoreNLP natural language processing toolkit", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenny", |
| "middle": [], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [ |
| "J" |
| ], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mcclosky", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "proceedings of the Association for Computational Linguistics (ACL) System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher D. Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven J. Bethard, and David McClosky. 2014. The Stanford CoreNLP natural language pro- cessing toolkit. In proceedings of the Association for Computational Linguistics (ACL) System Demonstra- tions.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Ms marco: A human generated machine reading comprehension dataset", |
| "authors": [ |
| { |
| "first": "Tri", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mir", |
| "middle": [], |
| "last": "Rosenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Xia", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Saurabh", |
| "middle": [], |
| "last": "Tiwary", |
| "suffix": "" |
| }, |
| { |
| "first": "Rangan", |
| "middle": [], |
| "last": "Majumder", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tri Nguyen, Mir Rosenberg, Xia Song, Jianfeng Gao, Saurabh Tiwary, Rangan Majumder, and Li Deng. 2015. Ms marco: A human generated machine read- ing comprehension dataset. CoRR, abs/1611.09268.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Memen: Multi-layer embedding with memory networks for machine comprehension", |
| "authors": [ |
| { |
| "first": "Boyuan", |
| "middle": [], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhou", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1707.09098" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Boyuan Pan, Hao Li, Zhou Zhao, Bin Cao, Deng Cai, and Xiaofei He. 2017. Memen: Multi-layer embed- ding with memory networks for machine comprehen- sion. arXiv:1707.09098.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Procedings of the conference Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christo- pher D. Manning. 2014. Glove: Global vectors for word representation. In Procedings of the conference Empirical Methods in Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Head-driven phrase structure grammar", |
| "authors": [ |
| { |
| "first": "Carl", |
| "middle": [], |
| "last": "Pollard", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [ |
| "A" |
| ], |
| "last": "Sag", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carl Pollard and Ivan A. Sag. 1994. Head-driven phrase structure grammar. University of Chicago Press, Chicago.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Squad: 100,000+ questions for machine comprehension of text", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Konstantin", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.05250" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100,000+ questions for ma- chine comprehension of text. arXiv:1606.05250.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Noisecontrastive estimation for answer selection with deep neural networks", |
| "authors": [ |
| { |
| "first": "Jinfeng", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 25th ACM International on Conference on Information and Knowledge Management, CIKM '16", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinfeng Rao, Hua He, and Jimmy Lin. 2016. Noise- contrastive estimation for answer selection with deep neural networks. In Proceedings of the 25th ACM In- ternational on Conference on Information and Knowl- edge Management, CIKM '16.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Mctest: A challenge dataset for the open-domain machine comprehension of text", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Burges", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "C" |
| ], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Renshaw", |
| "middle": [], |
| "last": "Erin", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Richardson, Burges, Christopher J.C., and Renshaw Erin. 2013. Mctest: A challenge dataset for the open-domain machine comprehension of text. In Proceedings of Empirical Methods in Natural Lan- guage Processing(EMNLP).", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Attentive pooling networks", |
| "authors": [ |
| { |
| "first": "Santos", |
| "middle": [], |
| "last": "Cicero Dos", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1602.03609v1" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cicero dos Santos, Ming Tan, Bing Xiang, and Bowen Zhou. 2016. Attentive pooling networks. arXiv:1602.03609v1.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Bidirectional attention flow for machine comprehension", |
| "authors": [ |
| { |
| "first": "Minjoon", |
| "middle": [], |
| "last": "Seo", |
| "suffix": "" |
| }, |
| { |
| "first": "Aniruddha", |
| "middle": [], |
| "last": "Kembhavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Farhadi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1611.01603" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2016. Bidirectional attention flow for machine comprehension. arXiv:1611.01603.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Exploring correlation of dependency relation paths for answer extraction", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Dietrich", |
| "middle": [], |
| "last": "Klakow", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 21st International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Shen and Dietrich Klakow. 2006. Exploring corre- lation of dependency relation paths for answer extrac- tion. In Proceedings of the 21st International Confer- ence on Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Reasonet: Learning to stop reading in machine comprehension", |
| "authors": [ |
| { |
| "first": "Yelong", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Po-Sen", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Weizhu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Workshop on Cognitive Computation: Integrating neural and symbolic approaches 2016 co-located with the 30th Annual Conference on Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yelong Shen, Po-Sen Huang, Jianfeng Gao, and Weizhu Chen. 2016. Reasonet: Learning to stop read- ing in machine comprehension. In Proceedings of the Workshop on Cognitive Computation: Integrating neu- ral and symbolic approaches 2016 co-located with the 30th Annual Conference on Neural Information Pro- cessing Systems (NIPS 2016).", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "A joint model for answer sentence ranking and answer extraction", |
| "authors": [ |
| { |
| "first": "Vittorio", |
| "middle": [], |
| "last": "Md Arafat Sultan", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Castelli", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Florian", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Md Arafat Sultan, Vittorio Castelli, and Radu Florian. 2016. A joint model for answer sentence ranking and answer extraction. In Transactions of the Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Pointer networks", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Meire", |
| "middle": [], |
| "last": "Fortunato", |
| "suffix": "" |
| }, |
| { |
| "first": "Navdeep", |
| "middle": [], |
| "last": "Jaitly", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Meire Fortunato, and Navdeep Jaitly. 2017. Pointer networks. In Proceedings of NIPS.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Machine comprehension using match-lstm and answer pointer", |
| "authors": [ |
| { |
| "first": "Shuohang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1608.07905" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuohang Wang and Jing Jiang. 2016. Machine comprehension using match-lstm and answer pointer. arXiv:1608.07905.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Gated self-matching networks for reading comprehension and question answering", |
| "authors": [ |
| { |
| "first": "Wenhui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Baobao", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Association for Computational Linguistics(ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenhui Wang, Nan Yang, Furu Wei, Baobao Chang, and Ming Zhou. 2017. Gated self-matching networks for reading comprehension and question answering. In Proceedings of the Association for Computational Lin- guistics(ACL).", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Sentence similarity learning by lexical decomposition and composition", |
| "authors": [ |
| { |
| "first": "Zhiguo", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Haitao", |
| "middle": [], |
| "last": "Mi", |
| "suffix": "" |
| }, |
| { |
| "first": "Abraham", |
| "middle": [], |
| "last": "Ittycheriah", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1602.07019" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiguo Wang, Haitao Mi, and Abraham Ittycheriah. 2016. Sentence similarity learning by lexical decom- position and composition. arXiv:1602.07019.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Fastqa: A simple and efficient neural architecture for question answering", |
| "authors": [ |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Weissenborn", |
| "suffix": "" |
| }, |
| { |
| "first": "Georg", |
| "middle": [], |
| "last": "Wiese", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Seiffe", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1703.04816" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dirk Weissenborn, Georg Wiese, and Laura Seiffe. 2017a. Fastqa: A simple and efficient neural architec- ture for question answering. arXiv:1703.04816.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Making neural qa as simple as possible but not simpler", |
| "authors": [ |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Weissenborn", |
| "suffix": "" |
| }, |
| { |
| "first": "Georg", |
| "middle": [], |
| "last": "Wiese", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Seiffe", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Computational Natural Language Learning (CoNLL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dirk Weissenborn, Georg Wiese, and Laura Seiffe. 2017b. Making neural qa as simple as possible but not simpler. In Proceedings of the Computational Natural Language Learning (CoNLL).", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "A constituentcentric neural architecture for reading comprehension", |
| "authors": [ |
| { |
| "first": "Pengtao", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Xing", |
| "middle": [], |
| "last": "Eric", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pengtao Xie and Xing Eric. 2017. A constituent- centric neural architecture for reading comprehension. In Proceedings of the 55th Annual Meeting of the As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Dynamic coattention networks for question answering", |
| "authors": [ |
| { |
| "first": "Caiming", |
| "middle": [], |
| "last": "Xiong", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1611.01604" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Caiming Xiong, Victor Zhong, and Richard Socher. 2016. Dynamic coattention networks for question an- swering. arXiv:1611.01604.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Information extraction over structured data: Question answering with freebase", |
| "authors": [ |
| { |
| "first": "Xuchen", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xuchen Yao and Benjamin Van Durme. 2014. Infor- mation extraction over structured data: Question an- swering with freebase. In Proceedings of Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Question answering using enhanced lexical semantic models", |
| "authors": [ |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Wen-Tau Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrzej", |
| "middle": [], |
| "last": "Meek", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pastusiak", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of Association for Computational Linguistics(ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wen-tau Yih, Ming-Wei Chang, Christopher Meek, and Andrzej Pastusiak. 2013. Question answering using enhanced lexical semantic models. In Proceedings of Association for Computational Linguistics(ACL).", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Deep learning for answer sentence selection", |
| "authors": [ |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Karl", |
| "middle": [ |
| "Moritz" |
| ], |
| "last": "Hermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Pulman", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the NIPS Deep Learning Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lei Yu, Karl Moritz Hermann, Phil Blunsom, and Stephen Pulman. 2016. Deep learning for answer sen- tence selection. Proceedings of the NIPS Deep Learn- ing Workshop.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Exploring question understanding and adaptation in neural-network-based question answering", |
| "authors": [ |
| { |
| "first": "Junbei", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Lirong", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1703.04617" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junbei Zhang, Xiaodan Zhu, Qian Chen, Lirong Dai, and Hui Jiang. 2017. Exploring question understand- ing and adaptation in neural-network-based question answering. arXiv:1703.04617.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Question-aware (equations 16-23) and sentence-aware (equations 8-15) encoding", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "text": "The system architecture. Two answer modules, one with attention on questions and the other on sentences, provide a joint representation containing all required information with respect to questions and sentences for making inference on true constituents.", |
| "num": null, |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "html": null, |
| "num": null, |
| "text": "The distribution of constituent types of answers in SQuAD training and development sets. For constituency parsing, we used the Standford CoreNLP tool", |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "num": null, |
| "text": "The performances of different models in the exact match and F1 metrics for SQuAD and BLEU and ROUGE for the MS-MARCO dataset.", |
| "content": "<table><tr><td/><td/><td colspan=\"2\">Model Performance</td></tr><tr><td/><td/><td colspan=\"2\">Type Contribution</td></tr><tr><td/><td>0.8</td><td/></tr><tr><td>Performance</td><td>0.4 0.6</td><td/></tr><tr><td/><td>0.2</td><td/></tr><tr><td/><td>0.0</td><td>NP ROOT NNP NN Constituent Type JJ VP CD PP</td><td>S Others</td></tr></table>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |