| { |
| "paper_id": "E17-1011", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:53:41.983687Z" |
| }, |
| "title": "Which is the Effective Way for Gaokao: Information Retrieval or Neural Networks?", |
| "authors": [ |
| { |
| "first": "Shangmin", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "postCode": "100190", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "shangmin.guo@nlpr.ia.ac.cn" |
| }, |
| { |
| "first": "Xiangrong", |
| "middle": [], |
| "last": "Zeng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "postCode": "100190", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "xiangrong.zeng@nlpr.ia.ac.cn" |
| }, |
| { |
| "first": "Shizhu", |
| "middle": [], |
| "last": "He", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "postCode": "100190", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "shizhu.he@nlpr.ia.ac.cn" |
| }, |
| { |
| "first": "Kang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "postCode": "100190", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "kliu@nlpr.ia.ac.cn" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition", |
| "institution": "Chinese Academy of Sciences", |
| "location": { |
| "postCode": "100190", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "jzhao@nlpr.ia.ac.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "As one of the most important test of China, Gaokao is designed to be difficult enough to distinguish the excellent high school students. In this work, we detailed the Gaokao History Multiple Choice Questions(GKHMC) and proposed two different approaches to address them using various resources. One approach is based on entity search technique (IR approach), the other is based on text entailment approach where we specifically employ deep neural networks(NN approach). The result of experiment on our collected real Gaokao questions showed that they are good at different categories of questions, i.e. IR approach performs much better at entity questions(EQs) while NN approach shows its advantage on sentence questions(SQs). Our new method achieves state-of-the-art performance and show that it's indispensable to apply hybrid method when participating in the real-world tests.", |
| "pdf_parse": { |
| "paper_id": "E17-1011", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "As one of the most important test of China, Gaokao is designed to be difficult enough to distinguish the excellent high school students. In this work, we detailed the Gaokao History Multiple Choice Questions(GKHMC) and proposed two different approaches to address them using various resources. One approach is based on entity search technique (IR approach), the other is based on text entailment approach where we specifically employ deep neural networks(NN approach). The result of experiment on our collected real Gaokao questions showed that they are good at different categories of questions, i.e. IR approach performs much better at entity questions(EQs) while NN approach shows its advantage on sentence questions(SQs). Our new method achieves state-of-the-art performance and show that it's indispensable to apply hybrid method when participating in the real-world tests.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Gaokao, namely the National College Entrance Examination, is the most important examination for Chinese senior high school students. Every college in China, no matter it is Top10 or Top100, would only accept the exam-takers whose Gaokao score is higher than its threshold score. As there are almost 10 million students take the examination every year, Gaokao needs to be difficult enough to distinguish the excellent students. Therefore, it includes various types of questions such as multiple-choice questions, short-answer \u2020 Both of the two authors contributed equally to this paper. questions and essays and it covers several different subjects, like Chinese, Math, History and etc. In this work, we focus on Gaokao History Multiple Choice questions which is denoted as GKHMC. Both of the factoid question answering task and reading comprehension task are similar to GKHMC. But, the GKHMC questions have their own characteristics. A multiple-choice question in GKHMC such as the examples shown in Figure 1 is composed of a question stem and four candidates. Our goal is to figure out the only one correct candidate. But, there are certain obstacles to achieve it. First, several background sentencess and a lead-in sentence conjointly constitutes the question stem, which makes these questions more complicated than former one-sentence-long factoid questions that can be handled by the existing approaches, like (Kolomiyet and Moens, 2011; Kwiatkowski et al., 2013; Berant and Liang, 2014; Yih et al., 2015) . Secondly, the background sentences generally contain various clues to figure out the historical events or personages which may be the perdue key to answer the question. These clues may include Tang poem and Song iambic verse, domainspecific expressions, even some mixture of mod- ern Chinese and excerpt from ancient books and etc. 
The dependence of background knowledge makes the models that are designed for reading comprehension such as (Pe\u00f1as et al., 2013; Richardson et al., 2013) fail. Thirdly, the diversity of candidates' granularity, i.e. candidates can either be entities or sentences, makes it harder to match the candidate and stem. So, the answer selection is disparate from the former approaches whose candidates are usually just entities. Lastly, as the candidates are already given, the answer generation step in former neural network approaches based question answering system is no longer necessary.", |
| "cite_spans": [ |
| { |
| "start": 1415, |
| "end": 1442, |
| "text": "(Kolomiyet and Moens, 2011;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1443, |
| "end": 1468, |
| "text": "Kwiatkowski et al., 2013;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1469, |
| "end": 1492, |
| "text": "Berant and Liang, 2014;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1493, |
| "end": 1510, |
| "text": "Yih et al., 2015)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1953, |
| "end": 1973, |
| "text": "(Pe\u00f1as et al., 2013;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1974, |
| "end": 1998, |
| "text": "Richardson et al., 2013)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1000, |
| "end": 1008, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As mentioned above and shown in Figure 1 , in accordance with candidates' granularity, the GKHMC questions can be divided into two types: entity questions(EQs) and sentence questions(SQs). Entity questions are those whose candidates are all entities, no matter they are people, dynasties, warfares or something else. And, sentence questions are those whose candidates are all sentences. We observe that such two types of questions have their own specific characteristics. Most of background sentences in EQs are description of the right candidate, so it may be particularly suitable to apply information retrieval like approach to handle them. Meanwhile, as the background sentences and lead-in sentences in SQs are more like the entailing text, these questions aren't appropriate to be addressed by lexically searching and matching. Therefore, it seems that it's more resonable to resolve SQs by using textual reasoning techniques.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 32, |
| "end": 41, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we wonder about which kind of approach is more effective for GKHMC. Furthermore, whether we should select specific method to work out different types of questions. In terms of various characteristics of GKHMC questions, we introduce two independent approaches to address them. One is based on entity search technique (IR approach) and the other is based on a text entailment approach where we specifically employ deep neural networks (NN approach). In IR approach, we use the key entities and relationships extracted from questions to form a query, then inquire this query in all the text resources to get the most relevant candidate. In NN approach, we take the question text and every candidate to form four statements respectively, then judge how possible every statement is right so that we can figure out which is most likely to be the correct answer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To test the two approaches' performance, we collected and classified the multiple-choice questions in Gaokao test papers from 2011 to 2015 all over the country, and they are released. From the result, we find that the performance of two approaches are significantly discrepant at each kind of questions. That is, IR approach shows noticeable advantages on EQs, while NN approach performs much better on SQs. This will be further discussed in Section 4.4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, our contributions are as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We gave a detailed description of the Gaokao History Multiple Choice Questions task and showed its importance and difficulty.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We released a dataset 1 for this task. The dataset is manually collected and classified. All questions in the dataset are real Gaokao quesitons from 2011 to 2015.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We introduced two different approaches for this task. Each approach achieved a promising results. We also compared this two approaches and found that they are complementary, i.e. they are good at different types of questions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We introduced permanent provisional memory network(PPMN) to model the joint background knowledge and sentences in question stem, and it beats existing memory networks on SQs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As described in the Introduction, we collected the historical multiple-choice questions from Gaokao all over the country in rencent five years. However, quite a lot contain graphs or tables which require the techniques beyond natural language processing(NLP). So, we filter out this part of questions and manually classified the left into two parts: EQs and SQs. The number of different kinds of questions are listed in Table 1 . The examples of different types of questions translated into English are shown in Figure 1 . It is worth mentioning that there is a special type of questions on test papers named sequential questions. The candidates of this kind of questions are just some ordered numbers. Every number stands for a certain content which is given in question stem. We simply replace every sequential number in candidates with their corresponding contents. Then, we can classify these questions as EQs or SQs according to the type of contents.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 420, |
| "end": 427, |
| "text": "Table 1", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 512, |
| "end": 520, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We also collected a wide diversity of resources including Baidu Encyclopedia, textbooks and practice questions as our external knowledge when inquiring the generated query. Baidu Encyclopedia which is also known as Baidu Baike, is something like Wikipedia, but the content of it is written in Chinese. We denote this resource as BAIKE. The textbooks resource contains three compulsory history textbooks published by People's Education Press. We denote them as BOOK. And we gathered about 50,000 practice questions and their answers, and this is denoted as TIKU.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "3 Approaches 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The GKHMC questions require figuring out the most relevant candidate to the question stem from the four given candidates. Our IR approach is inspired by this observation. The diagram of IR approach is illustrated in Figure 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 216, |
| "end": 224, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "IR Approach", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The pipeline of IR approach is: (1) use the classifier to automatically classify the question and select the weights according to the classification result; (2) calculate the relevance scores for every candidate(we introduce three different methods with seven score functions to calculate the relevance scores) and combine them together with specific weights; (3) choose the candidate with highest score as right answer. Despite the simplicity of it, IR approach achieves a promising result in experiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "IR Approach", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We build a naive bayes classifier to classify questions. Using length of candidates, entity number of candidates and verb number of candidates as features, every question is classified as EQ or SQ. When building the classifier, we do 10-folder cross validation on the GKHMC dataset and the results are 90.00% precision and 84.38% recall in EQs and 95.79% precision and 97.43% recalls in SQs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Naive Bayes Classifier", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "To calculate the relevance between question stem and candidates, we introduce three different methods with seven score functions, which are summarized in Table 2 . Lexical Matching Score: Since the correct candidate usually directly related to question stem, it's reasonable to assume that the facts in question stem may appear in documents related to them, together with the correct candidate. Here we introduce our lexical matching score functions, taking BAIKE as our external resource. The four queries are formed by each candidate and question stem separately. Then we retrieval every query and sum up the scores of the top three returned documents as the lexical matching score. We use score top i to denote the score of the top ith returned documents. score top i is calculated by Lucene's TFIDFSimilarity function 3 . The lexical matching score Score lexical (candidate k ) is calculated as", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 154, |
| "end": 161, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Score lexical (candidate k ) = 3 i=1 (score top i ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "(1) We build indices for BAIKE with different grains. The index built for every BAIKE document is denoted as BAIKE Document Index(BDI). The index built for every paragraph in BAIKE is denoted as BAIKE Paragraph Index(BPI). And, the index built for every sentence in BAIKE is called BAIKE Sentence Index(BSI).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "We denote the lexical matcing score function using BDI, BPI and BSI as Score BDI , Score BP I and Score BSI respectively. Entity Co-Occurrence Score: We also consider the relevance of entities in co-occurrence aspect. If two entities often appearing together, we assume that they are revelent. We use normalized google distance (Cilibrasi and Vitanyi, 2007) to calculate the entity co-occurrence score Score co (candidate k ).", |
| "cite_spans": [ |
| { |
| "start": 328, |
| "end": 357, |
| "text": "(Cilibrasi and Vitanyi, 2007)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "N GD(e i , e j ) = M ax(e i , e j ) \u2212 log f (e i , e j ) log N \u2212 M in(e i , e j )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "M ax(e i , e j ) = max{log f (e i ), log f (e j )}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "M in(e i , e j ) = min{log f (e i ), log f (e j )} (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Score co (candidate k ) = \u2212 log(N GD(e i , e j ))", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "e i \u2208 E stem , e j \u2208 E candidate k .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "In which, e i is entity; f (e i ) is the number of parts which contain entity e i ; f (e i , e j ) is the number of parts which contain both entity e i and e j ; E stem and E candidate k denotes the entities in question stem and candidate.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "The entity co-occurrence could be in document, paragraph or sentence, and they are donated as Score BDC , Score BP C and Score BSC respectively. Page Link Score: Inspired from PageRank algorithm (Page et al., 1999) , we assume that entities have links to each other are relevant. Here we introduce the page link score function. We use Link(e i , e j ) to denote the number of links between entities e i and e j . The link score Score link (candidate k ) could be calculated as:", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 214, |
| "text": "(Page et al., 1999)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Score link (candidate k ) = max(Link(e i , e j ))", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "e i \u2208 E stem , e j \u2208 E candidate k .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "We only count the number of links between BAIKE documents, and it is denoted as Score BDL", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Score Functions", |
| "sec_num": "3.1.2" |
| }, |
| { |
| "text": "Description Score BDI Score lexical using BDI Score BP I Score lexical using BPI Score BSI Score lexical using BSI Score BDC document level Score co Score BP C paragraph level Score co Score BSC sentence level Score co Score BDL document link score function Table 2 : Summarization of score functions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 258, |
| "end": 265, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Function", |
| "sec_num": null |
| }, |
| { |
| "text": "Since we have seven score functions, we need combine them together with different weights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "For a given question, we calculate the score of every candidate as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "score candidate k = 7 i=i (w i * f i (candidate k )) (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "where k \u2208 {1, 2, 3, 4}, f i is one of the seven score functions and w i is the corresponding weight. Then we normalize the scores of all candidates:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "score k = score candidate k 4 i=1 (score candidate i )", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "We suppose that the true answer of a question is the n-th candidate, where n \u2208 {1, 2, 3, 4}. The loss of it is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "loss quesiton = \u2212log(1 \u2212 score n )", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "Now we can calculate the total loss of the dataset with M questions:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "loss = M i (loss question i )", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "All operations are derivable so that we can use gradient descent algorithm to train the weights.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Weights", |
| "sec_num": "3.1.3" |
| }, |
| { |
| "text": "As deep neural networks are widely used in natural language processing tasks and has gained great success, it's naturally to come up with building deep neural networks to handle GKHMC task. So, we built several deep neural networks in different structures. And, we used both TIKU and BOOK to train these models, in order to teach models not only how to answer the questions but also the historical knowledge.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NN Approach", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "To handle the joint inference between background knowledge and question stems in GKHMC e c u l t u r a l a c h i v e m e n t s o f t h e m a r e l e a d -i n r e p r e s e n t a t i o n S i m i l a r i t y J u d g e r m 1 m 2 m 3 m 4 m 5 m 6 m t E n c o d e r E n c o d e r \u2026 \u2026 s c o r e a , s c o r e b , s c o r e c , s c o r e d E n c o d e r m a t e r i a l s e n t e n c e s A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a n . A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a n . A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . 
I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a n . A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a n . A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a questions, we introduce permanent-provisional memory network(PPMN). As illuminated in Figure 3, our PPMN is composed by the following components:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 87, |
| "end": 5277, |
| "text": "e c u l t u r a l a c h i v e m e n t s o f t h e m a r e l e a d -i n r e p r e s e n t a t i o n S i m i l a r i t y J u d g e r m 1 m 2 m 3 m 4 m 5 m 6 m t E n c o d e r E n c o d e r \u2026 \u2026 s c o r e a , s c o r e b , s c o r e c , s c o r e d E n c o d e r m a t e r i a l s e n t e n c e s A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a n . A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a n . A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . 
I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a n . A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a n . A f t e r W W \u2161 , t h e U n i t e d S t a t e s a n d t h e U n i o n o f S o v i e t S o c i a l i s t R e p u b l i c s g r a d u a l l y b e g a n c o m p e t i n g i n a l l f i e l d s i n c l u d i n g p o l i t i c s , e c o n o m y a n d m i l i t a r y . I n o r d e r t o c o o r d i n a t e a n d p r o m o t e e c o n o m i c a l d e v e l o p m e n t o f m e m b e r c o u n t r i e s i n s o c i a l i s t p a r t y , U S S R e s t a b l i s h e d C o u n c i l f o r M u t u a l E c o n o m i c A s s i s t a n c e i n 1 9 4 9 . T h i s m o v e i s m a i n l y f o r c o n f r o n t i n g M a r s h a l l P l a", |
| "ref_id": "TABREF2" |
| }, |
| { |
| "start": 5364, |
| "end": 5370, |
| "text": "Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "NN Approach", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "P r o v i s i o n a l M e m o r y M o d u l e I n p u t M o d u l e \u516c \u5143 9 7 \u5e74 \u4e1c \u6c49 \u73ed \u8d85 \u66fe \u6d3e \u2f08 \u51fa \u4f7f \u6b27 \u6d32 \u5f3a \u56fd \u2f24 \u79e6 \u4e1c \u6c49 \u2f24 \u79e6 \u90fd \u521b \u9020 \u8f89 \u714c \u6587 \u5316 \u5c5e \u4e8e \u5b83 \u4eec \u7684 \u6587 \u5316 \u6210 \u5c31 \u5206 \u522b \u662f T h", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NN Approach", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "1. Permanent Memory Module that plays the same role as a knowledge base and stores the original text from history textbooks or other relevant resource.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NN Approach", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "2. Provisional Memory Module that generates some contents based on the current word in background sentences, permanent knowledge and the lead-in sentence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NN Approach", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "3. Input Module that reads the words sequentially in background sentences and maps them into high-dimensional vector space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NN Approach", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "4. Similarity Judger that scores the similarity between the output of provisional memory and the vector representations of answer candidates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NN Approach", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "5. Sentence Encoder that encodes lead-in sentence, sentences in permanent memory and answer candidates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NN Approach", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We denote the sentences encoded by sentence encoder in this module as {k 1 , k 2 , ..., k K }, where K is the scale of permanent memory. The permanent memory is a constant matrix composed by the concatenation of representation vectors of these sentences, namely [k 1 ; k 2 ; k 3 ; ...; k K ]. Considering the time complexity of training PPMN, we only take the syllabus of all history courses including 198 sentences, i.e. K = 198, as the permanent memory. If necessary, all of the history text books can be taken into the permanent memory.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "Provisional Memory Module: It first inquires the current word of background sentences in the permanent memory, then use an attention vector generated by current word and lead-in sentence as well as the following words to decide how to adjust itself. The update equations are as follow:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "h t =GRU (w t , h t\u22121 ) (11) p =sof tmax(pW p h t ) (12) M t = K i=1 p i k i (13) x =[h t , M t , l, h t \u2022 l, M t \u2022 l, h t \u2022 M t , |h t \u2212 l|, |M t \u2212 l|, |h t \u2212 M t |] (14) g =\u03c3(W g tanh(W t x + b t ) + b g ) (15) m t =g \u2022 M t + (1 \u2212 g) \u2022 m t\u22121", |
| "eq_num": "(16)" |
| } |
| ], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "In the above equations, w t denotes the t-th word in the background sentences, GRU is defined in equation (19) (20) (21) (22) , h t\u22121 and h t are the hidden representation of w t\u22121 and w t respectively, l stands for the lead-in sentence encoded by the sentence encoder, \u2022 is element-wise multiplication and m t is the computational result of current step. The final output of this module is the last provisional memory vector m n where n is the length of background sentences.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 110, |
| "text": "(19)", |
| "ref_id": null |
| }, |
| { |
| "start": 111, |
| "end": 115, |
| "text": "(20)", |
| "ref_id": null |
| }, |
| { |
| "start": 116, |
| "end": 120, |
| "text": "(21)", |
| "ref_id": null |
| }, |
| { |
| "start": 121, |
| "end": 125, |
| "text": "(22)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "Input Module: This module takes the same weight matrices in sentence encoder and calculates the hidden states of every word sequentially. All the words in background sentences are first mapped into the hidden states in this module and then can be taken as input by other modules. The calculation of hidden states are the same as equation (19) (20) (21) (22) .", |
| "cite_spans": [ |
| { |
| "start": 338, |
| "end": 342, |
| "text": "(19)", |
| "ref_id": null |
| }, |
| { |
| "start": 343, |
| "end": 347, |
| "text": "(20)", |
| "ref_id": null |
| }, |
| { |
| "start": 348, |
| "end": 352, |
| "text": "(21)", |
| "ref_id": null |
| }, |
| { |
| "start": 353, |
| "end": 357, |
| "text": "(22)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "Similarity Judger: This module takes the concatenation of the output from provisional memory and representation of answer candidate as input and use a classifier based on logistic regression to score it. The judging procedure is defined as follow:p", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "= \u03c3(W l [m K ; a] + b l )", |
| "eq_num": "(17)" |
| } |
| ], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "score = sof tmax(p) 0 1 (18)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "where W l is a matrix that can map the concatenation vector [m K ; a] into a vectorp of length 2 and a stands for the answer candidate encoded by sentence encoder.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "Sentence Encoder: We experimented several recurrent neural networks with different structures as the sentence encoder. Both of Long-Short Term Momery (LSTM) (Hochreiter and Schmidhuber, 1997) and Gated Recurrent Unit (GRU) (Cho et al., 2014) perform much better than the standard tanh RNN. However, considering that the computation of LSTM is more complicated and timeconsuming, we choose GRU as the sentence encoder. The calculation of GRU denoted as h t = GRU (w t , h t\u22121 ) is as follow:", |
| "cite_spans": [ |
| { |
| "start": 157, |
| "end": 191, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 223, |
| "end": 241, |
| "text": "(Cho et al., 2014)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "z = \u03c3(W z w t + U z w t + b z )", |
| "eq_num": "(19)" |
| } |
| ], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "r = \u03c3(W r w t + U z w t + b r )", |
| "eq_num": "(20)" |
| } |
| ], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "s = tanh(W s w t + U s (r \u2022 h t\u22121 ) + b s ) (21) h t = (1 \u2212 z) \u2022 s + z \u2022 h t\u22121", |
| "eq_num": "(22)" |
| } |
| ], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "In the above equations, w t is extracted from a word embedding matrix W e initialized by word2vec (Mikolov et al., 2013) through an id number that indicates which word it is.", |
| "cite_spans": [ |
| { |
| "start": 98, |
| "end": 120, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "Loss Function: Intuitively, as we want to encourage the score as same to the true score (0 or 1) as possible, a negative log-likelihood loss function is introduced:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L = \u2212log(py)", |
| "eq_num": "(23)" |
| } |
| ], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "where y would be [0 1] if a is the right answer or [1 0] otherwise.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "Optimization Algorithm: We use the AdaDelta introduced by (Zeiler, 2012) to minimize the loss L, and use back propagation through time to optimize the calculation results of intermediate results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "Accuracy EQ-WEQ 49.38% SQ-WSQ 28.60% Table 3 : Accuracy of SQs and EQs with their corresponding best weights.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 37, |
| "end": 44, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Permanent Memory Module:", |
| "sec_num": null |
| }, |
| { |
| "text": "To find the best weights for EQ and SQ, We use TIKU as the training dataset. Using gradient descent to optimize parameters, we get the best weights for EQs and SQs separately, that is, W EQ is the weight best for EQs and W SQ is the weight best for SQs. We test the weights on EQs and SQs of GKHMC with their corresponding weights, and result is shown in Table 3 . As we can see, with these weights, we achieve promising result. We use GKHMC as the dataset to test the performance of IR approach with naive bayes classifier. The precision of EQs and SQs are 48.75%, 28.42% respectively. It's clear that the accuracy of both EQs and SQs decreased with automatic classification. But still, IR approach achieves much better results on EQs than SQs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 355, |
| "end": 362, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments of IR Approach", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We take some other neural network models with memory capability as our baseline models including the standard tanh recurrent neural network(RNN), long-short term memory network(LSTM) (Hochreiter and Schmidhuber, 1997) , gated recurrent unite(GRU) (Cho et al., 2014) , end-to-end memory network(MemNN) (Sukhbaatar et al., 2015) and dynamic memory network(DMN) (Kumar et al., 2016) . As for our PPMN, we summarize the syllabus of all history textbooks for senior school students to cover as much knowledge points as possible and we get 198 sentences which are taken into the permanent memory module. For all the above models, we used rmsprop (Hinton et al., 2012) with 0.001 as the learning rate to train them, the size of hidden units as well as the size of memory were both set to 400 and the size of batches were set to 1000. Also, we used dropout (Srivastava et al., 2014) to prevent the models from overfitting and the probability of it was set to 0.5. We test all these models and the results are shown in Table 4 .", |
| "cite_spans": [ |
| { |
| "start": 183, |
| "end": 217, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 247, |
| "end": 265, |
| "text": "(Cho et al., 2014)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 301, |
| "end": 326, |
| "text": "(Sukhbaatar et al., 2015)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 359, |
| "end": 379, |
| "text": "(Kumar et al., 2016)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 640, |
| "end": 661, |
| "text": "(Hinton et al., 2012)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 849, |
| "end": 874, |
| "text": "(Srivastava et al., 2014)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1010, |
| "end": 1017, |
| "text": "Table 4", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results of NN Approach", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "From the result, we observe that our PPMN ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results of NN Approach", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "It can be easily observed from the above experiments that IR approach and NN approach are some kind of complementary, namely they performs better to each other on different categories of questions. So, we combine the two approaches together via a weights matrix W c \u2208 R 2\u00d72 as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combine IR Approach and NN Approach", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "score EQ = W c 1\u2022 score IR score N N", |
| "eq_num": "(24)" |
| } |
| ], |
| "section": "Combine IR Approach and NN Approach", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "score SQ = W c 2\u2022 score IR score N N", |
| "eq_num": "(25)" |
| } |
| ], |
| "section": "Combine IR Approach and NN Approach", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where the W c i\u2022 means the i-th row of W c and score IR , score N N are the scores calculated by IR and NN approaches respectively. Here, the categories of questions are given by the naive bayes classifier. The performance of combined model and its comparison to the two individual approaches are illustrated in Figure 4 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 312, |
| "end": 320, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Combine IR Approach and NN Approach", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "From the global aspect, it can be easily observed that IR approach are more proficient on EQs(49.38% vs 40.63%), whereas NN approach expand superior to it on SQs(28.60% vs 40.24%). And the hybrid method composed by two approaches get the best performance(42.60%).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "As for the IR approach itself, the performance on EQs is much better than on SQs. This may because that IR approach is based on the relevance between candidates and question stem. In EQs, the information given by the question stem is usually the description of the key entity which only disappeared in the right candidate. So it's easy for the correct candidate to achieve a higher relevance score than others. And, that's why IR approach achieves promising result on EQs. Whereas, in SQs, the key entity doesn't appear in any candidate. And, it needs to be inferred out from question stem. No matter in aspect of lexical matching, entity co-occurrence or page link, the relevance between question stem and correct candidate may be as low as other candidates. Therefor, it's not surprised that IR approach is not sufficient to figure out the right choice on SQs. After adding the classifier in IR approach, we notice the decrease of accuracy on both EQs and SQs. This is because of the misclassification on the questions, which demonstrates that the weights W EQ , W SQ are particularly efficient on EQs, SQs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The experiment of NN approach declared that our PPMN does show its advantages on GKHMC questions. During the training, the performance of RNN model is labile, i.e. the precision are still variational when loss is convergent. In contrast, other model's performance is more stable. Hence, we consider that the memory mechanism helps model to \"remember\" the knowledge that appeared in the training data. Compared with the \"inside\" 4 memory of LSTM and GRU, the specially designed memory component in MemNN, DMN and PPMN are more powerful to find out the relationships between the question stem and answer candidates in GKHMC questions. However, the limited performance of MemNN on SQs indicates that the sequences of words in GKHMC questions are especially important for questions containing no distinct entities. Last but not least, the best performance of PPMN may due highly on the novel permanent memory module which can helps finding the implicit relationships with the stored background knowledge.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The state-of-the-art performance of hybrid method indicates that combination of IR approach and NN approach is the best strategy to address the GKHMC questions. As illustrated in Figure 4 , the combined method shows its enormous advantage on EQs. This may because both character and word embedding are more sufficient to cover the lexical meaning. And, some of EQs may be more suitable to be handled as SQs. Compared to the NN approach separately, the hybrid way does ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 179, |
| "end": 188, |
| "text": "Figure 4", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Answering real world questions in various subjects already gained attention from the beginning of this century. The ambitious Project Halo (Friedland et al., 2004) was proposed to create a \"digital\" Aristotle that can encompass most of the worlds's scientific knowledge and be capable of addressing complex problems with novel answers. In this project, (Angele et al., 2003) employed handcrafted rule to answer chemistry questions, (Gunning et al., 2010) took the physics and biology into account. Another important trial is solving the mathematical questions. (Mukherjee and Garain, 2008 ) attempted to answer them via transforming the natural language description into formal queries with hand-crafted rules, whereas recent works (Hosseini et al., 2014) started to employing learning techniques. However, none of these methods are suitable for history questions which requires large background knowledge, the same to the Aristo Challenge (Clark, 2015) focused on Elementary Grade Tests which is for 6-11 year olds. The Todai Robot Project (Fujita et al., 2014) aims to build a system that can pass the University of Tokyo's entrance examination. As parts of this project, mainly focus on addressing the yes-no questions via determining the correctness of the original proposition, and mainly focus on recognizing textual entailment between a description in Wikipedia and each option of question. But, these two methods are separated for different kinds of questions and none of them introduced neural network approach.", |
| "cite_spans": [ |
| { |
| "start": 139, |
| "end": 163, |
| "text": "(Friedland et al., 2004)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 353, |
| "end": 374, |
| "text": "(Angele et al., 2003)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 432, |
| "end": 454, |
| "text": "(Gunning et al., 2010)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 561, |
| "end": 588, |
| "text": "(Mukherjee and Garain, 2008", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 732, |
| "end": 755, |
| "text": "(Hosseini et al., 2014)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 940, |
| "end": 953, |
| "text": "(Clark, 2015)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1041, |
| "end": 1062, |
| "text": "(Fujita et al., 2014)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "It's inevitable to compare the GKHMC with the factoid questions. (Berant and Liang, 2014) takes the question as a kind of semantic parsing which can not handle the specific expressions with lots of background knowledge. Although (Yih et al., 2015) employed knowledge base, but still failed on multiple sentences questions which is beyond the scope of semantic parsing. However, the diversity of candidates in GKHMC makes these models fail to match the question with the right candidate. Another nonnegligible task is machine comprehension, also called reading comprehension. Although in several different datasets introduced by (Smith et al., 2008; Richardson et al., 2013; , questions are open-domain and candidates may be entities or sentences, understanding these questions don't require as much background knowledge as in GKHMC and these models cannot handle the joint inference between the background knowledge and words in questions.", |
| "cite_spans": [ |
| { |
| "start": 65, |
| "end": 89, |
| "text": "(Berant and Liang, 2014)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 229, |
| "end": 247, |
| "text": "(Yih et al., 2015)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 628, |
| "end": 648, |
| "text": "(Smith et al., 2008;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 649, |
| "end": 673, |
| "text": "Richardson et al., 2013;", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We are not the first to take up the Gaokao challenge, but former information retrieval approach doesn't fit to part of the questions in GKHMC and resources in their system are limited. In contrast, we introduced two different approaches to this task, compared their performance on different types of questions, combined them and gained a state-of-the-art result.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this work, we detailed the multiple choice questions in subject History of Gaokao, present two different approaches to address them and compared these approaches' performance on all categories of questions. We find that the IR approach are more sufficient on EQs cause the words in these questions are usually the description of right answer, whereas the NN approach performs much better on SQs, and this may because neural network models can find out the semantic relationship between questions and candidates. When combining them together, we get the state-of-the-art performance on GKHMC, better than any individual approach. This points out that combining different approaches may be a better method to deal with the real-world questions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In future work, we will explore whether keyvalue memory network proposed by (Miller et al., 2016) can help improve the performance of PPMN, what content in textbook or encyclopedia should be taken into the permanent memory, how to mathematically organize the permanent mem-ory to make it can be reasoned on as well as whether transforming the knowledge described in natural language into formal representation is beneficial. As a long-term goal, it's necessary to introduce discourse analysis, semantic parsing to help the model truly understand the material sentences, questions and candidates.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 97, |
| "text": "(Miller et al., 2016)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://github.com/IACASNLPIR/GKHMC/tree/master/data", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The codes of this project can be obtained at https://github.com/IACASNLPIR/GKHMC", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://lucene.apache.org/core/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We consider that the memory of LSTM and GRU are kind of stored inside the weight matrices.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank for the anonymous reviewers for helpful comments. This work was supported by the National High Technology Development 863 Program of China (No.2015AA015405) and the Natural Science Foundation of China (No.61533018). And this research work was also supported by Google through focused research awards program.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Ontologybased query and answering in chemistry: Ontonova project Halo", |
| "authors": [ |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Angele", |
| "suffix": "" |
| }, |
| { |
| "first": "Eddie", |
| "middle": [], |
| "last": "M\u00f6nch", |
| "suffix": "" |
| }, |
| { |
| "first": "Henrik", |
| "middle": [], |
| "last": "Oppermann", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Staab", |
| "suffix": "" |
| }, |
| { |
| "first": "Dirk", |
| "middle": [], |
| "last": "Wenke", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "The Semantic Web-ISWC 2003", |
| "volume": "", |
| "issue": "", |
| "pages": "913--928", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J\u00fcrgen Angele, Eddie M\u00f6nch, Henrik Oppermann, Steffen Staab, and Dirk Wenke. 2003. Ontology- based query and answering in chemistry: Ontonova project Halo. In The Semantic Web-ISWC 2003, pages 913-928. Springer, Sanibel Island, Florida, USA.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Semantic parsing via paraphrasing", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Berant", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1415--1425", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Berant and Percy Liang. 2014. Seman- tic parsing via paraphrasing. In Proceedings of the 52nd Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 1415-1425, Baltimore, Maryland, USA. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "On the properties of neural machine translation: Encoder-decoder approaches", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merrienboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of SSST-8, Eighth Workshop on Syntax, Semantics and Structure in Statistical Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "103--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart van Merrienboer, Dzmitry Bah- danau, and Yoshua Bengio. 2014. On the properties of neural machine translation: Encoder-decoder ap- proaches. In Proceedings of SSST-8, Eighth Work- shop on Syntax, Semantics and Structure in Statisti- cal Translation, pages 103-111, Doha, Qatar. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The google similarity distance", |
| "authors": [ |
| { |
| "first": "Rudi", |
| "middle": [], |
| "last": "Cilibrasi", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Vitanyi", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "IEEE Transactions on Knowledge and Data Engineering", |
| "volume": "19", |
| "issue": "3", |
| "pages": "370--383", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rudi Cilibrasi and Paul Vitanyi. 2007. The google similarity distance. IEEE Transactions on Knowl- edge and Data Engineering, 19(3):370-383.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Elementary school science and math tests as a driver for AI: Take the Aristo challenge!", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Twenty-Seventh AAAI Conference on Artificial Intelligence (AAAI-15)", |
| "volume": "", |
| "issue": "", |
| "pages": "4019--4021", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Clark. 2015. Elementary school science and math tests as a driver for AI: Take the Aristo chal- lenge! In Proceedings of the Twenty-Seventh AAAI Conference on Artificial Intelligence (AAAI- 15), pages 4019-4021, Austin, Texas, USA.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Project Halo: Towards a digital Aristotle. AI magazine", |
| "authors": [ |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Friedland", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Allen", |
| "suffix": "" |
| }, |
| { |
| "first": "Gavin", |
| "middle": [], |
| "last": "Matthews", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Witbrock", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Baxter", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [], |
| "last": "Curtis", |
| "suffix": "" |
| }, |
| { |
| "first": "Blake", |
| "middle": [], |
| "last": "Shepard", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierluigi", |
| "middle": [], |
| "last": "Miraglia", |
| "suffix": "" |
| }, |
| { |
| "first": "Jurgen", |
| "middle": [], |
| "last": "Angele", |
| "suffix": "" |
| }, |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Staab", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "25", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noah Friedland, Paul Allen, Gavin Matthews, Michael Witbrock, David Baxter, Jon Curtis, Blake Shepard, Pierluigi Miraglia, Jurgen Angele, Steffen Staab, et al. 2004. Project Halo: Towards a digital Aris- totle. AI magazine, 25(4):29.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Overview of Todai robot project and evaluation framework of its nlp-based problem solving", |
| "authors": [ |
| { |
| "first": "Akira", |
| "middle": [], |
| "last": "Fujita", |
| "suffix": "" |
| }, |
| { |
| "first": "Akihiro", |
| "middle": [], |
| "last": "Kameda", |
| "suffix": "" |
| }, |
| { |
| "first": "Ai", |
| "middle": [], |
| "last": "Kawazoe", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC-2014)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Akira Fujita, Akihiro Kameda, Ai Kawazoe, and Yusuke Miyao. 2014. Overview of Todai robot project and evaluation framework of its nlp-based problem solving. In Proceedings of the Ninth In- ternational Conference on Language Resources and Evaluation (LREC-2014), Reykjavik, Iceland. Euro- pean Language Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Project Halo update-progress toward digital Aristotle", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Gunning", |
| "suffix": "" |
| }, |
| { |
| "first": "Vinay", |
| "middle": [], |
| "last": "Chaudhri", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Ken", |
| "middle": [], |
| "last": "Barker", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaw-Yi", |
| "middle": [], |
| "last": "Chaw", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Greaves", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Grosof", |
| "suffix": "" |
| }, |
| { |
| "first": "Alice", |
| "middle": [], |
| "last": "Leung", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Sunil", |
| "middle": [], |
| "last": "Mishra", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "AI Magazine", |
| "volume": "31", |
| "issue": "3", |
| "pages": "33--58", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Gunning, Vinay Chaudhri, Peter Clark, Ken Barker, Shaw-Yi Chaw, Mark Greaves, Benjamin Grosof, Alice Leung, David McDonald, Sunil Mishra, et al. 2010. Project Halo update-progress toward digital Aristotle. AI Magazine, 31(3):33-58.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Lecture 6a overview of minibatch gradient descent", |
| "authors": [ |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Nirsh", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Swersky", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Coursera Lecture slides", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geoffrey Hinton, Nirsh Srivastava, and Kevin Swersky. 2012. Lecture 6a overview of mini- batch gradient descent. Coursera Lecture slides https://class.coursera.org/neuralnets-2012- 001/lecture.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Learning to solve arithmetic word problems with verb categorization", |
| "authors": [ |
| { |
| "first": "Javad Mohammad", |
| "middle": [], |
| "last": "Hosseini", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannaneh", |
| "middle": [], |
| "last": "Hajishirzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Etzioni", |
| "suffix": "" |
| }, |
| { |
| "first": "Nate", |
| "middle": [], |
| "last": "Kushman", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing(EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "523--533", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Javad Mohammad Hosseini, Hannaneh Hajishirzi, Oren Etzioni, and Nate Kushman. 2014. Learning to solve arithmetic word problems with verb catego- rization. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Process- ing(EMNLP), pages 523-533, Doha, Qatar. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Answering yes/no questions via question inversion", |
| "authors": [ |
| { |
| "first": "Hiroshi", |
| "middle": [], |
| "last": "Kanayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Prager", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "The COLING 2012 Organizing Committee", |
| "volume": "", |
| "issue": "", |
| "pages": "1377--1392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiroshi Kanayama, Yusuke Miyao, and John Prager. 2012. Answering yes/no questions via question in- version. In Proceedings of COLING 2012, pages 1377-1392, Mumbai, India. The COLING 2012 Or- ganizing Committee.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A survey on question answering technology from an information retrieval perspective", |
| "authors": [ |
| { |
| "first": "Oleksandrs", |
| "middle": [], |
| "last": "Kolomiyet", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Information Science", |
| "volume": "181", |
| "issue": "24", |
| "pages": "5412--5434", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oleksandrs Kolomiyet and Marie-Francine Moens. 2011. A survey on question answering technology from an information retrieval perspective. Informa- tion Science, 181(24):5412-5434.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Ask me anything: Dynamic memory networks for natural language processing", |
| "authors": [ |
| { |
| "first": "Ankit", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ozan", |
| "middle": [], |
| "last": "Irsoy", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Bradbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "English", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Pierce", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Ondruska", |
| "suffix": "" |
| }, |
| { |
| "first": "Ishaan", |
| "middle": [], |
| "last": "Gulrajani", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1378--1387", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankit Kumar, Ozan Irsoy, Jonathan Su, James Brad- bury, Robert English, Brian Pierce, Peter Ondruska, Ishaan Gulrajani, and Richard Socher. 2016. Ask me anything: Dynamic memory networks for nat- ural language processing. pages 1378-1387, New York City, New York, USA. ACM.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Scaling semantic parsers with on-the-fly ontology matching", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Eunsol", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1545--1556", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Eunsol Choi, Yoav Artzi, and Luke Zettlemoyer. 2013. Scaling semantic parsers with on-the-fly ontology matching. In Proceedings of the 2013 Conference on Empirical Methods in Natu- ral Language Processing, pages 1545-1556, Seattle, Washington, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word represen- tations in vector space. Computing Research Repos- itory, abs/1301.3781.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Key-value memory networks for directly reading documents", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Fisch", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Dodge", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir-Hossein", |
| "middle": [], |
| "last": "Karimi", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1400--1409", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Miller, Adam Fisch, Jesse Dodge, Amir- Hossein Karimi, Antoine Bordes, and Jason We- ston. 2016. Key-value memory networks for di- rectly reading documents. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 1400-1409, Austin, Texas, USA. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Evaluating textual entailment recognition for university entrance examinations", |
| "authors": [ |
| { |
| "first": "Yusuke", |
| "middle": [], |
| "last": "Miyao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hideki", |
| "middle": [], |
| "last": "Shima", |
| "suffix": "" |
| }, |
| { |
| "first": "Hiroshi", |
| "middle": [], |
| "last": "Kanayama", |
| "suffix": "" |
| }, |
| { |
| "first": "Teruko", |
| "middle": [], |
| "last": "Mitamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "ACM Transactions on Asian Language Information Processing (TALIP)", |
| "volume": "11", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yusuke Miyao, Hideki Shima, Hiroshi Kanayama, and Teruko Mitamura. 2012. Evaluating textual entail- ment recognition for university entrance examina- tions. ACM Transactions on Asian Language Infor- mation Processing (TALIP), 11(4):13.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A review of methods for automatic understanding of natural language mathematical problems", |
| "authors": [ |
| { |
| "first": "Anirban", |
| "middle": [], |
| "last": "Mukherjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Utpal", |
| "middle": [], |
| "last": "Garain", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Artificial Intelligence Review", |
| "volume": "29", |
| "issue": "2", |
| "pages": "93--122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anirban Mukherjee and Utpal Garain. 2008. A review of methods for automatic understanding of natural language mathematical problems. Artificial Intelli- gence Review, 29(2):93-122.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "The pagerank citation ranking: bringing order to the web", |
| "authors": [ |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Page", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Brin", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajeev", |
| "middle": [], |
| "last": "Motwani", |
| "suffix": "" |
| }, |
| { |
| "first": "Terry", |
| "middle": [], |
| "last": "Winograd", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lawrence Page, Sergey Brin, Rajeev Motwani, and Terry Winograd. 1999. The pagerank citation rank- ing: bringing order to the web. Technical Report 1, Stanford InfoLab.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Overview of question answering for machine reading evaluation", |
| "authors": [ |
| { |
| "first": "Anselmo", |
| "middle": [], |
| "last": "Pe\u00f1as", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Pamela", |
| "middle": [], |
| "last": "Forner", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00c1lvaro", |
| "middle": [], |
| "last": "Rodrigo", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Sutcliffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Roser", |
| "middle": [], |
| "last": "Morante", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "International Conference of the Cross-Language Evaluation Forum for European Languages", |
| "volume": "", |
| "issue": "", |
| "pages": "303--320", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anselmo Pe\u00f1as, Eduard Hovy, Pamela Forner,\u00c1lvaro Rodrigo, Richard Sutcliffe, and Roser Morante. 2013. Qa4mre 2011-2013: Overview of question answering for machine reading evaluation. In Inter- national Conference of the Cross-Language Evalu- ation Forum for European Languages, pages 303- 320. Springer.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Mctest: A challenge dataset for the open-domain machine comprehension of text", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Richardson", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "J", |
| "C" |
| ], |
| "last": "Burges", |
| "suffix": "" |
| }, |
| { |
| "first": "Erin", |
| "middle": [], |
| "last": "Renshaw", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "193--203", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Richardson, J.C. Christopher Burges, and Erin Renshaw. 2013. Mctest: A challenge dataset for the open-domain machine comprehension of text. In Proceedings of the 2013 Conference on Em- pirical Methods in Natural Language Processing, pages 193-203, Seattle, Washington, USA. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Question generation as a competitive undergraduate course project", |
| "authors": [ |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Heilman", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Hwa", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the NSF Workshop on the Question Generation Shared Task and Evaluation Challenge", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noah A. Smith, Michael Heilman, and Rebecca Hwa. 2008. Question generation as a competitive un- dergraduate course project. In Proceedings of the NSF Workshop on the Question Generation Shared Task and Evaluation Challenge, Arlington, Mas- sachusetts, USA.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Dropout: a simple way to prevent neural networks from overfitting", |
| "authors": [ |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "15", |
| "issue": "1", |
| "pages": "1929--1958", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nitish Srivastava, Geoffrey E. Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdi- nov. 2014. Dropout: a simple way to prevent neural networks from overfitting. Journal of Machine Learning Research, 15(1):1929-1958.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "End-to-end memory networks", |
| "authors": [ |
| { |
| "first": "Sainbayar", |
| "middle": [], |
| "last": "Sukhbaatar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Rob", |
| "middle": [], |
| "last": "Fergus", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2440--2448", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sainbayar Sukhbaatar, Jason Weston, Rob Fergus, et al. 2015. End-to-end memory networks. In Advances in neural information processing systems, pages 2440-2448, Montr\u00e9al, Canada.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Towards ai-complete question answering: A set of prerequisite toy tasks", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Computing Research Repository", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Weston, Antoine Bordes, Sumit Chopra, and Tomas Mikolov. 2015. Towards ai-complete ques- tion answering: A set of prerequisite toy tasks. Computing Research Repository, abs/1502.05698.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Semantic parsing via staged query graph generation: Question answering with knowledge base", |
| "authors": [ |
| { |
| "first": "Wen-Tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1321--1331", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wen-tau Yih, Ming-Wei Chang, Xiaodong He, and Jianfeng Gao. 2015. Semantic parsing via staged query graph generation: Question answering with knowledge base. In Proceedings of the 53rd Annual Meeting of the Association for Computational Lin- guistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1321-1331, Beijing, China. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Adadelta: An adaptive learning rate method. Computing Research Repository", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "D" |
| ], |
| "last": "Zeiler", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew D. Zeiler. 2012. Adadelta: An adaptive learn- ing rate method. Computing Research Repository, abs/1212.5701.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "text": "Examples of questions and their types. The upper one is an entity question. The lower one is a sentence question.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "text": "Pipeline of IR approach.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "text": "Diagram of PPMN. The questions hasn't been translated into English.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "text": "Result of different methods. a little poorly on SQs, which may caused by the loss of classification.", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "text": "After the World War II, U.S. and Soviet Union are fighting against each other in politics, economics and military. To promote the development of economics in Socialist Countries, Soviet Union establish The Council for Mutual Economic Assistance. From Qin and Han Dynasties to Ming Dynasty, businessmen are always at the bottom of hierarchy. One reason for this is that the ruling class thought the businessmen A. are not engaged in production B. do not respect Confucianism C. do not respect the clan D. do not pay tax", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td/><td>This is against</td></tr><tr><td>A. Truman Doctrine</td><td>B. Marshall Plan</td></tr><tr><td>C. NATO</td><td>D. Federal Republic of Germany</td></tr><tr><td/><td>Entity Question</td></tr><tr><td/><td>Sentence Question</td></tr></table>" |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "text": "The GKHMC dataset.", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "type_str": "table", |
| "text": "Results of all neural network models.", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>gains best performance on all kinds of GKHMC</td></tr><tr><td>questions and all memory-capable neural network</td></tr><tr><td>models beat RNN. It's interesting that MemNN</td></tr><tr><td>performs much worse than other memory-capable</td></tr><tr><td>models on SQs whereas it shows promising capa-</td></tr><tr><td>bility on EQs.</td></tr></table>" |
| } |
| } |
| } |
| } |