| { |
| "paper_id": "C12-1030", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:23:26.406182Z" |
| }, |
| "title": "Towards Automatic Topical Question Generation", |
| "authors": [ |
| { |
| "first": "Yllias", |
| "middle": [], |
| "last": "Chali", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Lethbridge", |
| "location": { |
| "settlement": "Lethbridge", |
| "region": "AB", |
| "country": "Canada" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Sadid", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hasan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Lethbridge", |
| "location": { |
| "settlement": "Lethbridge", |
| "region": "AB", |
| "country": "Canada" |
| } |
| }, |
| "email": "hasan@cs.uleth.ca" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "We address the challenge of automatically generating questions from topics. We consider that each topic is associated with a body of texts containing useful information about the topic. Questions are generated by exploiting the named entity information and the predicate argument structures of the sentences present in the body of texts. To measure the importance of the generated questions, we use Latent Dirichlet Allocation (LDA) to identify the sub-topics (which are closely related to the original topic) in the given body of texts and apply the Extended String Subsequence Kernel (ESSK) to calculate their similarity with the questions. We also propose the use of syntactic tree kernels for computing the syntactic correctness of the questions. The questions are ranked by considering their importance (in the context of the given body of texts) and syntactic correctness. To the best of our knowledge, no other study has accomplished this task in our setting before. Experiments show that our approach can significantly outperform the state-of-the-art results.", |
| "pdf_parse": { |
| "paper_id": "C12-1030", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "We address the challenge of automatically generating questions from topics. We consider that each topic is associated with a body of texts containing useful information about the topic. Questions are generated by exploiting the named entity information and the predicate argument structures of the sentences present in the body of texts. To measure the importance of the generated questions, we use Latent Dirichlet Allocation (LDA) to identify the sub-topics (which are closely related to the original topic) in the given body of texts and apply the Extended String Subsequence Kernel (ESSK) to calculate their similarity with the questions. We also propose the use of syntactic tree kernels for computing the syntactic correctness of the questions. The questions are ranked by considering their importance (in the context of the given body of texts) and syntactic correctness. To the best of our knowledge, no other study has accomplished this task in our setting before. Experiments show that our approach can significantly outperform the state-of-the-art results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "When a user is served with a ranked list of relevant documents by the standard document retrieval systems (i.e. search engines), his/her search task is usually not over (Chali et al., 2009b) . The next step for him/her is to look into the documents themselves and search for the precise piece of information he/she was looking for. This method is time consuming, and a correct answer could easily be missed, by either an incorrect query resulting in missing documents or by careless reading. This is why, Question Answering (QA) has received immense attention from the information retrieval, information extraction, machine learning, and natural language processing communities (Kotov and Zhai, 2010) . One of the main requirements of a QA system is that it must receive a well-formed question as input in order to come up with the best possible correct answer as output. Available studies revealed that humans are not very skilled in asking good questions about a topic of their interest. They are forgetful in nature which often restricts them to properly express whatever that is peeking in their mind. Therefore, they would benefit from automated Question Generation (QG) systems that can assist in meeting their inquiry needs (Olney et al., 2012; Ali et al., 2010; Kotov and Zhai, 2010; Rus and Graesser, 2009; Lauer et al., 1992; Graesser et al., 2001) . Question asking and Question Generation are important components in advanced learning technologies such as intelligent tutoring systems, and inquiry-based environments (Graesser et al., 2001 ). A QG system would be useful for building better question asking facilities in intelligent tutoring systems. Another benefit of QG is that it can be a good tool to help improve the quality of the Question Answering (QA) systems (Graesser et al., 2001; Rus and Graesser, 2009) .", |
| "cite_spans": [ |
| { |
| "start": 169, |
| "end": 190, |
| "text": "(Chali et al., 2009b)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 678, |
| "end": 700, |
| "text": "(Kotov and Zhai, 2010)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1231, |
| "end": 1251, |
| "text": "(Olney et al., 2012;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 1252, |
| "end": 1269, |
| "text": "Ali et al., 2010;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1270, |
| "end": 1291, |
| "text": "Kotov and Zhai, 2010;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1292, |
| "end": 1315, |
| "text": "Rus and Graesser, 2009;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 1316, |
| "end": 1335, |
| "text": "Lauer et al., 1992;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 1336, |
| "end": 1358, |
| "text": "Graesser et al., 2001)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1529, |
| "end": 1551, |
| "text": "(Graesser et al., 2001", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1782, |
| "end": 1805, |
| "text": "(Graesser et al., 2001;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 1806, |
| "end": 1829, |
| "text": "Rus and Graesser, 2009)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The main motivation of this work is to generate all possible questions about a given topic. For example, given the topic \"Apple Inc. Logos\", we can generate questions such as \"What is Apple Inc.?\", \"Where is Apple Inc. located?\", \"Who designed Apple's Logo?\" etc. We consider this task of automatically generating questions from topics and assume that each topic is associated with a body of texts having useful information about the topic. Our main goal is to generate fact-based questions 1 about a given topic from its associated content information. We generate questions by exploiting the named entity information and the predicate argument structures of the sentences (along with semantic roles) present in the given body of texts. The named entities and the semantic role labels are used to identify relevant parts of a sentence in order to form relevant questions over them. The importance of the generated questions is measured in two steps. In the first step, we identify whether the question is asking something about the topic or something that is very closely related to the topic. We call this the measure of topic relevance. For this purpose, we use Latent Dirichlet Allocation (LDA) (Blei et al., 2003) to identify the sub-topics (which are closely related to the original topic) in the given body of texts and apply the Extended String Subsequence Kernel (ESSK) (Hirao et al., 2003) to calculate their similarity with the questions. In the second step, we judge the syntactic correctness of each generated question. We apply the tree kernel functions (Collins and Duffy, 2001 ) and re-implement the syntactic tree kernel model according to Moschitti et al. (2007) for computing the syntactic similarity of each question with the associated content information. We rank the questions by considering their topic relevance and syntactic correctness scores. Experimental results show the effectiveness of our approach for automatically generating topical questions. \nThe remainder of the paper is organized as follows. Section 2 describes the related work and motivation followed by Section 3 that presents the description of our QG system. Section 4 explains the experiments and shows evaluation results. We conclude the paper in the next section.", |
| "cite_spans": [ |
| { |
| "start": 1199, |
| "end": 1218, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1379, |
| "end": 1399, |
| "text": "(Hirao et al., 2003)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1568, |
| "end": 1592, |
| "text": "(Collins and Duffy, 2001", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1657, |
| "end": 1680, |
| "text": "Moschitti et al. (2007)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recently, question generation has got immense attention from the researchers and hence, different methods have been proposed to accomplish the task in different relevant fields (Andrenucci and Sneiders, 2005) . McGough et al. (2001) proposed an approach to build a web-based testing system with the facility of dynamic question generation. Wang et al. (2008) showed a method to automatically generate questions based on question templates (which are created from training on medical articles). Brown et al. (2005) described an approach to automatically generate questions to assess the user's vocabulary knowledge. To mimic the reader's self-questioning strategy during reading, Chen et al. (2009) developed a method to generate questions automatically from informational text. On the other hand, Agarwal et al. (2011) considered the question generation problem beyond sentence level and proposed an approach that uses discourse connectives to generate questions from a given text. Several other QG models have been proposed over the years that deal with transforming answers to questions and utilizing question generation as an intermediate step in the question answering process (Echihabi and Marcu, 2003; Hickl et al., 2005) . There are some other researchers who have approached the task of generating questions for educational purposes (Mitkov and Ha, 2003; Heilman and Smith, 2010b) .", |
| "cite_spans": [ |
| { |
| "start": 177, |
| "end": 208, |
| "text": "(Andrenucci and Sneiders, 2005)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 211, |
| "end": 232, |
| "text": "McGough et al. (2001)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 340, |
| "end": 358, |
| "text": "Wang et al. (2008)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 494, |
| "end": 513, |
| "text": "Brown et al. (2005)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 679, |
| "end": 697, |
| "text": "Chen et al. (2009)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 797, |
| "end": 818, |
| "text": "Agarwal et al. (2011)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1181, |
| "end": 1207, |
| "text": "(Echihabi and Marcu, 2003;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1208, |
| "end": 1227, |
| "text": "Hickl et al., 2005)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1341, |
| "end": 1362, |
| "text": "(Mitkov and Ha, 2003;", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 1363, |
| "end": 1388, |
| "text": "Heilman and Smith, 2010b)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work and Motivation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Tutoring System, and Information Retrieval (IR) communities have currently identified the Text-to-Question generation task as promising candidates for shared tasks 2 (Rus and Graesser, 2009; Boyer and Piwek, 2010) . In the Text-to-Question generation task, a QG system is given a text, and the goal is to generate a set of questions for which the text contains answers. The task of generating a question about a given text can be typically decomposed into three subtasks. First, given the source text, a content selection step is necessary to select a target to ask about, such as the desired answer. Second, given a target answer, an appropriate question type is selected, i.e., the form of question to ask is determined. Third, given the content, and question type, the actual question is constructed. Based on this principle, several approaches have been described in Boyer and Piwek (2010) that use named entity information, syntactic knowledge and semantic structures of the sentences to perform the task of generating questions from sentences and paragraphs (Heilman and Smith, 2010a; Mannem et al., 2010) . Inspired by these works, we perform the task of topic to question generation using named entity information and semantic structures of the sentences. A task that is similar to ours is the task of keywords to question generation that has been addressed recently in Zheng et al. (2011) . They propose a user model for jointly generating keywords and questions. However, their approach is based on generating question templates from existing questions which requires a large set of English questions as training data. In recent years, some other related researches have proposed the tasks of high quality question generation (Ignatova et al., 2008) and generating questions from queries (Lin, 2008) . Fact-based question generation has been accomplished previously by Rus et al. (2007) ; Heilman and Smith (2010b) . \nWe also focus on generating fact-based questions in this research.", |
| "cite_spans": [ |
| { |
| "start": 166, |
| "end": 190, |
| "text": "(Rus and Graesser, 2009;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 191, |
| "end": 213, |
| "text": "Boyer and Piwek, 2010)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 871, |
| "end": 893, |
| "text": "Boyer and Piwek (2010)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1064, |
| "end": 1090, |
| "text": "(Heilman and Smith, 2010a;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1091, |
| "end": 1111, |
| "text": "Mannem et al., 2010)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 1378, |
| "end": 1397, |
| "text": "Zheng et al. (2011)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 1736, |
| "end": 1759, |
| "text": "(Ignatova et al., 2008)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1798, |
| "end": 1809, |
| "text": "(Lin, 2008)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 1879, |
| "end": 1896, |
| "text": "Rus et al. (2007)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 1899, |
| "end": 1924, |
| "text": "Heilman and Smith (2010b)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "Besides grammaticality, an effective QG system should focus deeply on the importance of the generated questions (Vanderwende, 2008) . This motivates the use of a question ranking module in a typical QG system. Over-generated questions can be ranked using different approaches such as statistical ranking methods, dependency parsing, identifying the presence of pronouns and named entities, and topic scoring (Heilman and Smith, 2010a; Mannem et al., 2010; McConnell et al., 2011) . However, most of these automatic ranking approaches ignore the aspects of complex paraphrasing by not considering lexical semantic variations (e.g. synonymy) while measuring the importance of the questions. In our work, we use Latent Dirichlet Allocation (LDA) (Blei et al., 2003) to identify the sub-topics (which are closely related to the original topic) in the given body of texts. In recent years, LDA has become one of the most popular topic modeling techniques and has been shown to be effective in several text-related tasks such as document classification, information retrieval, and question answering (Misra et al., 2008; Wei and Croft, 2006; Celikyilmaz et al., 2010) . Hirao et al. (2003) introduced ESSK considering all possible senses to each word to perform their summarization task. Their method is effective. However, the fact that they do not disambiguate word senses cannot be disregarded. In our task, we apply ESSK to calculate the similarity between important topics (discovered using LDA) and the generated questions in order to measure the importance of each question. We use disambiguated word senses for this purpose.", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 131, |
| "text": "(Vanderwende, 2008)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 408, |
| "end": 434, |
| "text": "(Heilman and Smith, 2010a;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 435, |
| "end": 455, |
| "text": "Mannem et al., 2010;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 456, |
| "end": 479, |
| "text": "McConnell et al., 2011)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 743, |
| "end": 762, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1094, |
| "end": 1114, |
| "text": "(Misra et al., 2008;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 1115, |
| "end": 1135, |
| "text": "Wei and Croft, 2006;", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 1136, |
| "end": 1161, |
| "text": "Celikyilmaz et al., 2010)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1164, |
| "end": 1183, |
| "text": "Hirao et al. (2003)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "Syntactic information has been used successfully in question answering previously (Chali et al., 2009a (Chali et al., , 2011 Zhang and Lee, 2003; Moschitti et al., 2007; Moschitti and Basili, 2006) . Pasca and Harabagiu (2001) argued that with the syntactic form of a sentence one can see which words depend on other words. We also feel that there should be a similarity between the words which are dependent in the sentences present in the associated body of texts and the dependency between words of the generated question. This motivates us to propose the use of syntactic kernels in judging the syntactic correctness of the generated questions automatically.", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 102, |
| "text": "(Chali et al., 2009a", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 103, |
| "end": 124, |
| "text": "(Chali et al., , 2011", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 125, |
| "end": 145, |
| "text": "Zhang and Lee, 2003;", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 146, |
| "end": 169, |
| "text": "Moschitti et al., 2007;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 170, |
| "end": 197, |
| "text": "Moschitti and Basili, 2006)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 200, |
| "end": 226, |
| "text": "Pasca and Harabagiu (2001)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "The main goal of our work is to generate as many questions as possible related to the topic. We use NE information and the predicate argument structures of the sentences to accomplish this goal. Our approach is different from the setup in shared tasks (Rus and Graesser, 2009; Boyer and Piwek, 2010) as we generate a set of basic questions which are useful to add variety in the question space. A paragraph associated with each topic is used as the source of relevant information about the topic. We evaluate our systems in terms of topic relevance which is different from the prior works (Heilman and Smith, 2010a; Mannem et al., 2010) . Syntactic correctness is also an important property of a good question. For this reason, we evaluate our system in terms of syntactic correctness as well. The proposed system will be useful to generate topic related questions from the associated content information which can be used to incorporate a \"question suggestions for a certain topic\" facility in the search systems. For example, if a user searches for some information related to a certain topic, the search system could generate all possible topic-relevant questions from a preexistent related body of texts to provide suggestions. Kotov and Zhai (2010) approached a similar task by proposing a technique to augment the standard ranked list presentation of search results with a question based interface to refine user given queries.", |
| "cite_spans": [ |
| { |
| "start": 252, |
| "end": 276, |
| "text": "(Rus and Graesser, 2009;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 277, |
| "end": 286, |
| "text": "Boyer and", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 589, |
| "end": 615, |
| "text": "(Heilman and Smith, 2010a;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 616, |
| "end": 636, |
| "text": "Mannem et al., 2010)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "The major contributions of our work can be summarized as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 We perform the task of topic to question generation which can help users in expressing their information needs. Questions are generated using a set of general-purpose rules based on named entity information and the predicate argument structures of the sentences (along with semantic roles) present in the associated body of texts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 We use LDA to identify the sub-topics (which are closely related to the original topic) in the given body of texts and apply ESSK (with disambiguated word senses) to calculate their similarity with the questions. This helps us to measure the importance of each question.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 We apply the tree kernel functions and re-implement the syntactic tree kernel model for computing the syntactic similarity of each question with the associated content information. In this way, we judge the syntactic correctness of each generated question automatically.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 The ESSK similarity scores and the syntactic similarity scores are used to rank the generated questions. In doing so, we show that the use of ESSK and syntactic kernels improve the relevance and the syntactic correctness of the top-ranked questions, respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 We also run experiments by narrowing down the topic focus. Experiments with the topics about persons (biographical focus) reveal improvements in the overall results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Natural Language Processing (NLP), Natural Language Generation (NLG), Intelligent", |
| "sec_num": null |
| }, |
| { |
| "text": "Our QG approach mainly builds on four steps. In the first step, complex sentences (from the given body of texts) related to a topic are simplified as it is easier to generate questions from simple sentences. In the next step, named entity information and predicate argument structures of the sentences are extracted and then, questions are generated using them. In the third step, LDA is used to identify important sub-topics from the given body of texts and then ESSK is applied to find their similarity with the generated questions. In the final step, syntactic tree kernel is employed and syntactic similarity between the generated questions and the sentences present in the body of texts determines the syntactic correctness of the questions. Questions are then ranked by considering the ESSK similarity scores and the syntactic similarity scores. We describe the overall procedure in the following subsections.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic to Question Generation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Sentences may have complex grammatical structure with multiple embedded clauses. Therefore, we simplify the complex sentences with the intention to generate more accurate questions. We use the simplified factual statement extractor model 3 of Heilman and Smith (2010a) . Their model extracts the simpler forms of the complex source sentence by altering lexical items, syntactic structure, and semantics and by removing phrase types such as leading conjunctions, sentence-level modifying phrases, and appositives. For example, given a complex sentence s, we get a corresponding simple sentence as follows:", |
| "cite_spans": [ |
| { |
| "start": 243, |
| "end": 268, |
| "text": "Heilman and Smith (2010a)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Simplification", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Complex Sentence (s): Apple's first logo, designed by Jobs and Wayne, depicts Sir Isaac Newton sitting under an apple tree.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Simplification", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Simple Sentence: Apple's first logo is designed by Jobs and Wayne.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence Simplification", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We use the Illinois Named Entity Tagger 4 , a state of the art NE tagger that tags a plain text with named entities (people, organizations, locations, miscellaneous) (Ratinov and Roth, 2009 ).", |
| "cite_spans": [ |
| { |
| "start": 166, |
| "end": 189, |
| "text": "(Ratinov and Roth, 2009", |
| "ref_id": "BIBREF46" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Named Entity (NE) Information and Semantic Role Labeling (SRL) for QG", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Once we tag the topic in consideration and its associated body of texts, we use some general purpose rules to create some basic questions even though the answer is not present in the body of texts. For example, \"Apple Inc.\" is tagged as an organization, so we generate a question: \"Where is Apple Inc. located?\". The main motivation behind generating such questions is to add variety to the generated question space. Table 1 shows some example rules for basic questions generated in this work.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 417, |
| "end": 424, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Named Entity (NE) Information and Semantic Role Labeling (SRL) for QG", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Who is person? organization Where is organization located? location", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tag Example Question person", |
| "sec_num": null |
| }, |
| { |
| "text": "Where is location? misc.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tag Example Question person", |
| "sec_num": null |
| }, |
| { |
| "text": "What do you know about misc.? Table 1 : Example basic question rules Our next task is to generate specific questions from the sentences present in the given body of texts. For this purpose, we parse the sentences semantically using a Semantic Role Labeling (SRL) system (Kingsbury and Palmer, 2002; Hacioglu et al., 2003) , ASSERT 5 . ASSERT is an automatic statistical semantic role tagger, that can annotate naturally occuring text with semantic arguments. When presented with a sentence, it performs a full syntactic analysis of the sentence, automatically identifies all the verb predicates in that sentence, extracts features for all constituents in the parse tree relative to the predicate, and identifies and tags the constituents with the appropriate semantic arguments. For example, the output of the SRL system for the sentence \"Apple's first logo is designed by Jobs and Wayne.\" is: .] can be replaced and the question: \"Who designed Apple's first logo?\" can be generated. The semantic roles ARG0...ARG5 are called mandatory arguments. There are some additional arguments or semantic roles that can be tagged by ASSERT. They are called optional arguments and they start with the prefix ARGM. These are defined by the annotation guidelines set in (Palmer et al., 2005) . A set of about 350 general purpose rules are used to transform the semantic-role labeled sentences into the questions. The rules were set up in a way that we could use the semantic role information to find the potential answer words in a sentence which would be replaced by suitable question words. In case of a mandatory argument, the choice of question word depends on the argument's named entity tag (e.g. \"Who\" for a person, \"Where\" for a location etc.). Table 2 shows how different semantic roles can be replaced by possible question words in order to generate a question.", |
| "cite_spans": [ |
| { |
| "start": 270, |
| "end": 298, |
| "text": "(Kingsbury and Palmer, 2002;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 299, |
| "end": 321, |
| "text": "Hacioglu et al., 2003)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1257, |
| "end": 1278, |
| "text": "(Palmer et al., 2005)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 30, |
| "end": 37, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 1740, |
| "end": 1747, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Tag Example Question person", |
| "sec_num": null |
| }, |
| { |
| "text": "[ARG1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Tag Example Question person", |
| "sec_num": null |
| }, |
| { |
| "text": "To measure the importance of the generated questions, we use LDA (Blei et al., 2003) to identify the important sub-topics from the given body of texts. LDA is a probabilistic topic modeling technique where the main principle is to view each document as a mixture of various topics. Here each topic is a probability distribution over words. LDA assumes that documents are made up of words and word ordering is not important (\"bag-of-words\" assumption) (Misra et al., 2008) . The main idea is to choose a distribution over topics while generating a new document. For each word in the new document, a topic is randomly chosen according to this distribution and a word is drawn from that topic. LDA uses a generative topic modeling approach to specify the following distribution over words within a document:", |
| "cite_spans": [ |
| { |
| "start": 65, |
| "end": 84, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 451, |
| "end": 471, |
| "text": "(Misra et al., 2008)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Latent Dirichlet Allocation (LDA)", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P(w i ) = K j=1 P(w i |z i = j)P(z i = j)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Latent Dirichlet Allocation (LDA)", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "where K is the number of topics, P(w i |z i = j) is the probability of word w i under topic j and P(z i = j) is the sampling probability of topic j for the i th word. The multinomial distributions \u03c6 ( j) = P(w|z i = j) and \u03b8 (d) = P(z) are termed as topic-word distribution and document-topic distribution, respectively (Blei et al., 2003) . A Dirichlet (\u03b1) prior is placed on \u03b8 and a Dirichlet (\u03b2) prior is set on \u03c6 to refine this basic model (Blei et al., 2003; Griffiths and Steyvers, 2002) . Now the main goal is to estimate the two parameters: \u03b8 and \u03c6. We apply this framework directly to solve our problem by considering each topic-related body of texts as a document. We use a GUI-based toolkit for topic modeling 6 that uses the popular MALLET (McCallum, 2002) toolkit for the back-end. The process starts by removing a list of \"stop words\" from the document and runs 200 iterations of Gibbs sampling (Geman and Geman, 1984) to estimate the parameters: \u03b8 and \u03c6. From each body of texts, we discover K topics and choose the most frequent words from the most likely unigrams as the desired sub-topics. For example, from the associated body of texts of the topic Apple Inc. Logos, we get these sub-topics: janoff, themes, logo, color, apple.", |
| "cite_spans": [ |
| { |
| "start": 225, |
| "end": 228, |
| "text": "(d)", |
| "ref_id": null |
| }, |
| { |
| "start": 320, |
| "end": 339, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 444, |
| "end": 463, |
| "text": "(Blei et al., 2003;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 464, |
| "end": 493, |
| "text": "Griffiths and Steyvers, 2002)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 752, |
| "end": 768, |
| "text": "(McCallum, 2002)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 909, |
| "end": 932, |
| "text": "(Geman and Geman, 1984)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Latent Dirichlet Allocation (LDA)", |
| "sec_num": "3.3.1" |
| }, |
| { |
| "text": "Once we identify the sub-topics, we apply ESSK to measure their similarity with the generated questions. ESSK is the simple extension of the Word Sequence Kernel (WSK) (Cancedda et al., 2003) and String Subsequence Kernel (SSK) (Lodhi et al., 2002) . WSK receives two sequences of words as input and maps each of them into a high-dimensional vector space. WSK's value is just the inner product of the two vectors. But, WSK disregards synonyms, hyponyms, and hypernyms. On the other hand, SSK measures the similarity between two sequences of \"alphabets\". In ESSK, each \"alphabet\" in SSK is replaced by a disjunction of an \"alphabet\" and its alternative (Hirao et al., 2003) . In ESSK, each word in a sentence is considered an \"alphabet\", and the alternative is its all possible senses. However, our ESSK implementation considers the alternative of each word as its disambiguated sense. We use a dictionary based Word Sense Disambiguation (WSD) System assuming one sense per discourse. We use WordNet (Fellbaum, 1998) to find the semantic relations (such as repetition, synonym, hypernym and hyponym, holonym and meronym, and gloss) for all the words in a text. We assign a weight to each semantic relation and used all of them. Our WSD technique is decomposed into two steps: (1) building a representation of all possible senses of the words and (2) disambiguating the words based on the highest score. To be specific, each candidate word from the context is expanded to all of its senses. A disambiguation graph is constructed as the intermediate representation where the nodes denote word instances with their WordNet senses and the weighted edges (connecting the senses of two different words) represent semantic relations. This graph is exploited to perform the WSD. We sum the weights of all edges leaving the nodes under their different senses. The sense with the highest score is considered to be the most probable sense.", |
| "cite_spans": [ |
| { |
| "start": 168, |
| "end": 191, |
| "text": "(Cancedda et al., 2003)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 228, |
| "end": 248, |
| "text": "(Lodhi et al., 2002)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 652, |
| "end": 672, |
| "text": "(Hirao et al., 2003)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 999, |
| "end": 1015, |
| "text": "(Fellbaum, 1998)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "In case of a tie between two or more senses, we select the sense that comes first in WordNet, since WordNet orders the senses of a word by decreasing order of their frequency.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "ESSK is used to measure the similarity between all possible subsequences of the question words/senses and topic words/senses. We calculate the similarity score Sim(T i , Q j ) using ESSK where T i denotes a topic/sub-topic word sequence and Q j stands for a generated question. Formally, ESSK is defined as follows 7 :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "K essk (T, Q) = d m=1 t i \u2208T q j \u2208Q K m (t i , q j ) K m (t i , q j ) = val(t i , q j ) if m = 1 K m\u22121 (t i , q j ) \u2022 val(t i , q j )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Here, K m (t i , q j ) is defined below. t i and q j are nodes of T and Q, respectively. The function val(t, q) returns the number of attributes common (i.e. the number of common words/senses) to the given nodes t and q.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "K m (t i , q j ) = 0 if j = 1 \u03bbK m (t i , q j\u22121 ) + K m (t i , q j\u22121 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Here \u03bb is the decay parameter for the number of skipped words. K m (t i , q j ) is defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "K m (t i , q j ) = 0 if i = 1 \u03bbK m (t i\u22121 , q j ) + K m (t i\u22121 , q j )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "Finally, the similarity measure is defined after normalization as below:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "sim essk (T, Q) = K essk (T, Q) K essk (T, T )K essk (Q, Q)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extended String Subsequence Kernel (ESSK)", |
| "sec_num": "3.3.2" |
| }, |
| { |
| "text": "The generated questions might be syntactically incorrect due to the process of automatic question generation. It is time consuming and a lot of human intervention is necessary to check for the syntactically incorrect questions manually. We strongly believe that a question should have a similar syntactic structure to a sentence from which it is generated. For example, the sentence \"Apple's first logo is designed by Jobs and Wayne.\", and the generated question \"What is designed by Jobs and Wayne?\" are syntactically similar. Hence, to judge the syntactic correctness of each generated question automatically, we apply the tree kernel functions and re-implement the syntactic tree kernel model for computing the syntactic similarity of each question with the associated content information. We first parse the sentences and the questions into syntactic trees using the Charniak parser 8 (Charniak, 1999) . Then we calculate the similarity between the two corresponding trees using the tree kernel method (Collins and Duffy, 2001) . We convert each parenthetic representation generated by the Charniak parser into its corresponding tree and give the trees as input to the tree kernel functions for measuring the syntactic similarity.", |
| "cite_spans": [ |
| { |
| "start": 889, |
| "end": 905, |
| "text": "(Charniak, 1999)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1006, |
| "end": 1031, |
| "text": "(Collins and Duffy, 2001)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Each tree T is represented by an m dimensional vector", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "v(T ) = v 1 (T ), v 2 (T ), \u2022 \u2022 \u2022 v m (T )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": ", where the i-th element v i (T ) is the number of occurrences of the i-th tree fragment in tree T . The tree fragments of a tree are all of its sub-trees which include at least one production with the restriction that no production rules can be broken into incomplete parts. Figure 1 shows an example tree and a portion of its subtrees. The sub-trees of the NP covering \"the press\". Implicitly we enumerate all the possible tree fragments 1, 2, \u2022 \u2022 \u2022 , m. These fragments are the axis of this m-dimensional space. Note that this could be done only implicitly, since the number m is extremely large. Because of this, Collins and Duffy Collins and Duffy (2001) defined the tree kernel algorithm whose computational complexity does not depend on m. The tree kernel of two syntactic trees T 1 and T 2 is actually the inner product of v(T 1 ) and v(T 2 ):", |
| "cite_spans": [ |
| { |
| "start": 635, |
| "end": 659, |
| "text": "Collins and Duffy (2001)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 276, |
| "end": 284, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "T K(T 1 , T 2 ) = v(T 1 ).v(T 2 )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We define the indicator function I i (n) to be 1 if the sub-tree i is seen rooted at node n and 0 otherwise. It follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "v i (T 1 ) = n 1 \u2208N 1 I i (n 1 ) v i (T 2 ) = n 2 \u2208N 2 I i (n 2 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where, N 1 and N 2 are the set of nodes in T 1 and T 2 respectively. So, we can derive:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "T K(T 1 , T 2 ) = v(T 1 ).v(T 2 ) = i v i (T 1 )v i (T 2 ) = n 1 \u2208N 1 n 2 \u2208N 2 i I i (n 1 )I i (n 2 ) = n 1 \u2208N 1 n 2 \u2208N 2 C(n 1 , n 2 )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where, we define C(n 1 , n 2 ) = i I i (n 1 )I i (n 2 ). Next, we note that C(n 1 , n 2 ) can be computed in polynomial time, due to the following recursive definition:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "1. If the productions at n 1 and n 2 are different then C(n 1 , n 2 ) = 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "2. If the productions at n 1 and n 2 are the same, and n 1 and n 2 are pre-terminals, then C(n 1 , n 2 ) = 1 3. Else if the productions at n 1 and n 2 are not pre-terminals,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "C(n 1 , n 2 ) = nc(n 1 ) j=1 (1 + C(ch(n 1 , j), ch(n 2 , j)))", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where, nc(n 1 ) is the number of children of n 1 in the tree; because the productions at n 1 and n 2 are the same, we have nc(n 1 ) = nc(n 2 ). The i-th child-node of n 1 is ch(n 1 , i).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Note that, the tree kernel (TK) function computes the number of common subtrees between two trees. Such subtrees are subject to the constraint that their nodes are taken with all or none of the children they have in the original tree. The TK (tree kernel) function gives the similarity score between each sentence in the given body of texts and the generated question based on the syntactic structure. Each sentence 9 contributes a score to the questions and then the questions are ranked by considering the average of similarity scores.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judging Syntactic Correctness", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We consider the task of automatically generating questions from topics where each topic is associated with a body of texts having a useful description about the topic. The proposed QG system ranks the questions by combining the topic relevance scores and the syntactic similarity scores of Section 3.3 and Section 3.4 using the formula as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "w * ESSK scor e + (1 \u2212 w) * SY N scor e", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "System Description", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Here w is the importance parameter which holds the value in [0, 1]. We kept w = 0.5 to give equal importance 10 to topic relevance and syntactic correctness.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Description", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To run our experiments, we use the dataset provided in the Question Generation Shared Task and Evaluation Challenge 11 (QGSTEC, 2010) for the task of question generation from paragraphs. This dataset consists of 60 paragraphs about 60 topics that were originally collected from several Wikipedia, OpenLearn, and Yahoo!Answers articles. The paragraphs contain around 5 \u2212 7 sentences for a total of 100 \u2212 200 tokens (including punctuation). This dataset includes a diversity of topics of general interest. We consider these topics and treat the paragraphs as their associated useful content information in order to generate a set of questions using our proposed QG approach. We use 10 topics and their associated paragraphs as the development data 12 . A total of 2186 questions are generated from the remaining 50 topics (test data) to be ranked.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Corpus", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We use a methodology derived from Boyer and Piwek (2010) ; Heilman and Smith (2010b) to evaluate the performance of our QG systems. Three native English-speaking university graduate students judge 13 the quality of the top-ranked 20% questions using two criteria: topic relevance and syntactic correctness. For topic relevance, the given score is an integer between 1 (very poor) and 5 (very good) and is guided by the consideration of the following aspects: 1. Semantic correctness (i.e. the question is meaningful and related to the topic), 2. Correctness of question type (i.e. a correct question word is used), and 3. Referential clarity (i.e. it is clearly possible to understand what the question refers to). For syntactic correctness, the assigned score is also an integer between 1 (very poor) and 5 (very good). Whether a question is grammatically correct or not is checked here. For each question, we calculate the average of the judges' scores.", |
| "cite_spans": [ |
| { |
| "start": 34, |
| "end": 56, |
| "text": "Boyer and Piwek (2010)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 59, |
| "end": 84, |
| "text": "Heilman and Smith (2010b)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "We report the performance of the following systems in order to do a meaningful comparison with our proposed QG system:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Systems for Comparison", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "(1) Baseline1: This is our QG system without any question-ranking method applied to it. Here, we randomly select 20% questions and rate them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Systems for Comparison", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "(2) Baseline2: For our second baseline, we build a QG system using an alternative topic modeling approach. Here we use a topic signature model (instead of using LDA as discussed in Section 3.3.1) (Lin and Hovy, 2000) to identify the important sub-topics from the sentences present in the body of texts. The sub-topics are the important words in the context which are closely related to the topic and have significantly greater probability of occurring in the given text compared to that in a large background corpus. We use a topic signature computation tool 14 for this purpose. The background corpus that is used in this tool contains 5000 documents from the English GigaWord Corpus. For example, from the given body of texts of the topic Apple Inc.", |
| "cite_spans": [ |
| { |
| "start": 196, |
| "end": 216, |
| "text": "(Lin and Hovy, 2000)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Systems for Comparison", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "importance to topic relevance and syntactic correctness. The parameter w can be tuned to investigate its impact on the system performance. 11 http://www.questiongeneration.org/mediawiki 12 We use this data to build necessary general purpose rules for our QG model. 13 The inter-annotator agreement of Fleiss' \u03ba = 0.41, 0.45, 0.62, and 0.33 are computed for the three judges for the results in Table 3 to Table 6 , indicating moderate (for the first two tables), substantial and fair agreement (Landis and Koch, 1977) between the raters, respectively.", |
| "cite_spans": [ |
| { |
| "start": 265, |
| "end": 267, |
| "text": "13", |
| "ref_id": null |
| }, |
| { |
| "start": 493, |
| "end": 516, |
| "text": "(Landis and Koch, 1977)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 393, |
| "end": 411, |
| "text": "Table 3 to Table 6", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Systems for Comparison", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "14 Available at http://www.cis.upenn.edu/ lannie/topicS.html Logos, we get these sub-topics: jobs, logo, themes, rainbow, monochromatic. Then we use the same steps of Section 3.3.2 and Section 3.4, and use equation 5 to combine the scores. We evaluate the top-ranked 20% questions and show the results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Systems for Comparison", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "(3) State-of-the-art: We choose a publicly available state-of-the-art QG system 15 to generate questions from the sentences in the body of texts. This system was shown to achieve good performance in generating fact-based questions about the content of a given article (Heilman and Smith, 2010b) . Their method ranks the questions automatically using a logistic regression model. Given a paragraph as input, this system processes each sentence and generates a set of ranked questions for the entire paragraph. We evaluate the top-ranked 20% questions 16 and report the results. Table 3 shows the average topic relevance and syntactic correctness scores for all the systems. From these results we can see that the proposed QG system improves the topic relevance and syntactic correctness scores over the Baseline1 system by 61.86%, and 34.98%, respectively, and improves the topic relevance and syntactic correctness scores over the Baseline2 system by 7.40%, and 7.57%, respectively. On the other hand, the proposed QG system improves the topic relevance and syntactic correctness scores over the state-of-the-art system by 3.88%, and 2.89%, respectively. From these results, we can clearly observe the effectiveness of our proposed QG system. The improvements in the results are statistically significant 17 (p < 0.05).", |
| "cite_spans": [ |
| { |
| "start": 268, |
| "end": 294, |
| "text": "(Heilman and Smith, 2010b)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 550, |
| "end": 552, |
| "text": "16", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 577, |
| "end": 584, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Systems for Comparison", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "The main goal of this work was to generate as many questions as possible related to the topic. For this reason, we considered generating the basic questions. These questions were also useful to provide variety in the question space. We generated these questions using the NE information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "4.3.3" |
| }, |
| { |
| "text": "As the performance of the NE-taggers were not that great, we had a few of these questions generated. In most cases, these questions were outranked by other important questions that included a combination of topics and sub-topics to show higher topic relevance score measured by ESSK. Therefore, they do not have a considerable impact on the evaluation statistics. We claim that the overall performance of our systems could be further improved if the accuracy of the NE-tagger and the semantic role labeler could be increased. 58.4 44.5 Table 6 : Acceptability of the questions in % (narrowed focus)", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 536, |
| "end": 543, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "4.3.3" |
| }, |
| { |
| "text": "Narrowing Down the Focus We run further experiments by narrowing down the topic focus. We consider only the topics about persons (biographical focus). We choose 10 persons as our topics from the list of the 20th century's 100 most influential people, published in Time magazine in 1999 and obtained the paragraphs containing their biographical information from Wikipedia articles 18 . We generate a total of 390 questions from the considered 10 topics and rank them using different ranking schemes as discussed before. We evaluate the top 20% questions using the similar evaluation methodologies and report the results in Table 5 . Again, we evaluate the top 15% and top 30% questions separately for each QG system and report the results indicating the percentage of questions rated as acceptable in Table 6 . From these tables, we can clearly see the improvements in all the scores for all the QG approaches. This is reasonable because the accuracy of the NE tagger and the semantic role labeler is increased for the biographical data. These results further demonstrate that the proposed system is significantly better (at p < 0.05) than the other considered systems. We plan to make our created resources available to other researchers. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 622, |
| "end": 629, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 800, |
| "end": 807, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Discussion", |
| "sec_num": "4.3.3" |
| }, |
| { |
| "text": "An input to our systems is for instance, the topic \"Apple Inc. Logos\" with the associated content information (body of texts): \"Apple's first logo, designed by Jobs and Wayne, depicts Sir Isaac Newton sitting under an apple tree. Almost immediately, though, this was replaced by Rob Janoff's \"rainbow Apple\", the now-familiar rainbow-colored silhouette of an apple with a bite taken out of it. Janoff presented Jobs with several different monochromatic themes for the \"bitten\" logo, and Jobs immediately took a liking to it. While Jobs liked the logo, he insisted it be in color to humanize the company. The Apple logo was designed with a bite so that it would be recognized as an apple rather than a cherry. The colored stripes were conceived to make the logo more accessible, and to represent the fact the monitor could reproduce images in color. In 1998, with the roll-out of the new iMac, Apple discontinued the rainbow theme and began to use monochromatic themes, nearly identical in shape to its previous rainbow incarnation.\" The output of our systems is the ranked lists of questions. We show an example output in Table 7 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1122, |
| "end": 1129, |
| "text": "Table 7", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "An Input-Output Example", |
| "sec_num": "4.3.4" |
| }, |
| { |
| "text": "In this paper, we have considered the task of automatically generating questions from topics where each topic is associated with a body of texts containing useful information. We have exploited the named entity and semantic role labeling information to accomplish the task. A key aspect of our approach is the use of LDA to automatically discover the hidden sub-topics from the sentences. We have proposed a method to rank the generated questions by considering: 1) sub-topical similarity determined using ESSK algorithm in combination with word sense disambiguation, and 2) syntactic similarity determined using the syntactic tree kernel based method. We have compared the proposed QG system with two baseline systems and one stateof-the-art system. The evaluation results have shown that the proposed QG system significantly outperforms all other considered systems as our system generated top-ranked questions are found to be better in topic-relevance and syntactic correctness than those of the other systems. We have conducted another experiment by narrowing down the topic focus. In this experiment, we have considered persons as topics. Our experiments have demonstrated the effectiveness of the proposed topic to question generation approach. We hope to carry on this ideas and develop further mechanisms to question generation based on the dependency features of the answers and answer finding (Li and Roth, 2006; Pinchak and Lin, 2006) .", |
| "cite_spans": [ |
| { |
| "start": 1403, |
| "end": 1422, |
| "text": "(Li and Roth, 2006;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1423, |
| "end": 1445, |
| "text": "Pinchak and Lin, 2006)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": null |
| }, |
| { |
| "text": "We mainly focus on generating Who, What, Where, Which, When, Why and How questions in this research.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://www.questiongeneration.org/QGSTEC2010", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Available at http://www.ark.cs.cmu.edu/mheilman/ 4 Available at http://cogcomp.cs.illinois.edu/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Available at http://cemantix.org/assert.html", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Available at http://code.google.com/p/topic-modeling-tool/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The formulae denotes a dynamic programming technique to compute the ESSK similarity score where d is the vector space dimension i.e. the number of all possible subsequences of up to length d. More information about these formulae can be obtained fromHirao et al. (2003Hirao et al. ( , 2004", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Available at ftp://ftp.cs.brown.edu/pub/nlparser/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We consider that a question is syntactically fluent as well as relevant to the topic if it has similar syntactic sub-trees as those of the most sentences in the body of texts.10 A syntactically incorrect question is not useful even if it is relevant to the topic. This motivated us to give equal", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Available at http://www.ark.cs.cmu.edu/mheilman/questions/16 We ignore the yes-no questions for our task.17 We tested statistical significance using Student's t-test.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "http://en.wikipedia.org/wiki/Time_100", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "The research reported in this paper was supported by the Natural Sciences and Engineering Research Council (NSERC) of Canada -discovery grant and the University of Lethbridge.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Automatic Question Generation Using Discourse Cues", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Mannem", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 6th Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Agarwal, M., Shah, R., and Mannem, P. (2011). Automatic Question Generation Using Discourse Cues. In Proceedings of the 6th Workshop on Innovative Use of NLP for Building Educational Applications, pages 1-9. ACL.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Automation of Question Generation from Sentences", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Ali", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Chali", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hasan", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of QG2010: The Third Workshop on Question Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ali, H., Chali, Y., and Hasan, S. A. (2010). Automation of Question Generation from Sentences. In Proceedings of QG2010: The Third Workshop on Question Generation, Pittsburgh, USA.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Automated Question Answering: Review of the Main Approaches", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Andrenucci", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Sneiders", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 3rd International Conference on Information Technology and Applications (ICITA'05)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrenucci, A. and Sneiders, E. (2005). Automated Question Answering: Review of the Main Approaches. In Proceedings of the 3rd International Conference on Information Technology and Applications (ICITA'05), Sydney, Australia.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Latent Dirichlet Allocation", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [ |
| "M" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "I" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "993--1022", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Blei, D. M., Ng, A. Y., and Jordan, M. I. (2003). Latent Dirichlet Allocation. Journal of Machine Learning Research, 3:993-1022.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Proceedings of QG2010: The Third Workshop on Question Generation. Pittsburgh: questiongeneration.org", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [ |
| "E" |
| ], |
| "last": "Boyer", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Piwek", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Boyer, K. E. and Piwek, P. (2010). Proceedings of QG2010: The Third Workshop on Question Generation. Pittsburgh: questiongeneration.org.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Automatic Question Generation for Vocabulary Assessment", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "C" |
| ], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "A" |
| ], |
| "last": "Frishkoff", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Eskenazi", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brown, J. C., Frishkoff, G. A., and Eskenazi, M. (2005). Automatic Question Generation for Vocabulary Assessment. In Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing, Vancouver, British Columbia, Canada.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Word Sequence Kernels", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Cancedda", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Gaussier", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Goutte", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "M" |
| ], |
| "last": "Renders", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "1059--1082", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cancedda, N., Gaussier, E., Goutte, C., and Renders, J. M. (2003). Word Sequence Kernels. Journal of Machine Learning Research, 3:1059-1082.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "LDA based Similarity Modeling for Question Answering", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Celikyilmaz", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Hakkani-Tur", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Tur", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL HLT 2010 Workshop on Semantic Search, SS '10", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Celikyilmaz, A., Hakkani-Tur, D., and Tur, G. (2010). LDA based Similarity Modeling for Question Answering. In Proceedings of the NAACL HLT 2010 Workshop on Semantic Search, SS '10, pages 1-9. ACL.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Do Automatic Annotation Techniques Have Any Impact on Supervised Complex Question Answering?", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Chali", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hasan", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "R" |
| ], |
| "last": "Joty", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint conference of the 47th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "329--332", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chali, Y., Hasan, S. A., and Joty, S. R. (2009a). Do Automatic Annotation Techniques Have Any Impact on Supervised Complex Question Answering? In Proceedings of the Joint conference of the 47th Annual Meeting of the Association for Computational Linguistics (ACL-IJCNLP 2009), pages 329-332, Suntec, Singapore.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Improving Graph-based Random Walks for Complex Question Answering using Syntactic, Shallow Semantic and Extended String Subsequence Kernels", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Chali", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hasan", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "R" |
| ], |
| "last": "Joty", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Information Processing & Management", |
| "volume": "47", |
| "issue": "6", |
| "pages": "843--855", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chali, Y., Hasan, S. A., and Joty, S. R. (2011). Improving Graph-based Random Walks for Complex Question Answering using Syntactic, Shallow Semantic and Extended String Subsequence Kernels. Information Processing & Management, 47(6):843-855.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Complex Question Answering: Unsupervised Learning Approaches and Experiments", |
| "authors": [ |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Chali", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "R" |
| ], |
| "last": "Joty", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hasan", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "35", |
| "issue": "", |
| "pages": "1--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chali, Y., Joty, S. R., and Hasan, S. A. (2009b). Complex Question Answering: Unsupervised Learning Approaches and Experiments. Journal of Artificial Intelligence Research, 35:1-47.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A Maximum-Entropy-Inspired Parser", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Charniak", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charniak, E. (1999). A Maximum-Entropy-Inspired Parser. In Technical Report CS-99-12, Brown University, Computer Science Department.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Generating Questions Automatically from Informational Text", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Aist", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Mostow", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 2nd Workshop on Question Generation (AIED 2009)", |
| "volume": "", |
| "issue": "", |
| "pages": "17--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen, W., Aist, G., and Mostow, J. (2009). Generating Questions Automatically from Informa- tional Text. In Proceedings of the 2nd Workshop on Question Generation (AIED 2009), pages 17-24.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Convolution Kernels for Natural Language", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Duffy", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "625--632", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Collins, M. and Duffy, N. (2001). Convolution Kernels for Natural Language. In Proceedings of Neural Information Processing Systems, pages 625-632, Vancouver, Canada.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A Noisy-channel Approach to Question Answering", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Echihabi", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 41st Annual Meeting on Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "16--23", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Echihabi, A. and Marcu, D. (2003). A Noisy-channel Approach to Question Answering. In Proceedings of the 41st Annual Meeting on Association for Computational Linguistics -Volume 1, pages 16-23. ACL.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "WordNet -An Electronic Lexical Database. Cambridge, MA", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Fellbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fellbaum, C. (1998). WordNet -An Electronic Lexical Database. Cambridge, MA. MIT Press.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Stochastic Relaxation, Gibbs Distributions, and the Bayesian Restoration of Images", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Geman", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Geman", |
| "suffix": "" |
| } |
| ], |
| "year": 1984, |
| "venue": "IEEE Trans. Pattern Anal. Mach. Intell", |
| "volume": "6", |
| "issue": "", |
| "pages": "721--741", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geman, S. and Geman, D. (1984). Stochastic Relaxation, Gibbs Distributions, and the Bayesian Restoration of Images. IEEE Trans. Pattern Anal. Mach. Intell., 6:721-741.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Intelligent Tutoring Systems with Conversational Dialogue", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "C" |
| ], |
| "last": "Graesser", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Vanlehn", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "P" |
| ], |
| "last": "Rose", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "W" |
| ], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Harter", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "AI Magazine", |
| "volume": "22", |
| "issue": "4", |
| "pages": "39--52", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Graesser, A. C., VanLehn, K., Rose, C. P., Jordan, P. W., and Harter, D. (2001). Intelligent Tutoring Systems with Conversational Dialogue. AI Magazine, 22(4):39-52.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Prediction and Semantic Association", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "L" |
| ], |
| "last": "Griffiths", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Steyvers", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "NIPS'02", |
| "volume": "", |
| "issue": "", |
| "pages": "11--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Griffiths, T. L. and Steyvers, M. (2002). Prediction and Semantic Association. In NIPS'02, pages 11-18.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Shallow Semantic Parsing Using Support Vector Machines", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Hacioglu", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "H" |
| ], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hacioglu, K., Pradhan, S., Ward, W., Martin, J. H., and Jurafsky, D. (2003). Shallow Semantic Parsing Using Support Vector Machines. In Technical Report TR-CSLR-2003-03, University of Colorado.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Extracting Simplified Statements for Factual Question Generation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Heilman", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Third Workshop on Question Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heilman, M. and Smith, N. A. (2010a). Extracting Simplified Statements for Factual Question Generation. In Proceedings of the Third Workshop on Question Generation.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Good Question! Statistical Ranking for Question Generation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Heilman", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "609--617", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Heilman, M. and Smith, N. A. (2010b). Good Question! Statistical Ranking for Question Generation. In Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics, pages 609-617.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Experiments with Interactive Question-Answering", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Hickl", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Lehmann", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Harabagiu", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL'05)", |
| "volume": "", |
| "issue": "", |
| "pages": "60--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hickl, A., Lehmann, J., Williams, J., and Harabagiu, A. (2005). Experiments with Interactive Question-Answering. In In Proceedings of the 43rd Annual Meeting of the Association for Computational Linguistics (ACL'05), pages 60-69.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "NTT's Multiple Document Summarization System for DUC2003", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Hirao", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Suzuki", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Isozaki", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Maeda", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the Document Understanding Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hirao, T., Suzuki, J., Isozaki, H., and Maeda, E. (2003). NTT's Multiple Document Summariza- tion System for DUC2003. In Proceedings of the Document Understanding Conference.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Dependency-based Sentence Alignment for Multiple Document Summarization", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Hirao", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Suzuki", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Isozaki", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Maeda", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of COLING 2004", |
| "volume": "", |
| "issue": "", |
| "pages": "446--452", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hirao, T., Suzuki, J., Isozaki, H., and Maeda, E. (2004). Dependency-based Sentence Alignment for Multiple Document Summarization. In Proceedings of COLING 2004, pages 446-452, Geneva, Switzerland. COLING.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Generating High Quality Questions from Low Quality Questions", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Ignatova", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Bernhard", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Workshop on the Question Generation Shared Task and Evaluation Challenge", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ignatova, K., Bernhard, D., and Gurevych, I. (2008). Generating High Quality Questions from Low Quality Questions. In Proceedings of the Workshop on the Question Generation Shared Task and Evaluation Challenge, Arlington, VA. NSF.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "From Treebank to PropBank", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Kingsbury", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kingsbury, P. and Palmer, M. (2002). From Treebank to PropBank. In Proceedings of the International Conference on Language Resources and Evaluation, Las Palmas, Spain.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Towards Natural Question Guided Search", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Kotov", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 19th international conference on World wide web, WWW '10", |
| "volume": "", |
| "issue": "", |
| "pages": "541--550", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kotov, A. and Zhai, C. (2010). Towards Natural Question Guided Search. In Proceedings of the 19th international conference on World wide web, WWW '10, pages 541-550. ACM.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "The Measurement of Observer Agreement for Categorical Data", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "R" |
| ], |
| "last": "Landis", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [ |
| "G" |
| ], |
| "last": "Koch", |
| "suffix": "" |
| } |
| ], |
| "year": 1977, |
| "venue": "Biometrics", |
| "volume": "33", |
| "issue": "1", |
| "pages": "159--174", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Landis, J. R. and Koch, G. G. (1977). The Measurement of Observer Agreement for Categorical Data. Biometrics, 33(1):159-174.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Questions and Information Systems", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [ |
| "W" |
| ], |
| "last": "Lauer", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Peacock", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "C" |
| ], |
| "last": "Graesser", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lauer, T. W., Peacock, E., and Graesser, A. C. (1992). Questions and Information Systems.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Learning Question Classifiers: The Role of Semantic Information", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Journal of Natural Language Engineering", |
| "volume": "12", |
| "issue": "3", |
| "pages": "229--249", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li, X. and Roth, D. (2006). Learning Question Classifiers: The Role of Semantic Information. Journal of Natural Language Engineering, 12(3):229-249.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Automatic Question Generation from Queries", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Workshop on the Question Generation Shared Task and Evaluation Challenge", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lin, C. Y. (2008). Automatic Question Generation from Queries. In Proceedings of the Workshop on the Question Generation Shared Task and Evaluation Challenge, Arlington, VA. NSF.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "The Automated Acquisition of Topic Signatures for Text Summarization", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "H" |
| ], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the 18th conference on Computational linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "495--501", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lin, C. Y. and Hovy, E. H. (2000). The Automated Acquisition of Topic Signatures for Text Summarization. In Proceedings of the 18th conference on Computational linguistics, pages 495-501.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Text Classification using String Kernels", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Lodhi", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Saunders", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Shawe-Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Cristianini", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Watkins", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "2", |
| "issue": "", |
| "pages": "419--444", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lodhi, H., Saunders, C., Shawe-Taylor, J., Cristianini, N., and Watkins, C. (2002). Text Classification using String Kernels. Journal of Machine Learning Research, 2:419-444.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Question Generation from Paragraphs at Upenn", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Mannem", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Third Workshop on Question Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mannem, P., Prasad, R., and Joshi., A. (2010). Question Generation from Paragraphs at Upenn. In Proceedings of the Third Workshop on Question Generation.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "MALLET: A Machine Learning for Language Toolkit", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "K" |
| ], |
| "last": "Mccallum", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "McCallum, A. K. (2002). MALLET: A Machine Learning for Language Toolkit.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "A New Approach to Ranking Over-Generated Questions", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [ |
| "C" |
| ], |
| "last": "Mcconnell", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Mannem", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the AAAI Fall Symposium on Question Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "McConnell, C. C., Mannem, P., Prasad, R., and Joshi, A. (2011). A New Approach to Ranking Over-Generated Questions. In Proceedings of the AAAI Fall Symposium on Question Generation.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "A Web-based Testing System with Dynamic Question Generation", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Mcgough", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Mortensen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Fadali", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "ASEE/IEEE Frontiers in Education Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "McGough, J., Mortensen, J., Johnson, J., and Fadali, S. (2001). A Web-based Testing System with Dynamic Question Generation. In ASEE/IEEE Frontiers in Education Conference.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Using LDA to Detect Semantically Incoherent Documents", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Misra", |
| "suffix": "" |
| }, |
| { |
| "first": "O", |
| "middle": [], |
| "last": "Capp\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Yvon", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Twelfth Conference on Computational Natural Language Learning, CoNLL '08", |
| "volume": "", |
| "issue": "", |
| "pages": "41--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Misra, H., Capp\u00e9, O., and Yvon, F. (2008). Using LDA to Detect Semantically Incoherent Documents. In Proceedings of the Twelfth Conference on Computational Natural Language Learning, CoNLL '08, pages 41-48. ACL.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Computer-aided Generation of Multiple-Choice Tests", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mitkov", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "A" |
| ], |
| "last": "Ha", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the HLT-NAACL 03 workshop on Building educational applications using natural language processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "17--22", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitkov, R. and Ha, L. A. (2003). Computer-aided Generation of Multiple-Choice Tests. In Proceedings of the HLT-NAACL 03 workshop on Building educational applications using natural language processing -Volume 2, pages 17-22.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "A Tree Kernel Approach to Question and Answer Classification in Question Answering Systems", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Basili", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 5th International Conference on Language Resources and Evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moschitti, A. and Basili, R. (2006). A Tree Kernel Approach to Question and Answer Classification in Question Answering Systems. In Proceedings of the 5th International Conference on Language Resources and Evaluation, Genoa, Italy.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Exploiting Syntactic and Shallow Semantic Kernels for Question/Answer Classification", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Quarteroni", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Basili", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Manandhar", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "776--783", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moschitti, A., Quarteroni, S., Basili, R., and Manandhar, S. (2007). Exploiting Syntactic and Shallow Semantic Kernels for Question/Answer Classification. In Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, pages 776-783, Prague, Czech Republic. ACL.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Question Generation from Concept Maps", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "M" |
| ], |
| "last": "Olney", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "C" |
| ], |
| "last": "Graesser", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "K" |
| ], |
| "last": "Person", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Dialogue and Discourse", |
| "volume": "3", |
| "issue": "2", |
| "pages": "75--99", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Olney, A. M., Graesser, A. C., and Person, N. K. (2012). Question Generation from Concept Maps. Dialogue and Discourse, 3(2):75-99.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "The Proposition Bank: An Annotated Corpus of Semantic Roles", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Gildea", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Kingsbury", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Computational Linguistics", |
| "volume": "31", |
| "issue": "1", |
| "pages": "71--106", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Palmer, M., Gildea, D., and Kingsbury, P. (2005). The Proposition Bank: An Annotated Corpus of Semantic Roles. Computational Linguistics, 31(1):71-106.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Answer Mining from On-Line Documents", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Pasca", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "M" |
| ], |
| "last": "Harabagiu", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of the Association for Computational Linguistics 39th Annual Meeting and 10th Conference of the European Chapter Workshop on Open-Domain Question Answering", |
| "volume": "", |
| "issue": "", |
| "pages": "38--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pasca, M. and Harabagiu, S. M. (2001). Answer Mining from On-Line Documents. In Proceedings of the Association for Computational Linguistics 39th Annual Meeting and 10th Conference of the European Chapter Workshop on Open-Domain Question Answering, pages 38-45, Toulouse, France.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "A Probabilistic Answer Type Model", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Pinchak", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 11th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "393--400", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pinchak, C. and Lin, D. (2006). A Probabilistic Answer Type Model. In Proceedings of the 11th Conference of the European Chapter of the Association for Computational Linguistics, pages 393-400.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Design Challenges and Misconceptions in Named Entity Recognition", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Ratinov", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Thirteenth Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "147--155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ratinov, L. and Roth, D. (2009). Design Challenges and Misconceptions in Named Entity Recognition. In Proceedings of the Thirteenth Conference on Computational Natural Language Learning, pages 147-155. ACL.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Experiments on Generating Questions About Facts", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Rus", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "C" |
| ], |
| "last": "Graesser", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 8th International Conference on Computational Linguistics and Intelligent Text Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "444--455", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rus, V., Cai, Z., and Graesser, A. C. (2007). Experiments on Generating Questions About Facts. In Proceedings of the 8th International Conference on Computational Linguistics and Intelligent Text Processing, pages 444-455. Springer-Verlag.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "The Question Generation Shared Task and Evaluation Challenge", |
| "authors": [ |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Rus", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "C" |
| ], |
| "last": "Graesser", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Workshop on the Question Generation Shared Task and Evaluation Challenge, Final Report", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rus, V. and Graesser, A. C. (2009). The Question Generation Shared Task and Evaluation Challenge. In Workshop on the Question Generation Shared Task and Evaluation Challenge, Final Report, The University of Memphis. National Science Foundation.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "The Importance of Being Important: Question Generation", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Vanderwende", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Workshop on the Question Generation Shared Task and Evaluation Challenge", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vanderwende, L. (2008). The Importance of Being Important: Question Generation. In Proceedings of the Workshop on the Question Generation Shared Task and Evaluation Challenge, Arlington, VA. NSF.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Automatic Question Generation for Learning Evaluation in Medicine", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Tianyong", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Wenyin", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "LNCS", |
| "volume": "4823", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wang, W., Tianyong, H., and Wenyin, L. (2008). Automatic Question Generation for Learning Evaluation in Medicine. In LNCS Volume 4823.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "LDA-based Document Models for Ad-hoc Retrieval", |
| "authors": [ |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "B" |
| ], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of the 29th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '06", |
| "volume": "", |
| "issue": "", |
| "pages": "178--185", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei, X. and Croft, W. B. (2006). LDA-based Document Models for Ad-hoc Retrieval. In Proceedings of the 29th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '06, pages 178-185. ACM.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Question Classification using Support Vector Machines", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the Special Interest Group on Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "26--32", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhang, A. and Lee, W. (2003). Question Classification using Support Vector Machines. In Proceedings of the Special Interest Group on Information Retrieval, pages 26-32, Toronto, Canada. ACM.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "K2Q: Generating Natural Language Questions from Keywords with User Refinements", |
| "authors": [ |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Si", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 5th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "947--955", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zheng, Z., Si, X., Chang, E. Y., and Zhu, X. (2011). K2Q: Generating Natural Language Questions from Keywords with User Refinements. In Proceedings of the 5th International Joint Conference on Natural Language Processing, pages 947-955.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "text": "(a) An example tree (b)", |
| "uris": null |
| }, |
| "TABREF0": { |
| "html": null, |
| "text": "Apple 's first logo] is [TARGET designed ] [ARG0 by Jobs and Wayne]. The output contains one verb (predicate) with its arguments (i.e. semantic roles). These arguments are used to generate specific questions from the sentences. For example, we can replace [ARG1 ..] with What and generate a question as: \"What is designed by Jobs and Wayne?\". Similarly, [ARG0 .", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "text": "Semantic roles with possible question words", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "html": null, |
| "text": "Topic relevance and syntactic correctness scores Acceptability Test In another evaluation setting, the three annotators judge the questions for their overall acceptability as a good question. If a question shows no deficiency in terms of the criteria considered for topic relevance and syntactic correctness, it is termed as acceptable. We evaluate the top 15% and top 30% questions separately for each QG system and report the results indicating the percentage of questions rated as acceptable inTable 4. The results indicate that the percentage of the questions rated acceptable is reduced when we evaluate more number of questions which proves the effectiveness of our QG system.", |
| "content": "<table><tr><td>Systems</td><td colspan=\"2\">Top 15%</td><td>Top 30%</td></tr><tr><td>Baseline1 (No Ranking)</td><td/><td>35.2</td><td>32.6</td></tr><tr><td>Baseline2 (Topic Signature)</td><td/><td>45.9</td><td>33.8</td></tr><tr><td colspan=\"2\">State-of-the-art (Heilman and Smith, 2010b)</td><td>44.7</td><td>38.5</td></tr><tr><td>Proposed QG System</td><td/><td>46.5</td><td>40.6</td></tr><tr><td colspan=\"4\">Table 4: Acceptability of the questions (in %)</td></tr><tr><td>Systems</td><td colspan=\"2\">Topic Relevance</td><td>Syntactic Correctness</td></tr><tr><td>Baseline1 (No Ranking)</td><td>3.20</td><td/><td>3.54</td></tr><tr><td>Baseline2 (Topic Signature)</td><td>3.80</td><td/><td>3.92</td></tr><tr><td>State-of-the-art (Heilman and Smith, 2010b)</td><td>4.01</td><td/><td>4.15</td></tr><tr><td>Proposed QG System</td><td>4.12</td><td/><td>4.25</td></tr></table>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "html": null, |
| "text": "Topic relevance and syntactic correctness scores (narrowed focus)", |
| "content": "<table><tr><td>Systems</td><td>Top 15%</td><td>Top 30%</td></tr><tr><td>Baseline1 (No Ranking)</td><td>41.3</td><td>37.1</td></tr><tr><td>Baseline2 (Topic Signature)</td><td>53.5</td><td>43.6</td></tr><tr><td>State-of-the-art (Heilman and Smith, 2010b)</td><td>57.5</td><td>43.2</td></tr><tr><td>Proposed QG System</td><td/><td/></tr></table>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "html": null, |
| "text": "Systems Top-ranked questions Baseline2 Who presented Jobs with several different monochromatic themes for the bitten logo? What were conceived to make the logo more accessible? Who liked the logo? State-of-the-art Whose first logo depicts Sir Isaac Newton sitting under an apple tree? What depicts Sir Isaac Newton sitting under an apple tree? What did Janoff present Jobs with? Proposed QG System Who designed Apple's first logo? What was replaced by Rob Janoff 's \"rainbow Apple\"? What were conceived to make the logo more accessible?", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF7": { |
| "html": null, |
| "text": "System output", |
| "content": "<table/>", |
| "num": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |