| { |
| "paper_id": "W06-0108", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T04:00:59.580837Z" |
| }, |
| "title": "Cluster-based Language Model for Sentence Retrieval in Chinese Question Answering", |
| "authors": [ |
| { |
| "first": "Youzheng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition Institute of Automation Chinese Academy of Sciences No", |
| "institution": "", |
| "location": { |
| "addrLine": "95 Zhongguancun East Road", |
| "postCode": "100080", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "yzwu@nlpr.ia.ac.cn" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition Institute of Automation Chinese Academy of Sciences No", |
| "institution": "", |
| "location": { |
| "addrLine": "95 Zhongguancun East Road", |
| "postCode": "100080", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "jzhao@nlpr.ia.ac.cn" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "National Laboratory of Pattern Recognition Institute of Automation Chinese Academy of Sciences No", |
| "institution": "", |
| "location": { |
| "addrLine": "95 Zhongguancun East Road", |
| "postCode": "100080", |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "boxu@nlpr.ia.ac.cn" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Sentence retrieval plays a very important role in question answering system. In this paper, we present a novel cluster-based language model for sentence retrieval in Chinese question answering which is motivated in part by sentence clustering and language model. Sentence clustering is used to group sentences into clusters. Language model is used to properly represent sentences, which is combined with sentences model, cluster/topic model and collection model. For sentence clustering, we propose two approaches that are One-Sentence-Multi-Topics and One-Sentence-One-Topic respectively. From the experimental results on 807 Chinese testing questions, we can conclude that the proposed cluster-based language model outperforms over the standard language model for sentence retrieval in Chinese question answering.", |
| "pdf_parse": { |
| "paper_id": "W06-0108", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Sentence retrieval plays a very important role in question answering system. In this paper, we present a novel cluster-based language model for sentence retrieval in Chinese question answering which is motivated in part by sentence clustering and language model. Sentence clustering is used to group sentences into clusters. Language model is used to properly represent sentences, which is combined with sentences model, cluster/topic model and collection model. For sentence clustering, we propose two approaches that are One-Sentence-Multi-Topics and One-Sentence-One-Topic respectively. From the experimental results on 807 Chinese testing questions, we can conclude that the proposed cluster-based language model outperforms over the standard language model for sentence retrieval in Chinese question answering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "To facilitate the answer extraction of question answering, the task of retrieval module is to find the most relevant passages or sentences to the question. So, the retrieval module plays a very important role in question answering system, which influences both the performance and the speed of question answering. In this paper, we mainly focus on the research of improving the performance of sentence retrieval in Chinese question answering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Many retrieval approaches have been proposed for sentence retrieval in English question answering. For example, Ittycheriach [Ittycheriah, et al. 2002] and H. Yang [Hui Yang, et al. 2002] proposed vector space model. Andres [Andres, et al. 2004] and Vanessa proposed language model and translation model respectively. Compared to vector space model, language model is theoretically attractive and a potentially very effective probabilistic framework for researching information retrieval problems [Jian-Yun Nie. 2005] . However, language model for sentence retrieval is not mature yet, which has a lot of difficult problems that cannot be solved at present. For example, how to incorporate the structural information, how to resolve data sparseness problem. In this paper, we mainly focus on the research of the smoothing approach of language model because sparseness problem is more serious for sentence retrieval than for document retrieval.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 151, |
| "text": "[Ittycheriah, et al. 2002]", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 159, |
| "end": 187, |
| "text": "Yang [Hui Yang, et al. 2002]", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 224, |
| "end": 245, |
| "text": "[Andres, et al. 2004]", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 497, |
| "end": 517, |
| "text": "[Jian-Yun Nie. 2005]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "At present, the most popular smoothing approaches for language model are Jelinek-Mercer method, Bayesian smoothing using Dirichlet priors, absolute discounting and so on [C. Zhai, et al. 2001] . The main disadvantages of all these smoothing approaches are that each document model (which is estimated from each document) is interpolated with the same collection model (which is estimated from the whole collection) through a unified parameter. Therefore, it does not make any one particular document more probable than any other, on the condition that neither the documents originally contains the query term. In other word, if a document is relevant, but does not contain the query term, it is still no more probable, even though it may be topically related.", |
| "cite_spans": [ |
| { |
| "start": 174, |
| "end": 192, |
| "text": "Zhai, et al. 2001]", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As we know, most smoothing approaches of sentence retrieval in question answering are learned from document retrieval without many adaptations. In fact, question answering has some characteristics that are different from traditional document retrieval, which could be used to improve the performance of sentence retrieval. These characteristics lie in:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "1. The input of question answering is natural language question which is more unambiguous than query in traditional document retrieval.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "For traditional document retrieval, it's difficult to identify which kind of information the users want to know. For example, if the user submit the query {\u53d1\u660e/invent, \u7535\u8bdd/telephone}, search engine does not know what information is needed, who invented telephone, when telephone was invented, or other information. On the other hand, for question answering system, if the user submit the question {\u8c01\u53d1\u660e\u4e86\u7535\u8bdd\uff1f/who invented the telephone?}, it's easy to know that the user want to know the person who invented the telephone, but not other information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Although the first retrieved sentences are related to the question, they usually deal with one or more topics. That is, relevant sentences for a question may be distributed over several topics. Therefore, treating the question's words in retrieved sentences with different topics equally is unreasonable. One of the solutions is to organize the related sentences into several clusters, where a sentence can belong to about one or more clusters, each cluster is regarded as a topic. This is sentence clustering. Obviously, cluster and topic have the same meaning and can be replaced each other. In the other word, a particular entity type was expected for each question, and every special entity of that type found in a retrieved sentence was regarded as a cluster/topic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Candidate answers extracted according to the semantic category of the question's answer could be used for sentence clustering of question answering.", |
| "sec_num": "2." |
| }, |
| { |
| "text": "In this paper, we propose two novel approaches for sentence clustering. The main idea of the approaches is to conduct sentence clustering according to the candidate answers which are also considered as the names of the clusters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Candidate answers extracted according to the semantic category of the question's answer could be used for sentence clustering of question answering.", |
| "sec_num": "2." |
| }, |
| { |
| "text": "For example, given the question {\u8c01\u53d1\u660e\u4e86\u7535 \u8bdd\uff1f/who invented telephone?}, the top ten retrieved sentences and the corresponding candidate answers are shown as Based on the above analysis, this paper presents cluster-based language model for sentence retrieval of Chinese question answering. It differs from most of the previous approaches mainly as follows. 1. Sentence Clustering is conducted according to the candidate answers extracted from the top 1000 sentences. 2. The information of the cluster of the sentence, which is also called as topic, is incorporated into language model through aspect model. For sentence clustering, we propose two novel approaches that are One-Sentence-Multi-Topics and One-Sentence-One-Topic respectively. The experimental results show that the performances of cluster-based language model for sentence retrieval are improved significantly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Candidate answers extracted according to the semantic category of the question's answer could be used for sentence clustering of question answering.", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u897f\u95e8\u5b50/ Siemens \u8d1d\u5c14/Bell \u7231\u8fea\u751f/ Edison S3 \u6700 \u8fd1 \uff0c \" \u79fb \u52a8 \u7535 \u8bdd \u4e4b \u7236 \" \u5e93 \u73c0 \u518d \u6b21 \u6210 \u4e3a \u516c \u4f17 \u7126 \u70b9 \u3002 /", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Candidate answers extracted according to the semantic category of the question's answer could be used for sentence clustering of question answering.", |
| "sec_num": "2." |
| }, |
| { |
| "text": "The framework of cluster-based language model for sentence retrieval is shown as Figure 1 . ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 81, |
| "end": 89, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Candidate answers extracted according to the semantic category of the question's answer could be used for sentence clustering of question answering.", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Language model for information retrieval is presented by Ponte & Croft in 1998 [J. Ponte, et al. 1998 ] which has more advantages than vector space model. After that, many improved models are proposed like J.F. Gao [J.F Gao, et al. 2004] , C. Zhai [C. Zhai, et al. 2001] , and so on. In 1999, Berger & Lafferty [A. Berger, et al. 1999] presented statistical translation model for information retrieval.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 78, |
| "text": "Ponte & Croft in 1998", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 79, |
| "end": 101, |
| "text": "[J. Ponte, et al. 1998", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 211, |
| "end": 237, |
| "text": "Gao [J.F Gao, et al. 2004]", |
| "ref_id": null |
| }, |
| { |
| "start": 243, |
| "end": 270, |
| "text": "Zhai [C. Zhai, et al. 2001]", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 293, |
| "end": 335, |
| "text": "Berger & Lafferty [A. Berger, et al. 1999]", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Model for Information Retrieval", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The basic approach of language model for information retrieval is to model the process of generating query Q. The approach has two steps. 1. Constructing document model for each document in the collection; 2. Ranking the documents according to the probabilities p(Q|D). A classical unigram language model for IR could be expressed in equation 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Model for Information Retrieval", |
| "sec_num": "2" |
| }, |
| { |
| "text": "( ) ( ) \u220f Q w i i D | w p D | Q p \u2208 = (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Model for Information Retrieval", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where, w i is a query term, p(w i |D) is document model which represents terms distribution over document. Obviously, estimating the probability p(w i |D) is the key of document model. To solve the sparseness problem, Jelinek-Mercer is commonly used which could be expressed by equation (2).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Model for Information Retrieval", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "( ) ( ) ( ) ( ) C | w p \u03b1 1 D | w p \u03b1 D | w p ML ML \u00d7 + \u00d7 = -", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Language Model for Information Retrieval", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where, p ML (w|D) and p ML (w|C) are document model and collection model respectively estimated via maximum likelihood. As described above, the disadvantages of standard language model is that it does not make any one particular document any more probable than any other, on the condition that neither the documents originally contain the query term. In the other word, if a document is relevant, but does not contain the query term, it is still no more probable, even though it may be topically related. Thus, the smoothing approaches based on standard language model are improper. In this paper, we propose a novel cluster-based language model to overcome it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language Model for Information Retrieval", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Note that document model p(w|D) in document retrieval is replace by p(w|S) called sentence model in sentence retrieval.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The assumption of cluster-based language model for retrieval is that topic-related sentences tend to be relevant to the same query. So, incorporating the topic of sentences into language model can improve the performance of sentence retrieval based on standard language model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The proposed cluster-based language model is a mixture model of three components, that are sentence model p ML (w|S) , cluster/topic model p_topic ML (w|T) and collection model p ML (w|C) . We can formulate our model as equation 3.", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 116, |
| "text": "(w|S)", |
| "ref_id": null |
| }, |
| { |
| "start": 182, |
| "end": 187, |
| "text": "(w|C)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "3" |
| }, |
| { |
| "text": "( ) ( ) ( ) ( ) ( ) ( ) ( ) C w p \u03b2 1 T w p_topic \u03b2 \u03b1 1 S w p \u03b1 S | w p ML ML ML | \u00d7 + | \u00d7 \u00d7 + | \u00d7 = - - (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In fact, the cluster-based language model can also be viewed as a two-stage smoothing approach. The cluster model is first smoothed using the collection model, and the sentence model is then smoothed with the smoothed cluster model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this paper, the cluster model is in the form of term distribution over cluster/topic, associated with the distribution of clusters/topics over sentence, which can be expressed by equation 4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "3" |
| }, |
| { |
| "text": "( ) ( ) ( ) \u2211 \u2208T t S t p t w p T w p_topic | | = | (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "3" |
| }, |
| { |
| "text": "where, T is the set of clusters/topics. p_topic(w|T) is cluster model. p(t|S) is topic sentence distribution which means the distribution of topic over sentence. And p(w|t) is term topic distribution which means the term distribution over topics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Before estimating the sentence model p(w|S), topic-related sentences should be organized into clusters/topics to estimate p(t|S) and p(w|t) probabilities. For sentence clustering, this paper presents two novel approaches that are One-Sentence-Multi-Topics and One-Sentence-One-Topic respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The main idea of One-Sentence-Multi-Topics can be summarized as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "One-Sentence-Multi-Topics", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "For example, the sentence S5 in Table 1 includes two topics which are \"\u8d1d\u5c14\u53d1\u660e\u7535\u8bdd/Bell invented telephone\" and \"\u7231\u8fea\u751f\u53d1\u660e\u7535\u706f/Edison invented electric light\" respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 32, |
| "end": 39, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "If a sentence includes M different candidate answers, then the sentence consists of M different topics.", |
| "sec_num": "1." |
| }, |
| { |
| "text": "For example, the sentence S4 and S5 in Table 1 have the same topic \"\u8d1d\u5c14\u53d1\u660e\u7535\u8bdd/Bell invented telephone\" because both of sentences have the same candidate answer \"\u8d1d\u5c14/Bell\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 39, |
| "end": 46, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Different sentences have the same topic if two candidate answers are same.", |
| "sec_num": "2." |
| }, |
| { |
| "text": "Based on the above ideas, the result of sentence clustering based on One-Sentence-Multi-Topics is shown in Table 2 . Table 2 The Result of One-Sentence-Multi-Topics Sentence Clustering So, we could estimate term topic distribution using equation 5.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 107, |
| "end": 114, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 117, |
| "end": 124, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Different sentences have the same topic if two candidate answers are same.", |
| "sec_num": "2." |
| }, |
| { |
| "text": "\u8d1d\u5c14/Bell S1 S2 S4 S5 S6 S7 S8 \u897f\u95e8\u5b50/Siemens S2 \u7231\u8fea\u751f/Edison S2 S5 \u5e93\u73c0/Cooper S3 S8 S9 \u65af\u8482\u82ac/Stephen S10", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name of Clusters Sentences", |
| "sec_num": null |
| }, |
| { |
| "text": "( ) ( ) ( ) \u2211 w' t , w' n t w n t w p , = | (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name of Clusters Sentences", |
| "sec_num": null |
| }, |
| { |
| "text": "Topic sentence distribution can be estimated using equation 6and (7).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name of Clusters Sentences", |
| "sec_num": null |
| }, |
| { |
| "text": "( ) \u2211 / / = | t st st kl 1 kl 1 S t p (6) ( ) ( ) ( ) ( ) \u2211 w ML ML ML st t | w p s w p log s | w p t s KL kl | \u00d7 = || = (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name of Clusters Sentences", |
| "sec_num": null |
| }, |
| { |
| "text": "where, kl st means the Kullback-Leibler divergence between the sentence with the cluster/topic. k denotes the number of cluster/topic. The main idea of equation (6) is that the closer the Kullback-Leibler divergence, the larger the topic sentence probability p(t|S).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name of Clusters Sentences", |
| "sec_num": null |
| }, |
| { |
| "text": "The main idea of One-Sentence-One-Topic also could be summarized as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "One-Sentence-One-Topic", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "For example, the kernel topic of sentence S5 in Table 1 is \"\u8d1d\u5c14\u53d1\u660e\u7535\u8bdd/Bell invented telephone\" though it includes three different candidate answers.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 48, |
| "end": 55, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A sentence only has one kernel candidate answer which represents the kernel topic no matter how many candidate answers is included.", |
| "sec_num": "1." |
| }, |
| { |
| "text": "2. Different sentences have the same topic if two kernel candidate answers are same. For example, the sentence S4 and S5 in Table 1 have the same topic \"\u8d1d\u5c14\u53d1\u660e\u7535\u8bdd/Bell invented telephone\".", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 124, |
| "end": 131, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A sentence only has one kernel candidate answer which represents the kernel topic no matter how many candidate answers is included.", |
| "sec_num": "1." |
| }, |
| { |
| "text": "Based on the above ideas, the result of sentence clustering based on One-Sentence-One-Topic is shown in Table 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 111, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The kernel candidate answer has shortest average distance to all query terms.", |
| "sec_num": "3." |
| }, |
| { |
| "text": "Sentences \u8d1d\u5c14/Bell S1 S2 S4 S5 S6 S7 \u5e93\u73c0/Cooper S3 S8 S9 \u65af\u8482\u82ac/Stephen S10 Table 3 The Result of One-Sentence-One-Topic Sentence Clustering", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 71, |
| "end": 78, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Name of Clusters", |
| "sec_num": null |
| }, |
| { |
| "text": "Equation 8and (9) can be used to estimate the kernel candidate answer and the distances of candidate answers respectively. Term topic distribution in One-Sentence-One-Topic can be estimated via equation (5). And topic sentence distribution is equal to 1 because a sentence only belongs to one cluster/topic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name of Clusters", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "{ } i i a a * i SemDis a argmin = (8) ( ) N q , a SemDis SemDis j j i a i \u2211 = (9) ( ) j i q a j i Position Position q a SemDis - = ,", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Name of Clusters", |
| "sec_num": null |
| }, |
| { |
| "text": "where, a i * is the kernel candidate answer. a i is the i-th candidate answer, i a SemDis is the average distance of i-th candidate answer. q j is the j-th query term, N is the number of all query terms. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name of Clusters", |
| "sec_num": null |
| }, |
| { |
| "text": "Research on Chinese question answering, is still at its early stage. And there is no public evaluation platform for Chinese question answering. So in this paper, we use the evaluation environment presented by [Youzheng Wu, et al. 2004 ] which is similar to TREC question answering track [Ellen. M. Voorhees. 2004] . The documents collection is downloaded from Internet which size is 1.8GB. The testing questions are collected via four different approaches which has 7050 Chinese questions currently.", |
| "cite_spans": [ |
| { |
| "start": 209, |
| "end": 234, |
| "text": "[Youzheng Wu, et al. 2004", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 298, |
| "end": 313, |
| "text": "Voorhees. 2004]", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and Analysis", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In this section, we randomly select 807 testing questions which are fact-based short-answer questions. Moreover, the answers of all testing questions are named entities identified by [Youzheng Wu, et al. 2005] . Figure 2 gives the details. Note that, LOC, ORG, PER, NUM and TIM denote the questions which answer types are location, organization, person, number and time respectively, SUM means all question types. ", |
| "cite_spans": [ |
| { |
| "start": 183, |
| "end": 209, |
| "text": "[Youzheng Wu, et al. 2005]", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 212, |
| "end": 220, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments and Analysis", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Based on the standard language model for information retrieval, we can get the baseline performance, as is shown in Table 4 The Baseline MRR5 Performance In the following chapter, we conduct experiments to answer two questions.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 116, |
| "end": 123, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Baseline: Standard Language Model for Sentence Retrieval", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "1. Whether cluster-based language model for sentence retrieval could improve the performance of standard language model for sentence retrieval?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline: Standard Language Model for Sentence Retrieval", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "2. What are the performances of sentence clustering for various question types?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baseline: Standard Language Model for Sentence Retrieval", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In this part, we will conduct experiments to validate the performances of cluster-based language models which are based on One-Sentence-Multi-Topics and One-Sentence-One-Topic sentence clustering respectively. In the following experiments, \u03b2 = 0.9.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model for Sentence Retrieval", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The experimental results of cluster-based language model based on One-Sentence-Multi-Topics sentence clustering are shown in From the experimental results, we can find that by integrating the clusters/topics of the sentence into language model, we can achieve much improvement at each stage of \u03b1. For example, the largest and smallest improvements for all types of questions are about 7.7% and 2.8% respectively. This experiment shows that the proposed cluster-based language model based on One-Sentence-Multi-Topics is effective for sentence retrieval in Chinese question answering.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model Based on One-Sentence-Multi-Topics", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "The performance of cluster-based language model based on One-Sentence-One-Topic sentence clustering is shown in Table 5 , we can find that the improvement of cluster-based language model based on One-Sentence-One-Topic is slightly lower than that of cluster-based language model based on One-Sentence-Multi-Topics. The reasons lie in that Clusters based on One-Sentence-One-Topic approach are very coarse and much information is lost. But the improvements over baseline system are obvious. Table 7 shows that MRR1 and MRR20 scores of cluster-based language models for all question types. The relative improvements over the baseline are listed in the bracket. This experiment is to validate whether the conclusion based on different measurements is consistent or not. Table 7 also shows that the performances of two cluster-based language models are higher than that of the baseline system under different measurements. For MRR1 scores, the largest improvements of cluster-based language models based on One-Sentence-Multi-Topics and One-Sentence-One-Topic are about 15% and 10% respectively. For MRR20, the largest improvements are about 7% and 4% respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 112, |
| "end": 119, |
| "text": "Table 5", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 490, |
| "end": 497, |
| "text": "Table 7", |
| "ref_id": null |
| }, |
| { |
| "start": 767, |
| "end": 774, |
| "text": "Table 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Cluster-based Language Model Based on One-Sentence-One-Topic", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "The experiments show that the proposed cluster-based language model can improve the performance of sentence retrieval in Chinese question answering under the various measurements. Moreover, the performance of clustering-based language model based on One-Sentence-Multi-Topics is better than that based on One-Sentence-One-Topic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion 1:", |
| "sec_num": null |
| }, |
| { |
| "text": "The parameter \u03b2 in equation 3denotes the balancing factor of the cluster model and the collection model. The larger \u03b2, the larger contribution of the cluster model. The small \u03b2, the larger contribution of the collection model. If the performance of sentence retrieval decreased with the increasing of \u03b2, it means that there are many noises in sentence clustering. Otherwise, sentence clustering is satisfactory for cluster-based language model. So the task of this experiment is to find the performances of sentence clustering for various question types, which is helpful to select the most proper \u03b2 to obtain the best performance of sentence retrieval. With the change of \u03b2 and the fixed \u03b1 (\u03b1 = 0.9), the performances of cluster-based language model based on One-Sentence-Multi-Topics are shown in Figure 3 . In Figure 3 , the performances of TIM and NUM type questions decreased with the increasing of the parameter \u03b2 (from 0.6 to 0.9), while the performances of LOC, PER and ORG type questions increased. This phenomenon showed that the performance of sentence clustering based on One-Sentence-Multi-Topics for TIM and NUM type questions is not as good as that for LOC, PER and ORG type questions. This is in fact reasonable. The number and time words frequently appeared in the sentence, which does not represent a cluster/topic when they appear. While PER, LOC and ORG entities can represent a topic when they appeared in the sentence.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 799, |
| "end": 807, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 813, |
| "end": 821, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Analysis of Sentence Clustering for Various Question Types", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Similarly, with the change of \u03b2 and the fixed \u03b1 (\u03b1=0.9), the performances of cluster-based language model based on One-Sentence-One-Topic are shown in Figure 4 . In Figure 4 , the performances of TIM, NUM, LOC and SUM type questions decreased with the increasing of \u03b2 (from 0.6 to 0.9). This phenomenon shows that the performances of sentence clustering based on One-Sentence-One-Topic are not satisfactory for most of question types. But, compared to the baseline system, the cluster-based language model based on this kind of sentence clustering can still improve the performances of sentence retrieval in Chinese question answering.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 151, |
| "end": 159, |
| "text": "Figure 4", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 165, |
| "end": 173, |
| "text": "Figure 4", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Analysis of Sentence Clustering for Various Question Types", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The performance of the proposed sentence clustering based on One-Sentence-Multi-Topics for PER, LOC and ORG type questions is higher than that for TIM and NUM type questions. Thus, for PER, LOC and ORG questions, we should choose the larger \u03b2 value (about 0.9) in cluster-based language model based on One-Sentence-Multi-Topics. While for TIM and NUM type questions, the value of \u03b2 should be smaller (about 0.5). But, the performance of sentence clustering based on One-Sentence-One-Topic for all questions is not ideal, so the value of \u03b2 for cluster-based language model based on One-Sentence-One-Topic should be smaller (about 0.5) for all questions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion 2:", |
| "sec_num": null |
| }, |
| { |
| "text": "The input of a question answering system is a natural language question which contains richer information than the query in traditional document retrieval. Such richer information can be used in each module of question answering system. In this paper, we presented a novel cluster-based language model for sentence retrieval in Chinese question answering which combines the sentence model, the cluster/topic model and the collection model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For sentence clustering, we presented two approaches that are One-Sentence-Multi-Topics and One-Sentence-One-Topic respectively. The experimental results showed that the proposed cluster-based language model could improve the performances of sentence retrieval in Chinese question answering significantly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "However, we only conduct sentence clustering for questions whose answers are named entities in this paper. In the future work, we will focus on all other type questions and improve the performance of the sentence retrieval by introducing the structural, syntactic and semantic information into language model.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "A Language Modeling Approach to Information Retrieval", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Ponte", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "Bruce" |
| ], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "the Proceedings of ACM SIGIR 1998", |
| "volume": "", |
| "issue": "", |
| "pages": "275--281", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Ponte, W. Bruce Croft. A Language Modeling Ap- proach to Information Retrieval. In the Proceedings of ACM SIGIR 1998, pp 275-281, 1998.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A Study of Smoothing Techniques for Language Modeling Applied to ad hoc Information Retrieval", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Zhai", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Lafferty", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "the Proceedings of the ACM SIGIR Conference on Research and Development in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "C. Zhai, J. Lafferty. A Study of Smoothing Tech- niques for Language Modeling Applied to ad hoc Information Retrieval. In the Proceedings of the ACM SIGIR Conference on Research and Devel- opment in Information Retrieval, 2001.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "IBM's Statistical Question Answering System-TREC 11", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ittycheriah", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "the Eleventh Text Retrieval Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ittycheriah, S. Roukos. IBM's Statistical Question Answering System-TREC 11. In the Eleventh Text Retrieval Conference (TREC 2002), Gaithersburg, Maryland, November 2002.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The Integration of Lexical Knowledge and External Resources for Question Answering", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tat-Seng", |
| "middle": [], |
| "last": "Chua", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "the Proceedings of the Eleventh Text REtrieval Conference (TREC'2002)", |
| "volume": "", |
| "issue": "", |
| "pages": "155--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Yang, Tat-Seng Chua. The Integration of Lexical Knowledge and External Resources for Question Answering. In the Proceedings of the Eleventh Text REtrieval Conference (TREC'2002), Mary- land, USA, 2002, page 155-161.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Answer Passage Retrieval for Question Answering", |
| "authors": [ |
| { |
| "first": "Andres", |
| "middle": [], |
| "last": "Corrada-Emmanuel", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "Bruce" |
| ], |
| "last": "Croft", |
| "suffix": "" |
| }, |
| { |
| "first": "Vanessa", |
| "middle": [], |
| "last": "Murdock", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "the Proceedings of the 27th Annual International Conference on Research and Development in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "516--517", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andres Corrada-Emmanuel, W.Bruce Croft, Vanessa Murdock. Answer Passage Retrieval for Question Answering. In the Proceedings of the 27th Annual International Conference on Research and Devel- opment in Information Retrieval, pp. 516 -517, 2004.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Overview of the TREC 2004 Question Answering Track", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [ |
| "M" |
| ], |
| "last": "Voorhees", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Twelfth Text REtrieval Conference (TREC 2004)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen M. Voorhees. Overview of the TREC 2004 Question Answering Track. In Proceedings of the Twelfth Text REtrieval Conference (TREC 2004), 2004.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Simple Translation Models for Sentence Retrieval in Factoid Question Answering", |
| "authors": [ |
| { |
| "first": "Vanessa", |
| "middle": [], |
| "last": "Murdock", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "Bruce" |
| ], |
| "last": "Croft", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "the Proceedings of the SIGIR 2004 Workshop on Information Retrieval for Question Answering", |
| "volume": "", |
| "issue": "", |
| "pages": "31--35", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vanessa Murdock, W. Bruce Croft. Simple Transla- tion Models for Sentence Retrieval in Factoid Question Answering. In the Proceedings of the SIGIR 2004 Workshop on Information Retrieval for Question Answering, pp.31-35, 2004.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Probabilistic Latent Semantic Indexing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Hofmann", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "the Proceedings of the Twenty-Second Annual International SIGIR Conference on Research and Development in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Hofmann. Probabilistic Latent Semantic In- dexing. In the Proceedings of the Twenty-Second Annual International SIGIR Conference on Re- search and Development in Information Retrieval, 1999.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Information Retrieval as Statistical Translation", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Berger", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Lafferty", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "the Proceedings of ACM SIGIR-1999", |
| "volume": "", |
| "issue": "", |
| "pages": "222--229", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Berger and J. Lafferty. Information Retrieval as Statistical Translation. In the Proceedings of ACM SIGIR-1999, pp. 222-229, Berkeley, CA, August 1999.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A noisy-channel approach to question answering", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Echihabi", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Marcu", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "the Proceeding of the 41st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Echihabi and D.Marcu. A noisy-channel approach to question answering. In the Proceeding of the 41st Annual Meeting of the Association for Com- putational Linguistics, Sappora, Japan, 2003.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Topic Based Language Models for ad hoc Information Retrieval", |
| "authors": [ |
| { |
| "first": "Leif", |
| "middle": [], |
| "last": "Azzopardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Girolami", |
| "suffix": "" |
| }, |
| { |
| "first": "Keith", |
| "middle": [], |
| "last": "Van Rijsbergen", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "the Proceeding of IJCNN 2004 & FUZZ-IEEE 2004", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leif Azzopardi, Mark Girolami and Keith van Rijsbergen. Topic Based Language Models for ad hoc Information Retrieval. In the Proceeding of IJCNN 2004 & FUZZ-IEEE 2004, July 25-29, 2004, Budapest, Hungary.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Integrating Term Relationships into Language Models for Information Retrieval", |
| "authors": [ |
| { |
| "first": "Jian-Yun", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jian-Yun Nie. Integrating Term Relationships into Language Models for Information Retrieval. Re- port at ICT-CAS.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Dependence language model for information retrieval", |
| "authors": [ |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian-Yun", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Guangyuan", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Guihong", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "SIGIR-2004", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jianfeng Gao, Jian-Yun Nie, Guangyuan Wu and Guihong Cao. 2004b. Dependence language model for information retrieval. In SIGIR-2004. Sheffield, UK, July 25-29.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Chinese Named Entity Recognition Model Based on Multiple Features", |
| "authors": [ |
| { |
| "first": "Youzheng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "the Proceeding of HLT/EMNLP 2005", |
| "volume": "", |
| "issue": "", |
| "pages": "427--434", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Youzheng Wu, Jun Zhao, Bo Xu. Chinese Named Entity Recognition Model Based on Multiple Fea- tures. In the Proceeding of HLT/EMNLP 2005, Vancouver, B.C., Canada, pp.427-434, 2005.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Building an Evaluation Platform for Chinese Question Answering Systems", |
| "authors": [ |
| { |
| "first": "Youzheng", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceeding of the First National Conference on Information Retrieval and Content Security", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Youzheng Wu, Jun Zhao, Xiangyu Duan and Bo Xu. Building an Evaluation Platform for Chinese Ques- tion Answering Systems. In Proceeding of the First National Conference on Information Retrieval and Content Security. Shanghai, China, December, 2004.(In Chinese)", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "text": "The Framework of Cluster-based Language Model for Sentence Retrieval", |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "text": "position of query term q j and candidate answer a i .", |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "text": "The Distribution of Various Question Types over Testing Questions Chinese question answering system is to return a ranked list of five answer sentences per question and will be strictly evaluated (unsupported answers counted as wrong) using mean reciprocal rank (MRR).", |
| "type_str": "figure" |
| }, |
| "FIGREF3": { |
| "num": null, |
| "uris": null, |
| "text": "MRR5 Performances of Cluster-based Language Model Based on One-Sentence-Multi-Topics with the Change of \u03b2", |
| "type_str": "figure" |
| }, |
| "FIGREF4": { |
| "num": null, |
| "uris": null, |
| "text": "MRR5 Performance of Cluster-based Language Model Based on One-Sentence-One-Topic with the Change of \u03b2", |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td>. Thus, we can</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "" |
| }, |
| "TABREF1": { |
| "content": "<table><tr><td/><td>\u5e93\u73c0\u8868\u793a\uff0c\u6d88\u8d39\u8005\u91c7\u7eb3\u79fb\u52a8\u7535\u8bdd\u7684\u901f\u5ea6\u4e4b\u5feb\u4ee4\u4ed6\u610f\u5916\uff0c</td><td/></tr><tr><td/><td>\u4f46\u79fb\u52a8\u7535\u8bdd\u7684\u666e\u53ca\u7387\u8fd8\u6ca1\u6709\u8fbe\u5230\u65e0\u6240\u4e0d\u5728\uff0c\u8fd9\u8ba9\u4ed6\u6709\u4e9b</td><td/></tr><tr><td>S9</td><td>\u5931\u671b\u3002/Cooper said, he was surprised at the speed that the consumers switched to mobile phones; but the populariza-</td><td>\u5e93\u73c0/Cooper</td></tr><tr><td/><td>tion of mobile phone isn't omnipresent, which made him a</td><td/></tr><tr><td/><td>little bit disappointed.</td><td/></tr><tr><td/><td>\u82f1\u56fd\u53d1\u660e\u5bb6\u65af\u8482\u82ac\u5c06\u79fb\u52a8\u7535\u8bdd\u7684\u6240\u6709\u7535\u5b50\u5143\u4ef6\u8bbe\u8ba1\u5728\u4e00</td><td/></tr><tr><td>S10</td><td>\u5f20\u7eb8\u4e00\u6837\u539a\u8584\u7684\u82af\u7247\u4e0a\u3002/England inventor Stephen de-signed the paper-clicked CMOS chip which included all</td><td>\u65af\u8482\u82ac/Stephen</td></tr><tr><td/><td>electronic components.</td><td/></tr><tr><td/><td colspan=\"2\">Table 1 The Top 10 Retrieved Sentences and its Candidate Answers</td></tr><tr><td/><td/><td>\u5e93\u73c0/Cooper</td></tr><tr><td>S4</td><td>1876 \u5e74\uff0c\u53d1\u660e\u5bb6\u8d1d\u5c14\u53d1\u660e\u4e86\u7535\u8bdd\u3002/In 1876, Bell in-vented telephone.</td><td>\u8d1d\u5c14/Bell</td></tr><tr><td/><td>\u63a5\u7740\uff0c1876 \u5e74\uff0c\u7f8e\u56fd\u79d1\u5b66\u5bb6\u8d1d\u5c14\u53d1\u660e\u4e86\u7535\u8bdd\uff1b1879 \u5e74</td><td/></tr><tr><td>S5</td><td>\u7f8e\u56fd\u79d1\u5b66\u5bb6\u7231\u8fea\u751f\u53d1\u660e\u4e86\u7535\u706f\u3002/Subsequently, American scientist Bell invented the phone in 1876; Edison invented</td><td>\u8d1d\u5c14/Bell \u7231\u8fea\u751f/Edison</td></tr><tr><td/><td>the electric light in 1879.</td><td/></tr><tr><td>S6</td><td>1876 \u5e74 3 \u6708 7 
\u65e5\uff0c\u8d1d\u5c14\u6210\u4e3a\u7535\u8bdd\u53d1\u660e\u7684\u4e13\u5229\u4eba\u3002/On March 7th, 1876, Bell became the patentee of telephone.</td><td>\u8d1d\u5c14/Bell</td></tr><tr><td/><td>\u8d1d\u5c14\u4e0d\u4ec5\u53d1\u660e\u4e86\u7535\u8bdd\uff0c\u8fd8\u6210\u529f\u5730\u5efa\u7acb\u4e86\u81ea\u5df1\u7684\u516c\u53f8\u63a8\u5e7f</td><td/></tr><tr><td>S7</td><td>\u7535\u8bdd\u3002/Bell not only invented telephone, but also estab-</td><td>\u8d1d\u5c14/Bell</td></tr><tr><td/><td>lished his own company for spreading his invention.</td><td/></tr><tr><td/><td>\u5728\u9996\u53ea\u79fb\u52a8\u7535\u8bdd\u6295\u5165\u4f7f\u7528 30 \u5e74\u4ee5\u540e\uff0c\u5176\u53d1\u660e\u4eba\u5e93\u73c0\u4ecd</td><td/></tr><tr><td>S8</td><td>\u68a6\u60f3\u7740\u672a\u6765\u7535\u8bdd\u6280\u672f\u5b9e\u73b0\u4e4b\u65e5\u5230\u6765\u3002/Thirty years after the invention of first mobile phone, Cooper still anticipated</td><td>\u5e93\u73c0/Cooper</td></tr><tr><td/><td>the date of the realization of future phone's technology.</td><td/></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "" |
| }, |
| "TABREF3": { |
| "content": "<table><tr><td>, where \u03b1 is the</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "" |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td>.</td></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "" |
| }, |
| "TABREF5": { |
| "content": "<table><tr><td>\u03b1</td><td>0.6</td><td>0.7</td><td>0.8</td><td>0.9</td></tr><tr><td>LOC</td><td>53.02 (+6.15)</td><td>54.27 (+5.38)</td><td>56.14 (+6.67)</td><td>56.28 (+3.19)</td></tr><tr><td>ORG</td><td>58.75 (+9.42)</td><td>58.75 (+17.2)</td><td>59.46 (+18.6)</td><td>59.46 (+16.6)</td></tr><tr><td>PER</td><td>66.57 (+5.50)</td><td>67.07 (+4.11)</td><td>67.44 (+2.27)</td><td>67.29 (+2.44)</td></tr><tr><td>NUM</td><td>49.95 (+3.14)</td><td>50.87 (+2.02)</td><td>52.15 (+0.71)</td><td>53.51 (+0.47)</td></tr><tr><td>TIM</td><td>59.75 (+4.88)</td><td>60.65 (+3.89)</td><td>62.71 (+6.70)</td><td>62.20 (+1.15)</td></tr><tr><td>SUM</td><td>56.48 (+4.63)</td><td>57.65 (+4.29)</td><td>58.82 (+4.29)</td><td>59.22 (+2.23)</td></tr><tr><td colspan=\"5\">Table 6 MRR5 Performance of Cluster-based</td></tr><tr><td colspan=\"5\">Language Model Based on One-Sentence-One-</td></tr><tr><td/><td/><td>Topic</td><td/><td/></tr><tr><td colspan=\"3\">In Comparison with</td><td/><td/></tr></table>", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "text": "The relative improvements are listed in the bracket." |
| } |
| } |
| } |
| } |