| { |
| "paper_id": "C16-1004", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:04:45.499349Z" |
| }, |
| "title": "A Redundancy-Aware Sentence Regression Framework for Extractive Summarization", |
| "authors": [ |
| { |
| "first": "Pengjie", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Shandong University / Jinan", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Research Asia", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "fuwei@microsoft.com" |
| }, |
| { |
| "first": "Zhumin", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Shandong University / Jinan", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "chenzhumin@sdu.edu.cn" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Shandong University / Jinan", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "majun@sdu.edu.cn" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Microsoft Research Asia", |
| "location": { |
| "settlement": "Beijing", |
| "country": "China" |
| } |
| }, |
| "email": "mingzhou@microsoft.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Existing sentence regression methods for extractive summarization usually model sentence importance and redundancy in two separate processes. They first evaluate the importance f (s) of each sentence s and then select sentences to generate a summary based on both the importance scores and redundancy among sentences. In this paper, we propose to model importance and redundancy simultaneously by directly evaluating the relative importance f (s|S) of a sentence s given a set of selected sentences S. Specifically, we present a new framework to conduct regression with respect to the relative gain of s given S calculated by the ROUGE metric. Besides the single sentence features, additional features derived from the sentence relations are incorporated. Experiments on the DUC 2001, 2002 and 2004 multi-document summarization datasets show that the proposed method outperforms state-of-the-art extractive summarization approaches.", |
| "pdf_parse": { |
| "paper_id": "C16-1004", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Existing sentence regression methods for extractive summarization usually model sentence importance and redundancy in two separate processes. They first evaluate the importance f (s) of each sentence s and then select sentences to generate a summary based on both the importance scores and redundancy among sentences. In this paper, we propose to model importance and redundancy simultaneously by directly evaluating the relative importance f (s|S) of a sentence s given a set of selected sentences S. Specifically, we present a new framework to conduct regression with respect to the relative gain of s given S calculated by the ROUGE metric. Besides the single sentence features, additional features derived from the sentence relations are incorporated. Experiments on the DUC 2001, 2002 and 2004 multi-document summarization datasets show that the proposed method outperforms state-of-the-art extractive summarization approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Sentence regression is one of the branches of extractive summarization methods that achieves state-ofthe-art performances (Cao et al., 2015b; Wan et al., 2015) and is commonly used in practical systems (Hu and Wan, 2013; Wan and Zhang, 2014; Hong and Nenkova, 2014) . Existing sentence regression methods usually model sentence importance and sentence redundancy in two separate processes, namely sentence ranking and sentence selection. Specifically, in the sentence ranking process, they evaluate the importance f (s) of each sentence s with a ranking model (Osborne, 2002; Conroy et al., 2004; Galley, 2006; through either directly measuring the salience of sentences or firstly ranking words (or bi-grams) and then combining these scores to rank sentences (Lin and Hovy, 2000; Yih et al., 2007; Gillick and Favre, 2009; Li et al., 2013) . Then, in the sentence selection process, they discard the redundant sentences that are similar to the already selected sentences.", |
| "cite_spans": [ |
| { |
| "start": 122, |
| "end": 141, |
| "text": "(Cao et al., 2015b;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 142, |
| "end": 159, |
| "text": "Wan et al., 2015)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 202, |
| "end": 220, |
| "text": "(Hu and Wan, 2013;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 221, |
| "end": 241, |
| "text": "Wan and Zhang, 2014;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 242, |
| "end": 265, |
| "text": "Hong and Nenkova, 2014)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 560, |
| "end": 575, |
| "text": "(Osborne, 2002;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 576, |
| "end": 596, |
| "text": "Conroy et al., 2004;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 597, |
| "end": 610, |
| "text": "Galley, 2006;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 760, |
| "end": 780, |
| "text": "(Lin and Hovy, 2000;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 781, |
| "end": 798, |
| "text": "Yih et al., 2007;", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 799, |
| "end": 823, |
| "text": "Gillick and Favre, 2009;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 824, |
| "end": 840, |
| "text": "Li et al., 2013)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose a novel regression framework to directly model the relative importance f (s|S) of a sentence s given the sentences S. Specifically, we evaluate the relative importance f (s|S) with a regression model where additional features involving the sentence relations are incorporated. Then we generate the summary by greedily selecting the next sentence which maximizes f (s|S) with respect to the current selected sentences S. Our method improves the existing regression framework from three aspects. First, our method is redundancy-aware by considering importance and redundancy simultaneously instead of two separate processes. Second, we treat the scores computed using the official evaluation tool as the groundtruth and find that our method has a higher upper bound. Third, there is no manually tuned parameters, which is more convenient in practice. We carry out experiments on three benchmark datasets from DUC 2001 DUC , 2002 DUC , and 2004 multi-document summarization tasks. Experimental results show that our method achieves the best performance in terms of ROUGE-2 recall metric and outperforms state-of-the-art extractive summarization approaches on all three datasets.", |
| "cite_spans": [ |
| { |
| "start": 933, |
| "end": 941, |
| "text": "DUC 2001", |
| "ref_id": null |
| }, |
| { |
| "start": 942, |
| "end": 952, |
| "text": "DUC , 2002", |
| "ref_id": null |
| }, |
| { |
| "start": 953, |
| "end": 967, |
| "text": "DUC , and 2004", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Formally, given a sentence set (from one or multiple documents) D \u2208 C, extractive summarization tries to select a sentence set S * as the summary that maximizes an utility function f (S) with respective to the length limit l, Existing sentence regression methods usually model the importance of each sentence independently (Osborne, 2002; Galley, 2006; . Then, they use a threshold parameter to control the redundancy (Cao et al., 2015b; Galanis et al., 2012) when selecting sentences with the Greedy algorithm or Integer Linear Programming (ILP) algorithm (Cao et al., 2015a) . The framework for these regression methods can be formulated as follows.", |
| "cite_spans": [ |
| { |
| "start": 323, |
| "end": 338, |
| "text": "(Osborne, 2002;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 339, |
| "end": 352, |
| "text": "Galley, 2006;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 418, |
| "end": 437, |
| "text": "(Cao et al., 2015b;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 438, |
| "end": 459, |
| "text": "Galanis et al., 2012)", |
| "ref_id": null |
| }, |
| { |
| "start": 557, |
| "end": 576, |
| "text": "(Cao et al., 2015a)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "f (s|S) = { f (s) 1 \u2212 SIM (s, S) \u2265 t 0 1 \u2212 SIM (s, S) < t (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where S is the set of already selected sentences, f (s) models the importance of sentence s. SIM (s, S) evaluates the similarity of sentence s with the current generated summary S. Usually, SIM (s,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "S) = bi-gram-overlap(s,S) Len(s)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": ", which is the bi-gram overlap ratio. Len(s) is the length of s. t is a threshold parameter used to control the redundancy, which is usually set heuristically.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "In this paper, we propose to directly model the relative importance f (s|S) instead of the independent importance of each sentence f (s). Specially, we model the importance of s given the sentences S as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Framework", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f (s|S) = min s \u2032 \u2208S f (s|s \u2032 )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Our Framework", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "which considers the minimum relative importance of sentence s with respect to each sentence of S. f (s|s \u2032 ) models the relative importance of sentence s given sentence s \u2032 , which makes Equation 2 a redundancy-aware framework. When generating summaries, we select the first sentence by treating s \u2032 = \u2205 or using a f (s) model. Then, we select the rest summary sentences with the following greedy algorithm:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Framework", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "s * = arg max s\u2282D\\S min s \u2032 \u2208S f (s|s \u2032 )", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Our Framework", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The algorithm starts with the first selected sentence. In each step, a new sentence is added to the summary that results in the maximum relative increase according to Equation 3. The algorithm terminates when the summary length constraint is reached.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Framework", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Next we conduct experiments to analyze the upper bounds of the new framework compared with the existing framework (Equation 1). To this end, we compute f (s) and f (s|s \u2032 ) as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Framework", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f (s) = ROU GE-2(s|S ref ) f (s|s \u2032 ) = f ({s, s \u2032 }) \u2212 f (s \u2032 ) = ROU GE-2({s, s \u2032 }|S ref ) \u2212 ROU GE-2(s \u2032 |S ref )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Our Framework", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where S ref is one or several summaries written by people. The ROUGE-2 recall metric gives a score to a set of sentences with respective to the human written summaries. We compute f (s|s \u2032 ) as the total gain of s and s \u2032 (f ({s, s \u2032 })) subtracted by the individual gain of s \u2032 (f (s \u2032 )). Equation 4 can be seen as the groundtruth computation of f (s) and f (s|s \u2032 ). The experimental upper bounds of different frameworks are shown in Figure 1 . Similar results of ROUGE-1 and ROUGE-2 are achieved on all three benchmark datasets from DUC 2001 DUC , 2002 DUC and 2004 The advantages of the new framework (Equation 2) are three-fold compared with the framework of Equation 1. First, there is no parameter to be tuned manually. By comparison, Equation 1 has a threshold parameter t, which is very sensitive around the best performance, as shown in the red dashed line parts of Figure 1 . Second, the new framework has a higher upper bound, which means there is a bigger potential for improvement. Finally, besides single sentence features, additional features involving the relations of two sentences can be extracted to improve the regression performance.", |
| "cite_spans": [ |
| { |
| "start": 537, |
| "end": 545, |
| "text": "DUC 2001", |
| "ref_id": null |
| }, |
| { |
| "start": 546, |
| "end": 556, |
| "text": "DUC , 2002", |
| "ref_id": null |
| }, |
| { |
| "start": 557, |
| "end": 569, |
| "text": "DUC and 2004", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 437, |
| "end": 445, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 877, |
| "end": 885, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Our Framework", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "The new proposed framework also has some challenges. First, the groundtruth of f (s|s \u2032 ) is usually unavailable for many tasks. Fortunately, in the text summarization task, the groundtruth of f (s|s \u2032 ) can be computed according to Equation 4. Second, the number of training instances is O(|C||D| 2 ) (O(|C||D|) for Equation 1). We come up with two ways to speed up the training process in the next session.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Framework", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We implement f (s|s \u2032 ) with MultiLayer Perceptron (MLP) (Ruck et al., 1990; Gardner and Dorling, 1998) .", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 76, |
| "text": "(Ruck et al., 1990;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 77, |
| "end": 103, |
| "text": "Gardner and Dorling, 1998)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f (s|s \u2032 ) = M LP ( \u03a6(s|s \u2032 )|\u03b8 )", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where \u03a6(s|s \u2032 ) is the set of features and \u03b8 is the parameters to be learned. We use the standard Mean Square Error (MSE) as the loss function as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L(\u03b8) = 1 |C||D|(|D| \u2212 1) \u2211 D\u2208C \u2211 s\u2208D \u2211 s \u2032 \u2208D; s \u2032 \u0338 =s Err(s|s \u2032 ) Err(s|s \u2032 ) = ( M LP ( \u03a6(s|s \u2032 )|\u03b8 ) \u2212 ROU GE(s|s \u2032 , S ref ) ) 2 ROU GE(s|s \u2032 , S ref ) = ROU GE-2({s, s \u2032 }|S ref ) \u2212 ROU GE-2(s \u2032 |S ref )", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We use ROUGE-2 recall as the groundtruth score due to its high capability of evaluating automatic summarization systems (Owczarzak et al., 2012) . The s \u2032 in f (s|s \u2032 ) should mainly refer to the sentences that have a big potential to be selected into the summary. To this end, we do not have to treat each sentence in D as s \u2032 during training. We can accelerate the training process by generating a set of sentences S \u2032 from D. We come up with two ways as shown in Algorithm 1. The first way is using the greedy strategy (Line 4 of Algorithm 1). Specifically, for each training episode of sentence s, we use the current model to generate the summary with greedy algorithm as a part of the S \u2032 . We refer to this part as S \u2032 1 . The advantage is that S \u2032 1 is adaptively generated with respective to the training status of the model. The second way is randomly sampling a small set of s \u2032 with respect to its groundtruth ROUGE score (Line 6 of Algorithm 1). Specifically, for each training episode of sentence s, we sample a small set S \u2032 2 according to the following rule:", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 144, |
| "text": "(Owczarzak et al., 2012)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "{ N otSelected rnd(0, 1) > 0.05 * ROU GE-2(s) + 0.05 Selected Otherwise (7)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where rnd(0, 1) generates random number from a uniform distribution within the range", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "[0, 1]. ROU GE-2(s) is normalized to [0, 1].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Each sentence is selected with at least 5% probability and sentences with higher ROUGE scores have higher probabilities. Different probabilities will influence the speed of the training process but will not make much difference in the final results according to our experiments. We use the randomly sampled S \u2032 2 to avoid the premature convergence caused by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "S \u2032 1 . Fi- nally, S \u2032 = S \u2032 1 \u222a S \u2032 2 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In this way, the number of training instances is O(|C||D||S \u2032 |) while originally it is O(|C||D| 2 ), where C is the set of all D in the training corpus. Note that |S \u2032 | is a very small number compared to |D|.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Algorithm 1 The adaptive & randomized training.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Objective Function", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Training corpus, C; Max iterations, N ; Output:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "Model parameters, \u03b8; 1: Randomly initialize the parameters \u03b8; 2: for i = 1; i < N ; i++ do 3:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "for each D such that D \u2208 C do 4:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "Generate S \u2032 1 greedily according to Equation 3; 5:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "for each sentence s such that s \u2208 D do 6: Generate S \u2032 2 randomly according to Equation 7; 7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "for each s \u2032 such that", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "s \u2032 \u2208 S \u2032 1 \u222a S \u2032 2 , s \u2032 \u0338 = s do 8:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "Make forward and backward propagation w.r.t the loss L(\u03b8) (Equation 6); 9:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "Update the model parameters \u03b8; 10: end for 11: end for 12: end for 13:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "if \u03b8 converges then 14:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "break; 15: end if 16: end for 17: return \u03b8;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input:", |
| "sec_num": null |
| }, |
| { |
| "text": "We employ two groups of features in terms of sentence importance and redundancy, namely Sentence Importance Features and Sentence Relation Features. The former are widely studied by existing methods (Gupta et al., 2007; Aker et al., 2010; Ouyang et al., 2011; Galanis et al., 2012; Hong et al., 2015) . However, to our knowledge, the latter are firstly incorporated into a regression model in this paper. Details of used features are listed in Table 1 . We use Sentence Importance Features to model the independent sentence importance of s. Len(s), P osition(s), Stop(s), T F (s) and DF (s) are commonly used features. Embedding feature Emb(s) is an effective feature that encodes the sentence content which can be seen as summary prior nature of the sentence (Cao et al., 2015b) . We use Sentence Relation Features to evaluate the content overlap between s and s \u2032 . M atch-P (s \u2229 s \u2032 ) and M atch-R(s \u2229 s \u2032 ) evaluate the ratio of the overlap words, while T F (s \u2229 s \u2032 ), DF (s \u2229 s \u2032 ) and Stop(s \u2229 s \u2032 ) evaluate the importance of the overlap words. Cos(s, s \u2032 ) evaluates the exact match similarity while Emb-Cos(s, s \u2032 ) evaluates the meaning match similarity. All features in Table 1 are basic features commonly used in summarization.", |
| "cite_spans": [ |
| { |
| "start": 199, |
| "end": 219, |
| "text": "(Gupta et al., 2007;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 220, |
| "end": 238, |
| "text": "Aker et al., 2010;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 239, |
| "end": 259, |
| "text": "Ouyang et al., 2011;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 260, |
| "end": 281, |
| "text": "Galanis et al., 2012;", |
| "ref_id": null |
| }, |
| { |
| "start": 282, |
| "end": 300, |
| "text": "Hong et al., 2015)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 760, |
| "end": 779, |
| "text": "(Cao et al., 2015b)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 444, |
| "end": 451, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 1182, |
| "end": 1189, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Feature", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Sentence Importance Features", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Len(s) Length of s P osition(s) Position of s in its document Stop(s) = stop-count(s) Len(s) Stop words ratio of s T F (s) = \u2211 w\u2208s GT F (w) Len(s) Average Term Frequency GT F (w) is the Global Term Frequency DF (s) = \u2211 w\u2208s DF (w) Len(s)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Average Document Frequency", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Emb(s) \u2211 w\u2208s Emb(w) Len(s)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Average Word Embedding Emb(w) is the word embedding", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Sentence Relation Features M atch-P (s, s \u2032 ) = M atch(s,s \u2032 ) Len(s) Term match precision M atch-P (s, s \u2032 ) = 0 if s \u2229 s \u2032 = \u2205 M atch-R(s, s \u2032 ) = M atch(s,s \u2032 ) Len(s \u2032 ) Term match recall M atch-R(s, s \u2032 ) = 0 if s \u2229 s \u2032 = \u2205 T F (s, s \u2032 ) = Len(s\u2229s \u2032 ) \u2211 w\u2208s\u2229s \u2032 GT F (w) Average Global Term Frequency of overlap s \u2229 s \u2032 T F (s, s \u2032 ) = 0 if s \u2229 s \u2032 = \u2205 DF (s, s \u2032 ) = Len(s\u2229s \u2032 ) \u2211 w\u2208s\u2229s \u2032 DF (w)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Average Document Frequency of overlap s \u2229 s", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2032 DF (s, s \u2032 ) = 0 if s \u2229 s \u2032 = \u2205 Stop(s, s \u2032 ) = 1 \u2212 Stop-Count(s\u2229s \u2032 ) Len(s\u2229s \u2032 ) Stop words ratio of overlap s \u2229 s \u2032 Stop(s, s \u2032 ) = 0 if s \u2229 s \u2032 = \u2205 Cos(s, s \u2032 ) = Cosine(GT F (s), GT F (s \u2032 ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Cosine of Global Term Frequency vector", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Emb-Cos(s, s \u2032 ) = Cosine(Emb(s), Emb(s \u2032 ))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Cosine of average embedding vector Datasets. The benchmark evaluation corpora for summarization are the ones published by the Document Understanding Conferences (DUC 1 ). We focus on the generic multi-document summarization task, so we carried out all our experiments on DUC 2001, 2002 and 2004 datasets. The documents are all from the news domain and are grouped into various thematic clusters. For each document set, we concatenated all the articles and split them into sentences using the tool provided with the DUC 2003 dataset. We train the model on two years' data and test it on the other year. Evaluation Metric. ROUGE metrics are the official metrics of the DUC extractive summarization tasks (Rankel et al., 2013) . We use the official ROUGE tool 2 to evaluate the performance of the baselines as well as our approach (Lin, 2004) . The parameter of length constraint is \"-l 100\" for DUC 2001/2002, and \"-b 665\" for DUC 2004. We take ROUGE-2 recall as the main metric for comparison because Owczarzak et al. prove its high capability of evaluating automatic summarization systems (Owczarzak et al., 2012) . Comparison Methods. The comparison methods used in the experiments are listed as follows.", |
| "cite_spans": [ |
| { |
| "start": 702, |
| "end": 723, |
| "text": "(Rankel et al., 2013)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 828, |
| "end": 839, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 1089, |
| "end": 1113, |
| "text": "(Owczarzak et al., 2012)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 LexRank: State-of-the-art summarization model (Erkan and Radev, 2004 ).", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 70, |
| "text": "(Erkan and Radev, 2004", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 ClusterHITS: State-of-the-art results on DUC 2001 (Wan and Yang, 2008 ).", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 71, |
| "text": "(Wan and Yang, 2008", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 ClusterCMRW: State-of-the-art results on DUC 2002 (Wan and Yang, 2008) .", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 72, |
| "text": "(Wan and Yang, 2008)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 REGSUM 3 : State-of-the-art results on DUC 2004 (Hong and Nenkova, 2014) .", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 74, |
| "text": "(Hong and Nenkova, 2014)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 R2N2 GA/R2N2 ILP: State-of-the-art results on DUC 2001 /2002 (Cao et al., 2015a with a neural network regression model.", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 56, |
| "text": "DUC 2001", |
| "ref_id": null |
| }, |
| { |
| "start": 57, |
| "end": 62, |
| "text": "/2002", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 63, |
| "end": 81, |
| "text": "(Cao et al., 2015a", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 PriorSum: To our knowledge, the best results on DUC 2001, 2002 and 2004 using regression approaches (Cao et al., 2015b) .", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 121, |
| "text": "(Cao et al., 2015b)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 SR (Sentence Regression): Evaluate sentence importance with MLP and the Sentence Importance Features in Table 1 and select the top ranks as the summary (without handling redundancy). Table 1 and generate the summary with greedy by directly discarding the redundant sentence according to Equation 1.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 106, |
| "end": 113, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 185, |
| "end": 192, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 RASR (Redundancy-Aware Sentence Regression): The proposed method in this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that for the methods with the parameter t, we tried all values of ranging from 0 to 1 with a step size of 0.05. The final value of t on each dataset is decided by 3-fold cross validation on the training datasets. Model Configuration. The word embedding used in this paper is trained on the English Wikipedia Corpus 4 with Google's Word2Vec tool 5 . The dimension is 300. We use 4 hidden layers MLP with tanh activation function and the sizes of the layers are [300, 200, 100, 1] . To update the weights of MLP, we apply the diagonal variant of AdaGrad with mini-batches. We set the mini-batch size to 20.", |
| "cite_spans": [ |
| { |
| "start": 465, |
| "end": 470, |
| "text": "[300,", |
| "ref_id": null |
| }, |
| { |
| "start": 471, |
| "end": 475, |
| "text": "200,", |
| "ref_id": null |
| }, |
| { |
| "start": 476, |
| "end": 480, |
| "text": "100,", |
| "ref_id": null |
| }, |
| { |
| "start": 481, |
| "end": 483, |
| "text": "1]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Formulations Descriptions", |
| "sec_num": null |
| }, |
| { |
| "text": "First Sentence Selection. Remember that when generating a summary, our method first selects the first sentence then greedily selects the rest sentences with respective to f (s|S). We tried two strategies to select the first sentence with RASR. Strategy 1: treating RASR as an united model by setting the Sentence Relation Features to zero when fitting f (s) during training period or selecting the first sentence during test period. Strategy 2: treating RASR as two models that fit f (s) and f (s|S) respectively. The former is used to select the first sentence and the latter is used to select the rest sentences. We also use the sentence that gets the highest ROUGE-2 score as the first sentence as a comparison, namely BestSentence. The results are shown in Table 2 . As expected, BestSentence is much better than Strategy 1 and Strategy 2, which means selecting a better first sentence will greatly improve the performance of RASR. It does not make too much difference whether using Strategy 1 or Strategy 2. We report the results of Strategy 1 to compare with the baseline methods in Table 3 . Performance Analysis. As shown in Table 3 , the bold face indicates the best performance. Generally, our method RASR achieves the best performance in terms of ROUGE-2 metric on all three datasets. The improvement of ROUGE-2 on DUC 2001 is significant with p-value < 0.05 compared with LexRank, SR and t-SR. Although ClusterHITS and ClusterCMRW get higher ROUGE-1 scores, their ROUGE-2 scores are much lower. In contrast, our method works quite stably.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 761, |
| "end": 768, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1089, |
| "end": 1096, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 1133, |
| "end": 1140, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The improvements of our method come from two aspects. First, it is effective to model sentence importance and redundancy simultaneously with multiple nonlinear function transformations. This can be reflected by the following comparison experiments. SR does not handle redundancy at all, so it achieves bad performance especially on the DUC 2004 corpus. The other methods in Table 3 model sentence importance and redundancy in two separate processes by first ranking the sentences and then discarding the redundant ones whose bi-gram overlap ratio is larger than a threshold parameter. Although we tune the threshold parameter carefully, RASR still outperforms them. Second, effective features involving the sentence relations (i.e., Sentence Relation Features) are considered which cannot be incorporated by the baseline methods. Peer T/Peer 26/Peer 65 are the original results on DUC 2001/2002/2004 respectively. We cite the scores of some systems from their papers, indicated with the sign \"*\". Parameter Sensitiveness. We present the ROUGE-2 performance of t-SR with the threshold parameter t ranging from 0 to 0.9 with a step size of 0.05 shown in Figure 1 and 2a. The best achieved performances of the groundtruth implementation are around 0.75, 0.65, 0.6 ( Figure 1 ) while the best achieved performances in practice are around 0.7, 0.7, 0.65 (Figure 2a ). t is still very sensitive around the best performance, as shown in the red dashed line in both Figure 1 and 2a. Training Convergence. In order to speed up the training process of RASR, we randomly sample some pairwise training instances with Equation 7 for training of RASR. We want to know whether this will influence the convergence of RASR, so we present the decrease of loss with respect to training iterations in Figure 2b . We find that the random sampling has little influence on the convergence of RASR with t-SR as a comparison.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 374, |
| "end": 381, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 1152, |
| "end": 1160, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1263, |
| "end": 1271, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1349, |
| "end": 1359, |
| "text": "(Figure 2a", |
| "ref_id": null |
| }, |
| { |
| "start": 1458, |
| "end": 1466, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1781, |
| "end": 1790, |
| "text": "Figure 2b", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Existing work on extractive summarization can be divided into two categories: unsupervised and supervised. Two most famous unsupervised frameworks are Centroid based and Maximum Marginal Relevance based. Centroid-based methods evaluate the sentence centrality as its importance (Mihalcea, 2004) . Radev et al. first propose to model cluster centroids in their summarization system, MEAD (Radev et al., 2000; . Then LexRank (or TextRank) is proposed to compute sentence importance 05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 0.45 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0 based on the concept of eigenvector centrality in a graph of sentence similarities (Erkan and Radev, 2004; Mihalcea and Tarau, 2004) . Due to its expansibility and flexibility, centroid-based methods have a lot of extensions. Wan et al. propose several centroid-based approaches for different summarization tasks, e.g., cross-language summarization, etc (Wan, 2008; Wan and Xiao, 2009; Wan, 2011) . Maximum Marginal Relevance (MMR) based methods consider the linear trade-off between relevance and redundancy (Carbonell and Goldstein, 1998) . Goldstein et al. first extend MMR to support extractive summarization by incorporating additional information (Goldstein et al., 2000) . McDonald achieves good results by reformulating this as a knapsack packing problem and solving it using ILP (McDonald, 2007) . Later Lin and Bilmes propose a variant of MMR framework which maximizes an objective function that considers the linear trade-off between coverage and redundancy terms (Lin and Bilmes, 2010; Lin and Bilmes, 2011) . Supervised methods model the extractive summarization task from various perspectives. Kupiec et al. train a naive-Bayes classifier to decide whether to include a particular sentence in the summary or not. (Kupiec et al., 1995) . Li et al. evaluate the sentence importance with support vector regression, then a simple rule-based method is applied for removing redundant phrases . 
Gillick and Favre evaluate bi-grams importance and then use these scores to evaluate sentence importance and redundancy with a linear combination (Gillick and Favre, 2009) . Sipos et al. propose a structural SVM learning approach to learn the weights of feature combination using the MMR-like submodularity function proposed by Lin and Bilmes (Lin and Bilmes, 2010) . Cao et al. evaluate the sentence importance with a neural regression model, then they remove the redundant sentence larger than a threshold parameter during greedy algorithm (Cao et al., 2015b) . In another paper, they remove the redundant sentence by adding a redundancy constraint to the ILP objective which restricts the bi-gram redundancy of the selected sentences smaller than a threshold (Cao et al., 2015a) .", |
| "cite_spans": [ |
| { |
| "start": 278, |
| "end": 294, |
| "text": "(Mihalcea, 2004)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 387, |
| "end": 407, |
| "text": "(Radev et al., 2000;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 640, |
| "end": 663, |
| "text": "(Erkan and Radev, 2004;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 664, |
| "end": 689, |
| "text": "Mihalcea and Tarau, 2004)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 911, |
| "end": 922, |
| "text": "(Wan, 2008;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 923, |
| "end": 942, |
| "text": "Wan and Xiao, 2009;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 943, |
| "end": 953, |
| "text": "Wan, 2011)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 1081, |
| "end": 1097, |
| "text": "Goldstein, 1998)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1210, |
| "end": 1234, |
| "text": "(Goldstein et al., 2000)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1345, |
| "end": 1361, |
| "text": "(McDonald, 2007)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1532, |
| "end": 1554, |
| "text": "(Lin and Bilmes, 2010;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1555, |
| "end": 1576, |
| "text": "Lin and Bilmes, 2011)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1784, |
| "end": 1805, |
| "text": "(Kupiec et al., 1995)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 2105, |
| "end": 2130, |
| "text": "(Gillick and Favre, 2009)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 2302, |
| "end": 2324, |
| "text": "(Lin and Bilmes, 2010)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 2501, |
| "end": 2520, |
| "text": "(Cao et al., 2015b)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 2721, |
| "end": 2740, |
| "text": "(Cao et al., 2015a)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 480, |
| "end": 556, |
| "text": "05 0.1 0.15 0.2 0.25 0.3 0.35 0.4 0.45 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In all above extractive summarization methods, redundancy is mainly considered in two ways. The first way is measuring the importance of each sentence then explicitly removing the redundant sentence larger than a threshold parameter during the sentence selection process. Another way is linearly substracting the sentence redundancy score or scoring the redundant parts with low weights. To the best of our knowledge, none of them studies the summarization task and models redundancy from the perspective of this paper.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This paper presents a novel sentence regression framework to conduct regression with respect to the relative importance f (s|S) of sentence s given a set of sentences S. Additional features involving the sentence relations are incorporated. We conduct experiments on three DUC benchmark datasets. Generally, our approach achieves the best performance in terms of ROUGE metrics compared with state-of-the-art approaches.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We believe our work can be advanced and extended from many different perspectives. First, more features can be designed especially those involving the relations of two sentences. Second, the results can be further improved by exploring better strategies to select the first sentence. Third, the framework can be extended to other tasks, e.g., query-focused summarization, which can be achieved by introducing query-related features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "http://duc.nist.gov/ 2 ROUGE-1.5.5 with options: -n 2 -m -u -c 95 -x -r 1000 -f A -p 0.5 -t 0 3 REGSUM truncates a summary to 100 words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://en.wikipedia.org/wiki/Wikipedia:Database download 5 https://code.google.com/archive/p/word2vec/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work is supported by the Natural Science Foundation of China (61672322, 61672324, 61272240), Microsoft Fund (FY14-RES-THEME-025), the Natural Science Foundation of Shandong Province (ZR2012FM037), the Excellent Middle-Aged and Youth Scientists of Shandong Province (B-S2012DX017) and the Fundamental Research Funds of Shandong University.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgement", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Multi-document summarization using a* search and discriminative training", |
| "authors": [ |
| { |
| "first": "Ahmet", |
| "middle": [], |
| "last": "Aker", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Gaizauskas", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "482--491", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ahmet Aker, Trevor Cohn, and Robert Gaizauskas. 2010. Multi-document summarization using a* search and discriminative training. In Proceedings of EMNLP, pages 482-491. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Ranking with recursive neural networks and its application to multi-document summarization", |
| "authors": [ |
| { |
| "first": "Ziqiang", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "2153--2159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziqiang Cao, Furu Wei, Li Dong, Sujian Li, and Ming Zhou. 2015a. Ranking with recursive neural networks and its application to multi-document summarization. In Proceedings of AAAI, pages 2153-2159. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Learning summary prior representation for extractive summarization", |
| "authors": [ |
| { |
| "first": "Ziqiang", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenjie", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Houfeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "829--833", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziqiang Cao, Furu Wei, Sujian Li, Wenjie Li, Ming Zhou, and Houfeng Wang. 2015b. Learning summary prior representation for extractive summarization. Proceedings of ACL, pages 829-833.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The use of mmr, diversity-based reranking for reordering documents and producing summaries", |
| "authors": [ |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Jade", |
| "middle": [], |
| "last": "Goldstein", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "335--336", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jaime Carbonell and Jade Goldstein. 1998. The use of mmr, diversity-based reranking for reordering documents and producing summaries. In Proceedings of SIGIR, pages 335-336. ACM.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Left-brain/right-brain multidocument summarization", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Judith", |
| "middle": [ |
| "D" |
| ], |
| "last": "Conroy", |
| "suffix": "" |
| }, |
| { |
| "first": "Jade", |
| "middle": [], |
| "last": "Schlesinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Dianne", |
| "middle": [ |
| "P" |
| ], |
| "last": "Goldstein", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Oleary", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the DUC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John M Conroy, Judith D Schlesinger, Jade Goldstein, and Dianne P Oleary. 2004. Left-brain/right-brain multi- document summarization. In Proceedings of the DUC.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Lexrank: Graph-based lexical centrality as salience in text summarization", |
| "authors": [ |
| { |
| "first": "G\u00fcnes", |
| "middle": [], |
| "last": "Erkan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dragomir R Radev", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "", |
| "issue": "", |
| "pages": "457--479", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G\u00fcnes Erkan and Dragomir R Radev. 2004. Lexrank: Graph-based lexical centrality as salience in text summa- rization. Journal of Artificial Intelligence Research, pages 457-479.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Extractive multi-document summarization with integer linear programming and support vector regression", |
| "authors": [], |
| "year": 2012, |
| "venue": "Dimitrios Galanis, Gerasimos Lampouras, and Ion Androutsopoulos", |
| "volume": "", |
| "issue": "", |
| "pages": "911--926", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitrios Galanis, Gerasimos Lampouras, and Ion Androutsopoulos. 2012. Extractive multi-document summa- rization with integer linear programming and support vector regression. In Proceedings of COLING, pages 911-926. Citeseer.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A skip-chain conditional random field for ranking meeting utterances by importance", |
| "authors": [ |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "364--372", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michel Galley. 2006. A skip-chain conditional random field for ranking meeting utterances by importance. In Proceedings of EMNLP, pages 364-372. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Artificial neural networks (the multilayer perceptron)a review of applications in the atmospheric sciences", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Matt", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "R" |
| ], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dorling", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Atmospheric environment", |
| "volume": "32", |
| "issue": "14", |
| "pages": "2627--2636", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt W Gardner and SR Dorling. 1998. Artificial neural networks (the multilayer perceptron)a review of applica- tions in the atmospheric sciences. Atmospheric environment, 32(14):2627-2636.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "A scalable global model for summarization", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Gillick", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Benoit Favre", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Workshop on Integer Linear Programming for Natural Langauge Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "10--18", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Gillick and Benoit Favre. 2009. A scalable global model for summarization. In Proceedings of the Workshop on Integer Linear Programming for Natural Langauge Processing, pages 10-18. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Multi-document summarization by sentence extraction", |
| "authors": [ |
| { |
| "first": "Jade", |
| "middle": [], |
| "last": "Goldstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Vibhu", |
| "middle": [], |
| "last": "Mittal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Kantrowitz", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the 2000 NAACL-ANLPWorkshop on Automatic summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "40--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jade Goldstein, Vibhu Mittal, Jaime Carbonell, and Mark Kantrowitz. 2000. Multi-document summarization by sentence extraction. In Proceedings of the 2000 NAACL-ANLPWorkshop on Automatic summarization, pages 40-48. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Measuring importance and query relevance in topicfocused multi-document summarization", |
| "authors": [ |
| { |
| "first": "Surabhi", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "193--196", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Surabhi Gupta, Ani Nenkova, and Dan Jurafsky. 2007. Measuring importance and query relevance in topic- focused multi-document summarization. In Proceedings of ACL, pages 193-196. Association for Computation- al Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Improving the estimation of word importance for news multi-document summarization", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Hong", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "712--721", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Hong and Ani Nenkova. 2014. Improving the estimation of word importance for news multi-document summarization. In Proceedings of EACL, pages 712-721.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "System combination for multi-document summarization", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Hong", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitchell", |
| "middle": [], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "107--117", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Hong, Mitchell Marcus, and Ani Nenkova. 2015. System combination for multi-document summarization. In Proceedings of EMNLP, pages 107-117. The Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Ppsgen: Learning to generate presentation slides for academic papers", |
| "authors": [ |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yue Hu and Xiaojun Wan. 2013. Ppsgen: Learning to generate presentation slides for academic papers. In Proceedings of IJCAI. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A trainable document summarizer", |
| "authors": [ |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Kupiec", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Pedersen", |
| "suffix": "" |
| }, |
| { |
| "first": "Francine", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proceedings of SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "68--73", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julian Kupiec, Jan Pedersen, and Francine Chen. 1995. A trainable document summarizer. In Proceedings of SIGIR, pages 68-73. ACM.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Multi-document summarization using support vector regression", |
| "authors": [ |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "You", |
| "middle": [], |
| "last": "Ouyang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of DUC. Citeseer", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sujian Li, You Ouyang, Wei Wang, and Bin Sun. 2007. Multi-document summarization using support vector regression. In Proceedings of DUC. Citeseer.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Using supervised bigram-based ilp for extractive summarization", |
| "authors": [ |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1004--1013", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen Li, Xian Qian, and Yang Liu. 2013. Using supervised bigram-based ilp for extractive summarization. In Proceedings of ACL, pages 1004-1013. Citeseer.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Multi-document summarization via budgeted maximization of submodular functions", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Bilmes", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "912--920", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Lin and Jeff Bilmes. 2010. Multi-document summarization via budgeted maximization of submodular func- tions. In Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics, pages 912-920. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A class of submodular functions for document summarization", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Bilmes", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "510--520", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hui Lin and Jeff Bilmes. 2011. A class of submodular functions for document summarization. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 510-520. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The automated acquisition of topic signatures for text summarization", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of COLING", |
| "volume": "", |
| "issue": "", |
| "pages": "495--501", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin and Eduard Hovy. 2000. The automated acquisition of topic signatures for text summarization. In Proceedings of COLING, pages 495-501. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Rouge: A package for automatic evaluation of summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Text summarization branches out: Proceedings of the ACL-04 workshop", |
| "volume": "8", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out: Proceedings of the ACL-04 workshop, volume 8.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A study of global inference algorithms in multi-document summarization", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "McDonald", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Advances in Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "557--564", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan McDonald. 2007. A study of global inference algorithms in multi-document summarization. Advances in Information Retrieval, pages 557-564.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Textrank: Bringing order into texts", |
| "authors": [ |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Tarau", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "404--411", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rada Mihalcea and Paul Tarau. 2004. Textrank: Bringing order into texts. In Proceedings of EMNLP, pages 404-411, Barcelona, Spain, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Graph-based ranking algorithms for sentence extraction, applied to text summarization", |
| "authors": [ |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of ACL", |
| "volume": "20", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rada Mihalcea. 2004. Graph-based ranking algorithms for sentence extraction, applied to text summarization. In Proceedings of ACL, page 20. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Using maximum entropy for sentence extraction", |
| "authors": [ |
| { |
| "first": "Miles", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the ACL-02 Workshop on Automatic Summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "1--8", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miles Osborne. 2002. Using maximum entropy for sentence extraction. In Proceedings of the ACL-02 Workshop on Automatic Summarization, pages 1-8. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Developing learning strategies for topic-based summarization", |
| "authors": [ |
| { |
| "first": "You", |
| "middle": [], |
| "last": "Ouyang", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenjie", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "79--86", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "You Ouyang, Sujian Li, and Wenjie Li. 2007. Developing learning strategies for topic-based summarization. In Proceedings of CIKM, pages 79-86. ACM.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Applying regression models to query-focused multidocument summarization", |
| "authors": [ |
| { |
| "first": "You", |
| "middle": [], |
| "last": "Ouyang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenjie", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Information Processing & Management", |
| "volume": "47", |
| "issue": "2", |
| "pages": "227--237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "You Ouyang, Wenjie Li, Sujian Li, and Qin Lu. 2011. Applying regression models to query-focused multi- document summarization. Information Processing & Management, 47(2):227-237.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "An assessment of the accuracy of automatic evaluation in summarization", |
| "authors": [ |
| { |
| "first": "Karolina", |
| "middle": [], |
| "last": "Owczarzak", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "M" |
| ], |
| "last": "Conroy", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoa", |
| "middle": [ |
| "Trang" |
| ], |
| "last": "Dang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karolina Owczarzak, John M Conroy, Hoa Trang Dang, and Ani Nenkova. 2012. An assessment of the accuracy of automatic evaluation in summarization. In Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Summarization, pages 1-9. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Centroid-based summarization of multiple documents: sentence extraction, utility-based evaluation, and user studies", |
| "authors": [ |
| { |
| "first": "Dragomir", |
| "middle": [ |
| "R" |
| ], |
| "last": "Radev", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongyan", |
| "middle": [], |
| "last": "Jing", |
| "suffix": "" |
| }, |
| { |
| "first": "Malgorzata", |
| "middle": [], |
| "last": "Budzikowska", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of NAACL-ANLP", |
| "volume": "", |
| "issue": "", |
| "pages": "21--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dragomir R Radev, Hongyan Jing, and Malgorzata Budzikowska. 2000. Centroid-based summarization of multi- ple documents: sentence extraction, utility-based evaluation, and user studies. In Proceedings of NAACL-ANLP, pages 21-30. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Centroid-based summarization of multiple documents", |
| "authors": [ |
| { |
| "first": "Dragomir", |
| "middle": [ |
| "R" |
| ], |
| "last": "Radev", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongyan", |
| "middle": [], |
| "last": "Jing", |
| "suffix": "" |
| }, |
| { |
| "first": "Ma\u0142gorzata", |
| "middle": [], |
| "last": "Sty\u015b", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Tam", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Information Processing & Management", |
| "volume": "40", |
| "issue": "6", |
| "pages": "919--938", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dragomir R Radev, Hongyan Jing, Ma\u0142gorzata Sty\u015b, and Daniel Tam. 2004. Centroid-based summarization of multiple documents. Information Processing & Management, 40(6):919-938.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "A decade of automatic content evaluation of news summaries: Reassessing the state of the art", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [ |
| "A" |
| ], |
| "last": "Rankel", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "M" |
| ], |
| "last": "Conroy", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoa", |
| "middle": [ |
| "Trang" |
| ], |
| "last": "Dang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "131--136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter A Rankel, John M Conroy, Hoa Trang Dang, and Ani Nenkova. 2013. A decade of automatic content evalu- ation of news summaries: Reassessing the state of the art. In Proceedings of ACL, pages 131-136. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "The multilayer perceptron as an approximation to a bayes optimal discriminant function", |
| "authors": [ |
| { |
| "first": "Dennis", |
| "middle": [ |
| "W" |
| ], |
| "last": "Ruck", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [ |
| "K" |
| ], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Kabrisky", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [ |
| "E" |
| ], |
| "last": "Oxley", |
| "suffix": "" |
| }, |
| { |
| "first": "Bruce", |
| "middle": [ |
| "W" |
| ], |
| "last": "Suter", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "IEEE Transactions on Neural Networks", |
| "volume": "1", |
| "issue": "4", |
| "pages": "296--298", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dennis W Ruck, Steven K Rogers, Matthew Kabrisky, Mark E Oxley, and Bruce W Suter. 1990. The multi- layer perceptron as an approximation to a bayes optimal discriminant function. IEEE Transactions on Neural Networks, 1(4):296-298.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Graph-based multi-modality learning for topic-focused multi-document summarization", |
| "authors": [ |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianguo", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "1586--1591", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaojun Wan and Jianguo Xiao. 2009. Graph-based multi-modality learning for topic-focused multi-document summarization. In Proceedings of IJCAI, pages 1586-1591. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Multi-document summarization using cluster-based link analysis", |
| "authors": [ |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianwu", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "299--306", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaojun Wan and Jianwu Yang. 2008. Multi-document summarization using cluster-based link analysis. In Proceedings of SIGIR, pages 299-306. ACM.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Ctsum: extracting more certain summaries for news articles", |
| "authors": [ |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianmin", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of SIGIR", |
| "volume": "", |
| "issue": "", |
| "pages": "787--796", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaojun Wan and Jianmin Zhang. 2014. Ctsum: extracting more certain summaries for news articles. In Proceed- ings of SIGIR, pages 787-796. ACM.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Multi-document summarization via discriminative summary reranking", |
| "authors": [ |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziqiang", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujian", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1507.02062" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaojun Wan, Ziqiang Cao, Furu Wei, Sujian Li, and Ming Zhou. 2015. Multi-document summarization via discriminative summary reranking. arXiv preprint arXiv:1507.02062.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "An exploration of document impact on graph-based multi-document summarization", |
| "authors": [ |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "755--762", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaojun Wan. 2008. An exploration of document impact on graph-based multi-document summarization. In Proceedings of EMNLP, pages 755-762. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Using bilingual information for cross-language document summarization", |
| "authors": [ |
| { |
| "first": "Xiaojun", |
| "middle": [], |
| "last": "Wan", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1546--1555", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaojun Wan. 2011. Using bilingual information for cross-language document summarization. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 1546-1555. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Multi-document summarization by maximizing informative content-words", |
| "authors": [ |
| { |
| "first": "Wen-tau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucy", |
| "middle": [], |
| "last": "Vanderwende", |
| "suffix": "" |
| }, |
| { |
| "first": "Hisami", |
| "middle": [], |
| "last": "Suzuki", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of IJCAI", |
| "volume": "7", |
| "issue": "", |
| "pages": "1776--1782", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wen-tau Yih, Joshua Goodman, Lucy Vanderwende, and Hisami Suzuki. 2007. Multi-document summarization by maximizing informative content-words. In Proceedings of IJCAI, volume 7, pages 1776-1782. AAAI Press.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "ROUGE-2 Score on DUC 2004.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "text": "Experimental Upper bounds of our sentence regression framework and existing sentence regression framework.", |
| "num": null, |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF0": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "", |
| "content": "<table><tr><td>: Summary of features</td></tr></table>" |
| }, |
| "TABREF3": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "text": "Comparison results (%) on DUC datasets", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |