| { |
| "paper_id": "Y14-1020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:44:12.317551Z" |
| }, |
| "title": "Topic-based Multi-document Summarization using Differential Evolution for Combinatorial Optimization of Sentences", |
| "authors": [ |
| { |
| "first": "Haruka", |
| "middle": [], |
| "last": "Shigematsu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ochanomizu University", |
| "location": { |
| "addrLine": "Bunkyo-ku", |
| "postCode": "112-8610", |
| "settlement": "Tokyo", |
| "country": "Japan" |
| } |
| }, |
| "email": "shigematsu.haruka@is.ocha.ac.jp" |
| }, |
| { |
| "first": "Ichiro", |
| "middle": [], |
| "last": "Kobayashi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Ochanomizu University", |
| "location": { |
| "addrLine": "Bunkyo-ku", |
| "postCode": "112-8610", |
| "settlement": "Tokyo", |
| "country": "Japan" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes a method of multi-document summarization with evolutionary computation. In automatic document summarization, the method to make a summary by finding the best combination of important sentences in target documents is a popular approach. To find the best combination of sentences, explicit solution techniques such as integer linear programming, branch and bound method, and so on are usually adopted. However, there is a problem with them in terms of calculation efficiency. So, we apply evolutionary computation, especially differential evolution which is regarded as a method having a good feature in terms of calculation cost to obtain a reasonable quasi-optimum solution in real time, to the problem of combinatorial optimization of important sentences. Moreover, we consider latent topics in deciding the importance of a sentence, and define three fitness functions to compare the results. As a result, we have confirmed that our proposed methods reduced the calculation time necessary to make a summary considerably, although precision is worse than the method with an explicit solution technique.", |
| "pdf_parse": { |
| "paper_id": "Y14-1020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes a method of multi-document summarization with evolutionary computation. In automatic document summarization, the method to make a summary by finding the best combination of important sentences in target documents is a popular approach. To find the best combination of sentences, explicit solution techniques such as integer linear programming, branch and bound method, and so on are usually adopted. However, there is a problem with them in terms of calculation efficiency. So, we apply evolutionary computation, especially differential evolution which is regarded as a method having a good feature in terms of calculation cost to obtain a reasonable quasi-optimum solution in real time, to the problem of combinatorial optimization of important sentences. Moreover, we consider latent topics in deciding the importance of a sentence, and define three fitness functions to compare the results. As a result, we have confirmed that our proposed methods reduced the calculation time necessary to make a summary considerably, although precision is worse than the method with an explicit solution technique.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "As a general method of automatic multi-document summarization, we often use the important sentence extraction method which obtains the most proper combination of important sentences in target documents for a summary, avoiding redundancy in the generated summary. The explicit solution techniques, e.g., integer programming, branch and bound method, for optimal combination are often used under some constraints for the best combination of sentences. They have however a problem in terms of calculation costs. In general, if the size of target data sets is huge, the problem of combinatorial optimization becomes NP-hard. On the other hand, as an optimization method to obtain quasi-optimum solution in real time, it is reported that evolutionary computation is useful for realistic solutions. In this context, we employ differential evolution (DE) known as superior to other evolutionary computation algorithms in terms of calculation costs and the accuracy of solution, and apply it to multi-document summarization. Besides, under an assumption that multiple topics are included in documents, latent topics in documents are extracted by means of latent Dirichlet allocation, we make a summary, considering the latent topics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As for document summarization using combinatorial optimization techniques, many studies employ explicit solution techniques such as branch and bound method, dynamic programming, integer linear programming, and so on (Mcdonald, 2007; Yih et al., 2007; Gillick et al., 2008; Takamura et al., 2009; Lin et al., 2010) . However, the explicit solution techniques often face NP-hard, they require much calculation time for solving a problem of combinatorial optimization, though they provide optimal solution. In this context, Nishikawa et al.(2012) have proposed a method to obtain approximate solution by employing Lagrange relaxation on constraints to make a summary and to introduce it to PACLIC 28 ! 153 the objective function of selecting best combination of important sentences, and got a good result.", |
| "cite_spans": [ |
| { |
| "start": 216, |
| "end": 232, |
| "text": "(Mcdonald, 2007;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 233, |
| "end": 250, |
| "text": "Yih et al., 2007;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 251, |
| "end": 272, |
| "text": "Gillick et al., 2008;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 273, |
| "end": 295, |
| "text": "Takamura et al., 2009;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 296, |
| "end": 313, |
| "text": "Lin et al., 2010)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 521, |
| "end": 543, |
| "text": "Nishikawa et al.(2012)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related studies", |
| "sec_num": "2" |
| }, |
| { |
| "text": "On the other hand, as an optimization method to obtain approximate solution, it has been reported that evolutionary computation is useful -Petkovic et al. (2011) and Nieminen et al. (2003) have compared the ability between explicit solution techniques, and dynamic programming and genetic algorithm (GA) (Holland, 1975) , and confirmed that GA is superior to the explicit techniques in terms of calculation cost. Furthermore, in the experiments in Chandrasekar et al. 2012, differential evolution (DE) is superior to GA and particle swarm (Kennedy et al., 1995) in terms of the precision of solution and calculation speed.", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 161, |
| "text": "-Petkovic et al. (2011)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 166, |
| "end": 188, |
| "text": "Nieminen et al. (2003)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 304, |
| "end": 319, |
| "text": "(Holland, 1975)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 539, |
| "end": 561, |
| "text": "(Kennedy et al., 1995)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related studies", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As for document summarization using combinatorial optimization techniques, the number of the studies using evolutionary computation has been gradually increasing. Nandhini (2013) applied GA for the combinatorial optimization of sentences so that a generated summary realizes good readability, cohesion, and rich contents, and then showed that their method provided stable precision rather than other methods using explicit solution techniques. Alguliev et al. (2011) proposed a method using differential evolution to make a summary taking account of covering the whole contents of target documents and removing redundancy of the contents in a generated summary.", |
| "cite_spans": [ |
| { |
| "start": 163, |
| "end": 178, |
| "text": "Nandhini (2013)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 444, |
| "end": 466, |
| "text": "Alguliev et al. (2011)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related studies", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As for combinatorial optimization of sentences, the way of deciding an important sentences is essential. In general, the importance of a sentence is often decided by the words included in the sentence. As the way of deciding the important words, in addition to the conventional way of using tf-idf, the way of using latent information has been recently regarded as useful. To estimate latent topics in documents, latent Dirichlet allocation (LDA) (Blei et al., 2003) is often used and applied to various NLP application, e.g., clustering, summarization, information retrieval, information recommendation, etc. As for document summarization, Murray et al. (2009) and Arora et al. (2008) employed LDA to extract important sentences based on latent topics. Gao et al. (2012) have proposed a method employing LDA to make a topic-based similarity graph of sentences, and shown that the method provides high precision.", |
| "cite_spans": [ |
| { |
| "start": 447, |
| "end": 466, |
| "text": "(Blei et al., 2003)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 641, |
| "end": 661, |
| "text": "Murray et al. (2009)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 666, |
| "end": 685, |
| "text": "Arora et al. (2008)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 754, |
| "end": 771, |
| "text": "Gao et al. (2012)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related studies", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Considering these prior studies, in this study we propose a multi-document summarization method employing latent topics for deciding the importance of sentences and differential evolution for combinatorial optimization of sentences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related studies", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Differential evolution (DE) is a kind of evolutionary computation and a populationbased stochastic search algorithm to solve a combinatorial optimization problem. DE has a special feature in mutation operation compared to simple GA (Holland, 1975) . It performs based on differences between pairs of solutions for the purpose of deciding the orientation in search space by following the distribution of solutions in the current population. DE is regarded as a useful method for optimal solution in terms of simplicity, calculation speed and precision. The general DE algorithm is shown as follows:", |
| "cite_spans": [ |
| { |
| "start": 232, |
| "end": 247, |
| "text": "(Holland, 1975)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Step 1. Initialization: N solutions are randomly generated in the initial population.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "G(0) = {P 1 (0), P 2 (0), . . . , P N (0)}.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Step 2. Completion of judgment: Complete the process if the number of generation has reached to the predefined number, g max .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Step 3. Mutation: For each individual P i (g), three unique solutions, P a (g),P b (g),P c (g), are selected from the population G(g). And then a mutation vector Q i (g) is obtained from a base vector P a (g) and a difference vector P b (g) \u2212 P c (g) as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Q i (g) = P a (g) + F (P b (g) \u2212 P c (g)) (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Here, F is an adjustment parameter for the difference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Step 4. Crossover:A parent vector P i (g) and a mutation vector Q i (g) are crossed over and a child vector R i (g) is generated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Step 5. Selection of solutions: Compare a parent vector P i (g) and a child vector R i (g), the better solution is selected for the next generation. This process is adopted to all solutions in the current generation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Step 6. Return to Step 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The overview of the process from step 3 to step 5 is illustrated in Figure 1 . 4 Document summarization using DE Let us assume that target documents consisting of n sentences, and a summary is made by the combination of important sentences extracted from the documents. To encode the phenotype of this setting into the genotype, we employ a n-length binary vector in which 1 indicates the state of the sentence being selected and 0 is not the state. As for optimal combination of sentences using DE, each solution is regarded as the combination of sentences, and therefore, the best combination of sentences for a summary is found by solving the problem under some constraint such as the length of a summary, etc.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 68, |
| "end": 76, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Differential evolution", |
| "sec_num": "3" |
| }, |
| { |
| "text": "A summary is made based on the best solution obtained in all generations of DE process. There are some specific processes added to general DE process for document summarization, for example, converting real number vectors into binary vectors which indicates the states of sentence selection, solution selection based on constraint on the length of a summary, etc. Each modified DE process is shown in the following.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Process of document summarization using DE", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In DE process, the population G(g) consisting of N solutions is evolved in generations g = 0, 1, . . . , g max . Here, the i-th solution at generation g, i.e., P i (g), is expressed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the initial population", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "P i (g) = [p i,1 (g), p i,2 (g), . . . , p i,n (g)]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the initial population", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "In general, the initial population G(0) is provided by the following equation so as it should be diverse in search space. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the initial population", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "In general, equation 3is used to obtain mutation vector Q i , however, there are many studies to propose other new vectors in order to obtain a better solution (Mallipeldi et al., 2007; Storn, 1996; Qin et al., 2009; Iorio et al., 2004; Ali, 2011) . In our study, we adopt the equation employed by Alguliev et al.(2011) because they have got a good result for document summarization with the equation.", |
| "cite_spans": [ |
| { |
| "start": 160, |
| "end": 185, |
| "text": "(Mallipeldi et al., 2007;", |
| "ref_id": null |
| }, |
| { |
| "start": 186, |
| "end": 198, |
| "text": "Storn, 1996;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 199, |
| "end": 216, |
| "text": "Qin et al., 2009;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 217, |
| "end": 236, |
| "text": "Iorio et al., 2004;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 237, |
| "end": 247, |
| "text": "Ali, 2011)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 298, |
| "end": 319, |
| "text": "Alguliev et al.(2011)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mutation", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "Q i (g) = P a (g) + F \u2022 (P best (g) \u2212 P b (g)) + F \u2022 (P best (g) \u2212 P c (g)) (3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mutation", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "P a (g),P b (g),P c (g) are solutions randomly selected from the population G(g) except solution P i (g). P best is the best solution in G(g). F is an adjustment factor, and the value of [0.4, 1.0] is regarded as effective by .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Mutation", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "A parent vector P i (g) and mutation vector Q i (g) are crossed over with crossover ratio CR(g), and then a child vector R i (g) is generated. Here, each locus of a child vector r i,s (g) succeeds the locus of either a parent vector p i,s (g) or a mutation vector q i,s (g) under the condition shown in equation 4.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Crossover", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "r i,s (g) = \u23a7 \u23a8 \u23a9 q i,s (g) (if rand i,s \u2264 CR(g)or s = s rand ) p i,s (g) (otherwise)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Crossover", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "(4) s rand is a value randomly selected from 1, 2, . . . , n. By providing a chance to mutate at the s rand -th locus, it prevents that a child vector becomes the same one as a parent vector.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Crossover", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "Moreover, in general, the solution is expected to become better as generation proceeds, therefore, a child vector had better not be generated by taking over many features of a parent vector. In this context, mutation rate decreases as generation proceeds. So, mutation rate CR(g) is shown in equation 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Crossover", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "CR(g) = CR(0) \u2022 sigm g max 2 \u2022 (g + 1)", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Crossover", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "Here, sigm(\u2022) is a sigmoid function and is used to decrease mutation rate as generation gets close to g max . CR(0) is the mutation rate given at the first generation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Crossover", |
| "sec_num": "4.1.3" |
| }, |
| { |
| "text": "A new solution P i (g+1) at the next generation to generation g is selected by evaluating a parent vector P i (g) and a child vector R i (g). Here, in order to evaluate fitness value, a solution has to be a binary vector. So, a real-valued vector P is changed to a binary vector P \u2032 by following rule.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Selection", |
| "sec_num": "4.1.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p \u2032 i,s (g) = \u23a7 \u23a8 \u23a9 1 (if 0.5 < sigm(p i,s (g))) 0 (otherwise)", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Selection", |
| "sec_num": "4.1.4" |
| }, |
| { |
| "text": "First of all, real value p i,s (g) is changed to the value of [0, 1] through a sigmoid function. if the value is bigger than 0.5 then it is set as 1, and if not then 0. After changing real-valued vector to binary vector and obtaining fitness value, either a parent vector P i (g) or a child vector R i (g) is selected as a solution at next generation, i.e., P i (g + 1) by the following rules.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Selection", |
| "sec_num": "4.1.4" |
| }, |
| { |
| "text": "\u2022 If both parent and child satisfy the constraint, the one with higher fitness value is selected. \u2022 If either a parent or a child does not satisfy the constraint the one which satisfies the constraint is selected. \u2022 If both parent and child do not satisfy the constraint, the one which does not satisfy the constraint so much is selected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Selection", |
| "sec_num": "4.1.4" |
| }, |
| { |
| "text": "We define a fitness function so as it evaluates a solution P i , which includes important contents and less redundancy, as being highly regarded. Here, we propose three fitness functions, taking account of latent topics in documents.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Definition of fitness function", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We define fitness function 1 as the one which evaluates the combination of sentences including important contents of target documents as being highly regarded, considering the importance of a sentence and coverage ratio simultaneously (see, equation (7)).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 1", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f (P i ) = |W i | V n s=1 b s p \u2032 i,s", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Fitness function 1", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "Here, |W i | and V indicate the numbers of vocabularies included in a solution P i and target documents, respectively, and |W i | V indicates the coverage ratio of the vocabularies in a solution P i to V . b s expresses the importance of sentence s based on latent topics estimated by means of LDA, and is expressed in equation 8.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 1", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "b s = K t=1 b ts", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Fitness function 1", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "Here, b ts expresses the importance of sentence s in each topic t(t = 1, . . . , K), therefore, it is decided by the total sum of the importance in each topic. b ts is expressed in equation (9).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 1", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "b ts = V w=1 \u03c6 tw y sw |W s | \u2022 \u03b8 t (9) PACLIC 28 ! 156", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 1", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "\u03a6 t is the word occurrence probabilistic distribution to topics, it is represented as \u03a6 t = {\u03c6 t1 , . . . , \u03c6 tV }(t = 1, . . . , K). Here, \u03c6 tw indicates the importance of word w at topic t. y sw is a variable to express binary conditions to show 1 if word w is included in the sentence, and 0 if not. Moreover, considering the length of a sentence in evaluation, the total value of importance of words included in sentence s is divided by the square root of the total number of words in sentence s, i.e., |W s |.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 1", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "Here, it is regarded that the more a topic is included in documents, the more important the topic in the documents, therefore, the ratio of topic t in target documents, i.e., \u03b8 t , is multiplied.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 1", |
| "sec_num": "4.2.1" |
| }, |
| { |
| "text": "In fitness function 2, we change the way of calculating b s defined in fitness function 1. Here, we regard that it is important if a sentence has similar topic vector to a particular topic vector of target documents (see, equation 10).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 2", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "b s = max t=1,2,...,K {sim(w ts , O t )}", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Fitness function 2", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "O t represents topic t vector, i.e., O t = [o t1 , o t2 , . . . , o tV ], (t = 1, 2, . . . , K). In other words, O t corresponds to word distribution \u03a6 t estimated by means of LDA. w ts indicates sentence s vector at topic t, it is obtained by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 2", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "w ts = {o tj x sj } V j=1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 2", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "Here, x sj is the variable which indicates 1 if word j is included in sentence s, and 0 if not. sim(a, b) expresses cosine similarity between vectors a, b. The highest value of cosine similarity among K topics is regarded as the importance of sentence s.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 2", |
| "sec_num": "4.2.2" |
| }, |
| { |
| "text": "In fitness function 3, the importance of a sentence is calculated with equation 10, and the total importance of solution P i is obtained by the combination of sentences (see, the fraction of equation 11), and the importance is divided by the total value of the similarity of any pair of sentences in target documents (see, equation (11)), taking account of the penalty of redundancy in the combination of sentences, unlike the case of fitness function 1, i.e., multiplying coverage ratio,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 3", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "|W i | V . f (P i ) = n\u22121 s=1 n r=s+1 b s + b r p \u2032 i,s p \u2032 i,r n\u22121 s=1 n r=s+1 sim(w s , w r )p \u2032 i,s p \u2032 i,r", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": "Fitness function 3", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "Here, w s is the word vector of sentence s, i.e., w s = [w s1 , w s2 , . . . , w sV ]. w sa expresses importance of word a in sentence s, and it is calculated by tf \u2212 isf shown in equation 12.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 3", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "w s,a = tf sa \u00d7 log( n n a )", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "Fitness function 3", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "tf sa expresses the ratio that word a is included in sentence s, n is the total number of sentences, and n a is the number of sentences including word a.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 3", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "With n\u22121 s=1 n r=s+1 sim(w s , w r )p \u2032 i,s p \u2032 i,r", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 3", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": ", the total sum of cosine similarity between sentences selected in solution P i is calculated as an evaluation factor of redundancy in a generated summary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Fitness function 3", |
| "sec_num": "4.2.3" |
| }, |
| { |
| "text": "In the experiments, we use DUC04 Task2 data set. In the data set, there are 50 topic document sets. The length of a summary is the constraint on making a summary. Here, the constraint is to make a summary within 665 bytes. For each document set, a summary is generated 10 times, and averaged the precision of the 10 summaries evaluated with ROUGE-1 evaluation index (Lin et al., 2004) . ROUGE-1 value is obtained for the both cases where the evaluation with and without stop words. As computation environment, we used Ubuntu 12.04.3 for OS and AMD FX(tm)-8120 1.4GHz for CPU.", |
| "cite_spans": [ |
| { |
| "start": 366, |
| "end": 384, |
| "text": "(Lin et al., 2004)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental settings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We used Gibbs sampling for topic estimation with 100 iteration. The both hyper-parameters of Dirichlet prior distribution of document-topic distribution, \u03b1 and of topic-word distribution, \u03b2 are all set as 0.1. To estimate the number of latent topics in the documents, we use perplexity as an index.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental settings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "As for DE settings, we set the number of maximum generation as g max = 10000, the number of solutions is N = 50. Besides, as the parameter used to generate the initial population, n = 5, and we set p min s = \u221210 and p max x = 10 for all PACLIC 28 ! 157 the initial solutions. As for difference parameter and crossover rate, we set F = 0.45 and CR(0) = 0.7, respectively, referring to the study by Alguliev et al. (2011) .", |
| "cite_spans": [ |
| { |
| "start": 397, |
| "end": 419, |
| "text": "Alguliev et al. (2011)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental settings", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In general, we often generate the initial population randomly by following equation 2, however, in the case of document summarization, we have confirmed that most of the solutions in the initial population generated by equation 2 do not satisfy the given constraint, i.e., the length of a summary is within 665 bytes, in preliminary experiments (see, the left figure in Figure 2). If most of the solutions do not satisfy the constraint, it is difficult to obtain solutions with high fitness value satisfying the constraint, even if they are evolved. In this context, we define a new equation to generate the initial population so that the solutions satisfy the constraint at an early generation. Because of p min s = \u221210 and p max x = 10, the new equation for the initial population is defined as shown in equation (13).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Change of the equation for the initial population", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "p i,s (0) = 10 \u2212 20(1 \u2212 rand i,s ) 1/(n+1)", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Change of the equation for the initial population", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "With a random value, rand i,s (0 \u2264 rand i,s \u2264 1), the value of [-10,10 ] is provided to each locus of N solutions. Here, n is an adjustment parameter for occurrence probability of value of [-10,10] . The bigger n is, the closer the value is to -10. By employing equation (13), we have confirmed that solutions tend to satisfy the constraint and fitness value increases as the number of generation increases (see, the right figure of Figure 2 ). Table 1 shows the precision of the proposed methods and of other methods regarded as baseline methods. In the table, Topic-DE fit1 , Topic-DE fit2 , and Topic-DE fit3 are the methods using fitness function 1, 2 and 3, respectively. As for the baseline methods, Topic-OPT adopts the same index for the importance of a sentence and coverage ratio as well as Topic-DE fit1 and employs an explicit solution technique with CPLEX solver 1 . CLASSY (Conroy et al., 2005) is the method which provided the highest score at DUC'04. Compared the results among the three proposed methods, Topic-DE fit1 got the highest score for both cases of with and without stop words -compared Topic-DE fit1 with Topic-DE fit2 , in terms of deciding the importance of a sentence, we see that it is useful for calculating the importance of a sentence based on the total value of words included in the sentence rather than the value of similarity of the topic vector among all sentences. Furthermore, as for comparison between Topic-DE fit2 and Topic-DE fit3 , in terms of removing redundancy, we see that it is useful for considering how much the combination of sentences in a generated summary covers the contents of target documents rather than the similarity among the sentences in a summary. Furthermore, compared Topic-OPT with the proposed methods, in terms of calculation time, it decreases considerably by using DE, as we see that every proposed method takes approximately 450 seconds, while Topic-OPT takes approximately 9500 seconds. 
On the other hand, we also see that the values of ROUGE-1 of the proposed methods are lower than that of Topic-OPT. We think the reason for the difference in precision is that the importance and coverage are obtained for each sentence in objective function in Topic-OPT, whereas in Topic-DE fit1 those are obtained for the combination of sentences in a generated summary.", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 70, |
| "text": "[-10,10", |
| "ref_id": null |
| }, |
| { |
| "start": 189, |
| "end": 197, |
| "text": "[-10,10]", |
| "ref_id": null |
| }, |
| { |
| "start": 887, |
| "end": 908, |
| "text": "(Conroy et al., 2005)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 433, |
| "end": 441, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 445, |
| "end": 452, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Change of the equation for the initial population", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In this study, we have proposed a multi-document summarization method using differential evolution for combinatorial optimization of important sentences in a generated summary, aiming to realize the efficiency of computation for making a summary. As for the evaluation of the combination of sentences for a summary, we took two approaches: one is to evaluate the total value of the importance of sentences for each topic (i.e., fitness function 1), and the other is to evaluate the similarity of topics between a sentence vector and each topic vector of all sentences estimated by LDA (i.e., fitness function 2 and 3). From the results of the experiments, we see that the former one provides a better result, and also see that evaluating how much a generated summary covers the contents of the whole target documents provides a better result than evaluating the similarity among sentences in a generated summary, in terms of reducing the redundancy of the contents of a summary, comparing fitness function 2 with fitness function 3.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Moreover, comparing the proposed methods to the methods with explicit solution techniques, though we see that calculation time was reduced by the proposed methods, precision of the proposed methods was worse than that of those methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "As future work, we will increase the number of generation in DE process to confirm whether or not precision depends on the number of generation, and devise a better fitness function for improving precision.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "http://www-01.ibm.com/software/commerce/optimization/ cplex-optimizer/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Sentence selection for generic document summarization using an adaptive differential evolution algorithm", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Alguliev", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "M" |
| ], |
| "last": "Aliguliyev", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "A" |
| ], |
| "last": "Mehdiyev", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Swarm and Evolutionary Computation", |
| "volume": "1", |
| "issue": "4", |
| "pages": "213--222", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Alguliev, R. M. Aliguliyev, C. A. Mehdiyev. 2011. Sentence selection for generic document summariza- tion using an adaptive differential evolution algorithm. Swarm and Evolutionary Computation 1(4), pp. 213- 222.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Differential evolution with generalized differentials", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "M" |
| ], |
| "last": "Ali", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Computational and Applied Mathematics", |
| "volume": "225", |
| "issue": "8", |
| "pages": "2205--2216", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M.M. Ali, 2011. Differential evolution with generalized differentials, Journal of Computational and Applied Mathematics 225 (8) pp.2205-2216.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Latent Dirichlet Allocation Based Multi-Document Summarization", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Ravindran", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of the second workshop on Analytics for noisy unstructured text data", |
| "volume": "", |
| "issue": "", |
| "pages": "91--97", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Arora and B. Ravindran. 2008. Latent Dirichlet Allo- cation Based Multi-Document Summarization. Proc. of the second workshop on Analytics for noisy un- structured text data, pp. 91-97.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Latent dirichlet allocation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "I" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lafferty", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Latent Dirichlet Learning for Document Summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "1689--1692", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David M. Blei and Andrew Y. Ng and Michael I. Jordan and John Lafferty. 2003. Latent dirichlet allocation, Journal of Machine Learning Research, Ying-Lang Chang and Jen-Tzung Chien. 2009. La- tent Dirichlet Learning for Document Summarization, ICASSP, pp.1689-1692.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Performance Comparison of GA, DE, PSO and SA Approaches in Enhancement of Total Transfer Capability using FACTS Devices", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "P" |
| ], |
| "last": "Vapnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal of Electrical Engineering & Technology", |
| "volume": "7", |
| "issue": "4", |
| "pages": "493--500", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "N. Chen and J.P. Vapnik. 2012. Performance Compari- son of GA, DE, PSO and SA Approaches in Enhance- ment of Total Transfer Capability using FACTS De- vices. Journal of Electrical Engineering & Technol- ogy, Vol. 7, No. 4, pp. 493-500.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "CLASSY Query-Based Multi-Document Summarization", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "M" |
| ], |
| "last": "Conroy", |
| "suffix": "" |
| }, |
| { |
| "first": "Jade", |
| "middle": [ |
| "Goldstein" |
| ], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Judith", |
| "middle": [ |
| "D" |
| ], |
| "last": "Schlesinger", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "the Human Language Technology Conf./Conf. on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John M. Conroy and Jade Goldstein Stewart and Judith D. Schlesinger, 2005. CLASSY Query-Based Multi- Document Summarization In Proceedings of the Doc- ument Understanding Conf. Wksp. 2005 (DUC 2005) at the Human Language Technology Conf./Conf. on Empirical Methods in Natural Language Processing (HLT/EMNLP).", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "The ICSI Summarization System at TAC 2008, A.Highighi and Vanderwende", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Gillick", |
| "suffix": "" |
| }, |
| { |
| "first": "Benoit", |
| "middle": [], |
| "last": "Favre", |
| "suffix": "" |
| }, |
| { |
| "first": "Dilek", |
| "middle": [], |
| "last": "Hakkani-Tur", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proc. of NAACL HLT-09", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Gillick and Benoit Favre and Dilek Hakkani-tur, 2008. The ICSI Summarization System at TAC 2008, A.Highighi and Vanderwende, 2009. Exploring content models for multi-document summarization, Proc. of NAACL HLT-09.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Building TSC3 Corpus and its Evaluation", |
| "authors": [ |
| { |
| "first": "Tsutomu", |
| "middle": [], |
| "last": "Hirao", |
| "suffix": "" |
| }, |
| { |
| "first": "Manabu", |
| "middle": [], |
| "last": "Okumura", |
| "suffix": "" |
| }, |
| { |
| "first": "Takahiro", |
| "middle": [], |
| "last": "Fukushima", |
| "suffix": "" |
| }, |
| { |
| "first": "Hidetsugu", |
| "middle": [], |
| "last": "Nanba", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Japanese) The 10th Annual Conference of the Japanese Association for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "10--15", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsutomu Hirao, Manabu Okumura, Takahiro Fukushima, and Hidetsugu Nanba, 2004. Building TSC3 Cor- pus and its Evaluation (in Japanese) The 10th An- nual Conference of the Japanese Association for Nat- ural Language Processing. pp.A10B5-02.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Adaptation in natural and artificial systems. An introductory analysis with applications to biology, control, and artificial intelligence", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "H" |
| ], |
| "last": "Holland", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J.H., Holland. 1975. Adaptation in natural and artificial systems. An introductory analysis with applications to biology, control, and artificial intelligence, University of Michigan Press.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Solving rotated multi-objective optimization problems using differential evolution", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Iorio", |
| "suffix": "" |
| }, |
| { |
| "first": "X", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Australian Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "861--872", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Iorio, X. Li, 2004. Solving rotated multi-objective op- timization problems using differential evolution, Pro- ceedings of the Australian Conference on Artificial In- telligence, Cairns, Australia, December 4.6, pp. 861- 872.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Particle swarm optimization", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Kennedy", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [ |
| "C" |
| ], |
| "last": "Eberhart", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Proc. of IEEE International Conference on Neural Networks", |
| "volume": "1498", |
| "issue": "", |
| "pages": "1942--1948", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Kennedy and R. C. Eberhart. 1995. Particle swarm optimization. Proc. of IEEE International Conference on Neural Networks, Vol. 1498 of Lecture Notes in Computer Science, pp. 1942-1948.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "LDA-Based Topic Formation and Topic-Sentence Reinforcement for Graph-Based Multi-document Summarization", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Ouyang", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Lecture Notes in Computer Science", |
| "volume": "7675", |
| "issue": "", |
| "pages": "376--385", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Gao, W. Li, Y. Ouyang, R. Zhang. 2012. LDA- Based Topic Formation and Topic-Sentence Reinforce- ment for Graph-Based Multi-document Summariza- tion. Lecture Notes in Computer Science Volume 7675, pp 376-385.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "ROUGE: A Package for Automatic Evaluation of Summaries", |
| "authors": [ |
| { |
| "first": "C.-Y", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proc. of Workshop on Text Summarization Branches Out, Post Conference Workshop of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "74--81", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lin, C.-Y 2004. ROUGE: A Package for Automatic Evaluation of Summaries, Proc. of Workshop on Text Summarization Branches Out, Post Conference Work- shop of ACL 2004, pp. 74-81,", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Multi-document Summarization via Budgeted Maximization of Submodular Functions, Human Language Technologies: The", |
| "authors": [ |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Bilmes", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lin, Hui and Bilmes, Jeff, 2010. Multi-document Sum- marization via Budgeted Maximization of Submodular Functions, Human Language Technologies: The 2010", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "912--920", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Conference of the North American Chapter of the Association for Computational Linguistics, Los Angeles, California, pp. 912-920.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Differential evolution algorithm with ensemble of parameters and mutation strategies", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Mallipeldi", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "N" |
| ], |
| "last": "Suganthan", |
| "suffix": "" |
| }, |
| { |
| "first": "Q", |
| "middle": [ |
| "K" |
| ], |
| "last": "Pan", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "F" |
| ], |
| "last": "Tasgetiren", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Applied SoftComputing", |
| "volume": "11", |
| "issue": "2", |
| "pages": "1679--1696", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Mallipeldi, P.N. Suganthan, Q.K. Pan, M.F. Tasge- tiren, 2011 Differential evolution algorithm with en- semble of parameters and mutation strategies, Ap- plied SoftComputing 11 (2) pp.1679-1696.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "A Study of Global Inference Algorithms in Multi-Document Summarization", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proc. of the 29th European Conference on Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "557--564", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Mcdonald. 2007. A Study of Global Inference Algorithms in Multi-Document Summarization. Proc. of the 29th European Conference on Information Re- trieval, pp557-564.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Summarization by Latent Dirichlet Allocation: Superior Sentence Extraction through Topic Modeling. A senior thesis for Bachelors degree", |
| "authors": [ |
| { |
| "first": "Kenton", |
| "middle": [ |
| "W" |
| ], |
| "last": "Murray", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kenton W. Murray. 2009. Summarization by Latent Dirichlet Allocation: Superior Sentence Extraction through Topic Modeling. A senior thesis for Bache- lors degree, Princeton University.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Use of Genetic Algorithm for Cohesive Summary Extraction to Assist Reading Difficulties", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Nandhini", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "R" |
| ], |
| "last": "Balasundaram", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Applied Computational Intelligence and Soft Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Nandhini and S. R. Balasundaram. 2013. Use of Genetic Algorithm for Cohesive Summary Extraction to Assist Reading Difficulties. Applied Computational Intelligence and Soft Computing Volume 2013 Article ID 945623, 11 pages.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Genetic algorithm for finding a good first integer solution for MILP. Department of Computing", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Nieminen", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Ruuth", |
| "suffix": "" |
| }, |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Maros", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "4", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "K. Nieminen, S. Ruuth, and I. Maros. 2003. Genetic algorithm for finding a good first integer solution for MILP. Department of Computing, Imperial College Departmental Technical Report 2003/4, ISSN 1469- 4174.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Text Summarization Model based on Redundancy Constrained Knapsack Problem", |
| "authors": [ |
| { |
| "first": "Hitoshi", |
| "middle": [], |
| "last": "Nishikawa", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsutomu", |
| "middle": [], |
| "last": "Hirao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshihiro", |
| "middle": [], |
| "last": "Matsuo", |
| "suffix": "" |
| }, |
| { |
| "first": "Toshiro", |
| "middle": [], |
| "last": "Makino", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proc. of the 24th International Conference on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "893--902", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hitoshi Nishikawa, Tsutomu Hirao, Yoshihiro Matsuo and Toshiro Makino. 2012. Text Summarization Model based on Redundancy Constrained Knapsack Problem. In Proc. of the 24th International Con- ference on Computational Linguistics (COLING), pp. 893-902.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Summarization while Maximizing Multiple Objectives with Lagrangian Relaxation", |
| "authors": [ |
| { |
| "first": "Masaaki", |
| "middle": [], |
| "last": "Nishino", |
| "suffix": "" |
| }, |
| { |
| "first": "Norihito", |
| "middle": [], |
| "last": "Yasuda", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsutomu", |
| "middle": [], |
| "last": "Hirao", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Suzuki", |
| "suffix": "" |
| }, |
| { |
| "first": "Masaaki", |
| "middle": [], |
| "last": "Nagata", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proc. of the 35th European Conference on Information Retrieval (ECIR)", |
| "volume": "", |
| "issue": "", |
| "pages": "772--775", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Masaaki Nishino, Norihito Yasuda, Tsutomu Hirao, Jun Suzuki and Masaaki Nagata. 2013. Summariza- tion while Maximizing Multiple Objectives with La- grangian Relaxation. In Proc. of the 35th European Conference on Information Retrieval (ECIR), pp. 772- 775.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Dynamic Programming Algorithm vs. Genetic Algorithm: Which is Faster", |
| "authors": [ |
| { |
| "first": "Dusan", |
| "middle": [], |
| "last": "Petkovic", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Research and Development in Intelligent Systems XXVII", |
| "volume": "", |
| "issue": "", |
| "pages": "483--488", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dusan Petkovic 2011. Dynamic Programming Al- gorithm vs. Genetic Algorithm: Which is Faster?. Research and Development in Intelligent Systems XXVII, pp 483-488.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Differential evolution algorithm with strategy adaptation for global numerical optimization", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [ |
| "K" |
| ], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [ |
| "L" |
| ], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [ |
| "N" |
| ], |
| "last": "Suganthan", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "IEEE Transactions on Evolutionary Computation", |
| "volume": "13", |
| "issue": "2", |
| "pages": "398--417", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A.K. Qin, V.L. Huang, P.N. Suganthan, 2009. Differ- ential evolution algorithm with strategy adaptation for global numerical optimization, IEEE Transactions on Evolutionary Computation 13 (2) pp.398-417.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Minimizing the Real Functions of the ICEC96 Contest by Differential Evolution", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Storn", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Price", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proc. of the International Conference on Evolutionary Computation", |
| "volume": "", |
| "issue": "", |
| "pages": "842--844", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Storn and K. Price. 1996. Minimizing the Real Func- tions of the ICEC96 Contest by Differential Evolution. Proc. of the International Conference on Evolutionary Computation, pp. 842-844.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "On the usage of differential evolution for function optimization", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Storn", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the Biennial Conference of the North American Fuzzy Information Processing Society", |
| "volume": "", |
| "issue": "", |
| "pages": "519--523", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R. Storn 1996, On the usage of differential evolution for function optimization, Proceedings of the Biennial Conference of the North American Fuzzy Information Processing Society, Berkeley, USA, June 19.22, pp. 519-523.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Text Summarization based on Latent Topic Distribution", |
| "authors": [ |
| { |
| "first": "Haruka", |
| "middle": [], |
| "last": "Shigematsu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ichiro", |
| "middle": [], |
| "last": "Kobayashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "The 26th Annual Conference of the Japanese Society for Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "4--5", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Haruka Shigematsu and Ichiro Kobayashi, 2012. Text Summarization based on Latent Topic Distribution, The 26th Annual Conference of the Japanese Society for Artificial Intelligence, 4I1-R-9-1 (in Japanese)", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Text summarization model based on the budgeted median problem", |
| "authors": [ |
| { |
| "first": "Hiroya", |
| "middle": [], |
| "last": "Takamura", |
| "suffix": "" |
| }, |
| { |
| "first": "Manabu", |
| "middle": [], |
| "last": "Okumura", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the 18th ACM conference on Information and knowledge management CIKM '09", |
| "volume": "", |
| "issue": "", |
| "pages": "1589--1592", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hiroya Takamura and Manabu Okumura 2009. Text summarization model based on the budgeted median problem, Proceedings of the 18th ACM conference on Information and knowledge management CIKM '09, Hong Kong, China, pp.1589-1592.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Multi-Document Summarization using Sentence-based Topic Models", |
| "authors": [ |
| { |
| "first": "Dingding", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shenghuo", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yihong", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of the ACL-IJCNLP 2009", |
| "volume": "", |
| "issue": "", |
| "pages": "297--300", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dingding Wang, Shenghuo Zhu, Tao Li and Yihong Gong, 2009. Multi-Document Summarization us- ing Sentence-based Topic Models, Proc. of the ACL- IJCNLP 2009, pp.297-300.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Multidocument Summarization by Maximizing Informative Content-words", |
| "authors": [ |
| { |
| "first": "Wen", |
| "middle": [ |
| "-" |
| ], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucy", |
| "middle": [], |
| "last": "Vanderwende", |
| "suffix": "" |
| }, |
| { |
| "first": "Hisami", |
| "middle": [], |
| "last": "Suzuki", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 20th International Joint Conference on Artifical Intelligence, IJCAI'07", |
| "volume": "", |
| "issue": "", |
| "pages": "1776--1782", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yih, Wen-tau and Goodman, Joshua and Vander- wende, Lucy and Suzuki, Hisami, 2007. Multi- document Summarization by Maximizing Informative Content-words, Proceedings of the 20th International Joint Conference on Artifical Intelligence, IJCAI'07, pp.1776-1782.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "The DE process from step 3 to step 5" |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "minimum and the maximum values, respectively. rand i,s is a random value of [0, 1]. By equation (2), random values of [p min s , p max s ] are provided to p i,s (s = 1, . . . , n)." |
| }, |
| "FIGREF2": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "Operation to the generation of the initial population" |
| } |
| } |
| } |
| } |