| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:36:01.777851Z" |
| }, |
| "title": "Unsupervised document summarization using pre-trained sentence embeddings and graph centrality", |
| "authors": [ |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Ramirez-Orta", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Dalhousie University", |
| "location": {} |
| }, |
| "email": "juan.ramirez.orta@dal.ca" |
| }, |
| { |
| "first": "Evangelos", |
| "middle": [], |
| "last": "Milios", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Dalhousie University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes our submission for the LongSumm task in SDP 2021. We propose a method for incorporating sentence embeddings produced by deep language models into extractive summarization techniques based on graph centrality in an unsupervised manner. The proposed method is simple, fast, can summarize any document of any size and can satisfy any length constraints for the summaries produced. The method offers competitive performance to more sophisticated supervised methods and can serve as a proxy for abstractive summarization techniques.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes our submission for the LongSumm task in SDP 2021. We propose a method for incorporating sentence embeddings produced by deep language models into extractive summarization techniques based on graph centrality in an unsupervised manner. The proposed method is simple, fast, can summarize any document of any size and can satisfy any length constraints for the summaries produced. The method offers competitive performance to more sophisticated supervised methods and can serve as a proxy for abstractive summarization techniques.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Automatic text summarization is a very old and important task in Natural Language Processing (NLP) that has received continued attention since the creation of the field in the late 50's (Luhn, 1958) , mainly because of the ever-increasing size of collections of text. The objective of the task is, given a document, to produce a shorter text with maximum information content, fluency and coherence. The summarization task can be classified into extractive and abstractive. Extractive summarization means that the summary is composed exclusively of passages present in the original document and abstractive summarization means that there can be words in the summary that did not appear in the original document.", |
| "cite_spans": [ |
| { |
| "start": 186, |
| "end": 198, |
| "text": "(Luhn, 1958)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Since the creation of the first neural language models (Bengio et al., 2003) , vector representations of text that encode meaning (called embeddings) have played a significant role in NLP. They allow the application of statistical and geometrical methods to words, sentences and documents ((Pennington et al., 2014) , (Mikolov et al., 2013) , (Reimers and Gurevych, 2019) ), leading to stateof-the-art performance on several NLP tasks like Information Retrieval, Question Answering or Paraphrase Identification. Among these neural language models, very deep pre-trained neural language models, like BERT (Devlin et al., 2018) , T5 (Raffel et al., 2020) , and GPT-3 (Brown et al., 2020) have shown impressive performance in tasks like language modelling and text generation or benchmarks like GLUE (Wang et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 76, |
| "text": "(Bengio et al., 2003)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 289, |
| "end": 315, |
| "text": "((Pennington et al., 2014)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 318, |
| "end": 340, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 343, |
| "end": 371, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 604, |
| "end": 625, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 631, |
| "end": 652, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 665, |
| "end": 685, |
| "text": "(Brown et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 797, |
| "end": 816, |
| "text": "(Wang et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "An important variation of extractive summarization that goes back as far as the late 90's (Salton et al., 1994 (Salton et al., , 1997 utilizes graphs, where the nodes represent text units and the links represent some measure of semantic similarity. These early graphbased summarization techniques involved creating a graph where the nodes were the sentences or paragraphs of a document and two nodes were connected if the corresponding text units had a similar vocabulary. After creating the document graph, the system created a summary by starting at the first paragraph and following random walks defined by different algorithms that tried to cover as much of the graph as possible.", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 110, |
| "text": "(Salton et al., 1994", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 111, |
| "end": 133, |
| "text": "(Salton et al., , 1997", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "A more evolved approach was the creation of lexical centrality (Erkan and Radev, 2004) (Mihalcea and Tarau, 2004) (Wolf and Gibson, 2004) , which is a measure of the importance of a passage in a text where the sentences of the document are connected by the similarity of their vocabularies.", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 86, |
| "text": "(Erkan and Radev, 2004)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 87, |
| "end": 113, |
| "text": "(Mihalcea and Tarau, 2004)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 114, |
| "end": 137, |
| "text": "(Wolf and Gibson, 2004)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The current state of the art in automatic summarization with graphs is mainly based on algorithms like PageRank (Brin and Page, 1998) enhanced with statistical information of the terms in the document (like in (Ramesh et al., 2014)) or Graph Neural Networks (Kipf and Welling, 2016) on top of deep language models (like in (Xu et al., 2019) ).", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 133, |
| "text": "(Brin and Page, 1998)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 210, |
| "end": 232, |
| "text": "(Ramesh et al., 2014))", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 323, |
| "end": 340, |
| "text": "(Xu et al., 2019)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Only two systems from the previous Scholarly Document Processing workshop held in 2020 are based on graphs: CIST-BUPT and Monash-Summ.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In CIST-BUPT (Li et al., 2020) , they used Recurrent Neural Networks to create sentence embeddings that can be used to build a graph which is then fed into a Graph Convolutional Network (Kipf and Welling, 2016) and a Graph Attention Network (Veli\u010dkovi\u0107 et al., 2018) to create extractive summaries. To generate abstractive summaries, they used the gap-sentence method of (Zhang et al., 2019) to fine-tune T5 (Raffel et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 13, |
| "end": 30, |
| "text": "(Li et al., 2020)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 241, |
| "end": 266, |
| "text": "(Veli\u010dkovi\u0107 et al., 2018)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 371, |
| "end": 391, |
| "text": "(Zhang et al., 2019)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 408, |
| "end": 429, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In Monash-Summ (Ju et al., 2020) , they propose an unsupervised approach that leverages linguistic knowledge to construct a sentence graph like in SummPip (Zhao et al., 2020) . The graph nodes, which represent sentences, are further clustered to control the summary length, while the final abstractive summary is created from the key phrases and discourse from each cluster.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 32, |
| "text": "(Ju et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 155, |
| "end": 174, |
| "text": "(Zhao et al., 2020)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This work focuses on extractive summarization using graphs leveraging sentence embeddings produced by pre-trained language models. The essential idea is that, while the sentence embeddings produced by SBERT (Reimers and Gurevych, 2019) are not well suited for clustering algorithms like Hierarchical Clustering or DBSCAN (Ester et al., 1996) , they produce excellent results in Paraphrase Identification or Semantic Textual Similarity when compared with Cosine Similarity, which implies that they can be used along with graph centrality methods. The text summarization method proposed in this paper has the following contributions:", |
| "cite_spans": [ |
| { |
| "start": 207, |
| "end": 235, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 321, |
| "end": 341, |
| "text": "(Ester et al., 1996)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Is unsupervised and can be used as a proxy for more advanced summarization methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Can easily scale to arbitrarily large amounts of text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Is fast and easy to implement.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Can fit any length requirements for the production of summaries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we describe how the system works.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The system is composed of three main steps: first, we use SBERT to produce sentence embeddings for every sentence in the document to summarize; next, we form a graph by comparing all the pairs of sentence embeddings obtained and finally, we rank the sentences by their degree centrality in this graph. Fig. 1 gives an overview of the whole method.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 302, |
| "end": 308, |
| "text": "Fig. 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Methodology", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The first step of our pipeline is to split the input text into a list of sentences. This step is critical because Figure 1 : The complete pipeline of the proposed method. In the first step, we split the input text into sentences by using a regular expression handcrafted specifically for scientific documents. In the second step, we compute the sentence embeddings of the parsed sentences using SBERT. In the third step, we create a graph by comparing all the pairs of sentence embeddings obtained using cosine similarity. In the fourth step, we rank the sentences by the degree centrality in the generated graph. In the fifth and final step, we only keep a certain number of sentences or words to adjust to the length requirements of the summary.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 114, |
| "end": 122, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence tokenization", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "if the sentences are too long, the final summary will have a lot of meaningless content (therefore losing precision). However, if the sentences are too short, there is a risk of not having enough context to produce an accurate sentence embedding for them or extracting meaningless sequences, like data in tables or numbers that lie in the middle of the text. We found that the function sent_tokenize() from the NLTK package (Bird et al., 2009) often failed because of the numbers in the tables and the abbreviations, like \"et al.\", which are very common in scientific literature. Because of this, we used a regular expression handcrafted specifically to split the text found in scientific documents.", |
| "cite_spans": [ |
| { |
| "start": 424, |
| "end": 443, |
| "text": "(Bird et al., 2009)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentence tokenization", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "After extracting the sentences, the next step is to produce the sentence embedding of each sentence using SBERT (Reimers and Gurevych, 2019) , which is a Transformer-based (Vaswani et al., 2017) model built on top of BERT (Devlin et al., 2018) that takes as input sentences and produces sentence embeddings that can be compared with cosine similarity, which is given by the following formula:", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 140, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 172, |
| "end": 194, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 222, |
| "end": 243, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computing sentence embeddings", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "sim(x, y) = x \u2022 y |x||y| .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computing sentence embeddings", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "As shown in (Reimers and Gurevych, 2019) , these sentence embeddings are superior in quality than taking the CLS token of BERT or averaging the sentence embeddings of the words in the sentence produced by BERT, GloVe (Pennington et al., 2014) , or Word2Vec (Mikolov et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 40, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 217, |
| "end": 242, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 257, |
| "end": 279, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computing sentence embeddings", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "SBERT, like BERT, was pre-trained on a general large text collection to learn good sentence embeddings, but it has to be fine-tuned on a more specific data set according to the task. Since we are working with scientific papers, we picked the \"base\" version of RoBERTa that was fine-tuned on the MSMARCO data set (Bajaj et al., 2016 ) for the Information Retrieval task.", |
| "cite_spans": [ |
| { |
| "start": 312, |
| "end": 331, |
| "text": "(Bajaj et al., 2016", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Computing sentence embeddings", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "After the sentence embeddings have been produced, the next step is to produce a weighted complete graph with a node for each sentence in the text. Its edges are weighted according to the cosine similarities of the corresponding sentence embeddings. An example graph is depicted in Fig. 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 281, |
| "end": 287, |
| "text": "Fig. 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generation of the sentence graph", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "s 1 s 2 s 3 s 4 1 \u2212 sim(e 1 , e 2 )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the sentence graph", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "1 \u2212 sim(e 1 , e 3 ) 1 \u2212 sim(e 1 , e 4 ) Figure 2 : The process of graph generation and ranking of the sentences. Every node in the generated complete graph represents a sentence in the document and the weight of each edge is given by the similarity between the nodes it connects. The importance of the sentence in the document is modelled as rank(s i ) = n j=1 1 \u2212 sim(e i , e j ), where e i and e j are the corresponding SBERT sentence embeddings of s i and s j .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 40, |
| "end": 48, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generation of the sentence graph", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "To build this graph, the first step is to gather all the pairwise cosine similarities in a matrix. Let D = (s 1 , s 2 , ..., s n ) be a document. Using SBERT, we produce a sequence of vectors (e 1 , e 2 , ..., e n ), where e i is the sentence embedding of s i . Then, we can compute the matrix A, where A[i, j] = 1 \u2212 sim(e i , e j ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the sentence graph", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We make the following observations:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the sentence graph", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "\u2022 The diagonal of A is composed exclusively of zeros, because A[i, i] = 1 \u2212 sim(e i , e i ) = 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the sentence graph", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "\u2022 The matrix A is symmetric, because A[i, j] = 1 \u2212 sim(e i , e j ) = 1 \u2212 sim(e j , e i ) = A[j, i].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the sentence graph", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "\u2022 All the entries in A are non-negative, because \u22121 \u2264 sim(e i , e j ) \u2264 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the sentence graph", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "These observations imply that the matrix A can be interpreted as the adjacency matrix of a weighted complete graph G = (V, E) where V = {s 1 , s 2 , ..., s n }, E = {(s 1 , s 2 )|s 1 , s 2 \u2208 V } and the edges are weighted by the following function: w(s 1 , s 2 ) = 1 \u2212 sim(e 1 , e 2 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generation of the sentence graph", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "The fourth step is to assign a score for each sentence that allows us to sort them by their importance in the document. As a consequence, we define the importance rank for each sentence as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking by centrality", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "rank(s i ) = n j=1 A[i, j] = n j=1 1 \u2212 sim(e i , e j ),", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Ranking by centrality", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "where e i and e j are the corresponding SBERT sentence embedding for s i and s j .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking by centrality", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "To motivate this definition, we observe that adding the entries of the matrix A columnwise gives naturally a ranking of the nodes of G that is a natural generalization of the degree centrality. However, in our ranking, the most \"central\" sentences (sentences that are similar to many other sentences in the document) have lower scores than the ones that are less \"central.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking by centrality", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "To further support this definition, we observe that if G were an undirected, unweighted simple graph G = (V, E) (that is, the entries of A are either 0 or 1, A is symmetric and only has zeros in its diagonal), then we would have that", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking by centrality", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "n j=1 A[i, j] = #{v \u2208 V |(v i , v) \u2208 E}, (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking by centrality", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "which is the definition of the degree of node v i and is clearly a (somewhat crude) measure of the importance of the node in the graph.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking by centrality", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "It is important to note that in scientific papers, which have around 300 sentences, the proposed method takes around 1 second for the whole process. This result implies that there is no obstacle for applying this method to longer documents since producing the sentence embeddings with the SBERT implementation is very efficient, and the only thing that we are doing is to compare all the pairs of sentence embeddings, which can be done with highly efficient linear algebra libraries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ranking by centrality", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "The final step in the method is to select the sentences that are going to form the summary. To do this, we can take only the bottom n-percentile in reverse (as opposed to the top n-percentile, since in our method, a lower rank means that the sentence is more important in the document) or concatenate the ranked sentences in reverse (so that the sentences with the lowest ranks -that is, the most important ones-come first) and take the first k words to satisfy a word-length constraint for the summaries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary selection", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "3 Experimental setup", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Summary selection", |
| "sec_num": "2.5" |
| }, |
| { |
| "text": "Since our method is for unsupervised extractive summarization, we only used the extractive summaries in the TalkSumm data set (Lev et al., 2019) to estimate the appropriate threshold value for the sentence selection phase. As suggested in the task, we used science-parse (AllenAI, 2019) to extract the text of the scientific articles and split it into sections. Given that the objective of the task is to produce long summaries for the documents, we discarded the title and abstract and then took as input for the algorithm the remaining text as a single block.", |
| "cite_spans": [ |
| { |
| "start": 126, |
| "end": 144, |
| "text": "(Lev et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 271, |
| "end": 286, |
| "text": "(AllenAI, 2019)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data set", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "As is customary in summarization tasks, we used ROUGE (Lin, 2004) in its variations ROUGE-1, ROUGE-2 and ROUGE-L.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 65, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We tried with p = {1, 1.5, 2, 2.5, 5, 10, 15} as the value of the bottom percentage of sentences to keep for the final summary and truncated the output to satisfy the 600 word limit for the task when the summary was longer. It is important to note that the freedom of this parameter allows the system to produce summaries of arbitrary length, depending on the task at hand.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Percentile threshold in the selection phase", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Overall, we observed that the 600-word constraint of the task prevented our method from performing better, but we also observed that the best summaries produced by our method are too long (around 1,000 words or more). Table 1 displays the performance of the method variations that we submitted to the task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 218, |
| "end": 225, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The method introduced in this work displays competitive performance with more sophisticated meth- Table 1 : performance of the different variations of the proposed method submitted to the task. In this setting, the ranked sentences were sorted in reverse and concatenated to form a preliminary output, which was truncated at 600 words to comply with the task's requirements. The \"Bottom %\" column displays the percentile used in the sentence selection phase of the method. R-N F stands for the F-measure in ROUGE-N, while R-N R stands for the Recall in ROUGE-N.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 98, |
| "end": 105, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Bottom % R-1 F R-1 R R-2 F R-2 R R-L F R-L R 1.0 0.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "ods and can be useful when there is not enough labelled data to train a deep neural summarization system while being fast, simple and efficient. Overall, we observed that the precision component of ROUGE for the proposed method has much room for improvement, as having sentences as the minimal text units prevents it from filtering out the less important phrases. Another important future direction is to reduce the redundancy of the summaries, as it is common to have several versions of the same important sentence scattered across the document, so all these versions of the sentence appear in the final summary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Science parse. GitHub repository", |
| "authors": [ |
| { |
| "first": "Allenai", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "AllenAI. 2019. Science parse. GitHub repos- itory, https://github.com/allenai/ science-parse. Visited on April 23, 2021.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Saurabh Tiwary, and Tong Wang. 2016. MS MARCO: A Human Generated MAchine Reading COmprehension Dataset", |
| "authors": [ |
| { |
| "first": "Payal", |
| "middle": [], |
| "last": "Bajaj", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Campos", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Craswell", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rangan", |
| "middle": [], |
| "last": "Majumder", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Mcnamara", |
| "suffix": "" |
| }, |
| { |
| "first": "Bhaskar", |
| "middle": [], |
| "last": "Mitra", |
| "suffix": "" |
| }, |
| { |
| "first": "Tri", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mir", |
| "middle": [], |
| "last": "Rosenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Xia", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Alina", |
| "middle": [], |
| "last": "Stoica", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1611.09268" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Payal Bajaj, Daniel Campos, Nick Craswell, Li Deng, Jianfeng Gao, Xiaodong Liu, Rangan Majumder, Andrew McNamara, Bhaskar Mitra, Tri Nguyen, Mir Rosenberg, Xia Song, Alina Stoica, Saurabh Ti- wary, and Tong Wang. 2016. MS MARCO: A Hu- man Generated MAchine Reading COmprehension Dataset. arXiv preprint arXiv:1611.09268.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A neural probabilistic language model", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9jean", |
| "middle": [], |
| "last": "Ducharme", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Janvin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "J. Mach. Learn. Res", |
| "volume": "3", |
| "issue": "", |
| "pages": "1137--1155", |
| "other_ids": { |
| "DOI": [ |
| "https://dl.acm.org/doi/pdf/10.5555/944919.944966" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Janvin. 2003. A neural proba- bilistic language model. J. Mach. Learn. Res., 3(null):1137-1155.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Natural Language Processing with Python", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "Ewan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Loper", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bird, Ewan Klein, and Edward Loper. 2009. Natural Language Processing with Python. O'Reilly Media.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "The anatomy of a large-scale hypertextual web search engine", |
| "authors": [ |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Brin", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Page", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "COMPUTER NETWORKS AND ISDN SYSTEMS", |
| "volume": "", |
| "issue": "", |
| "pages": "107--117", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/S0169-7552(98)00110-X" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sergey Brin and Lawrence Page. 1998. The anatomy of a large-scale hypertextual web search engine. In COMPUTER NETWORKS AND ISDN SYSTEMS, pages 107-117.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Language models are few-shot learners", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Mann", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Ryder", |
| "suffix": "" |
| }, |
| { |
| "first": "Melanie", |
| "middle": [], |
| "last": "Subbiah", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [ |
| "D" |
| ], |
| "last": "Kaplan", |
| "suffix": "" |
| }, |
| { |
| "first": "Prafulla", |
| "middle": [], |
| "last": "Dhariwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Arvind", |
| "middle": [], |
| "last": "Neelakantan", |
| "suffix": "" |
| }, |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Shyam", |
| "suffix": "" |
| }, |
| { |
| "first": "Girish", |
| "middle": [], |
| "last": "Sastry", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanda", |
| "middle": [], |
| "last": "Askell", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandhini", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Ariel", |
| "middle": [], |
| "last": "Herbert-Voss", |
| "suffix": "" |
| }, |
| { |
| "first": "Gretchen", |
| "middle": [], |
| "last": "Krueger", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Henighan", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Ramesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Ziegler", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Clemens", |
| "middle": [], |
| "last": "Winter", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Hesse", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Sigler", |
| "suffix": "" |
| }, |
| { |
| "first": "Mateusz", |
| "middle": [], |
| "last": "Litwin", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "33", |
| "issue": "", |
| "pages": "1877--1901", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert- Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems, volume 33, pages 1877-1901. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Lexrank: Graph-based lexical centrality as salience in text summarization", |
| "authors": [ |
| { |
| "first": "G\u00fcnes", |
| "middle": [], |
| "last": "Erkan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [ |
| "R" |
| ], |
| "last": "Radev", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "J. Artif. Int. Res", |
| "volume": "22", |
| "issue": "1", |
| "pages": "457--479", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G\u00fcnes Erkan and Dragomir R. Radev. 2004. Lexrank: Graph-based lexical centrality as salience in text summarization. J. Artif. Int. Res., 22(1):457-479.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A density-based algorithm for discovering clusters in large spatial databases with noise", |
| "authors": [ |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Ester", |
| "suffix": "" |
| }, |
| { |
| "first": "Hans-Peter", |
| "middle": [], |
| "last": "Kriegel", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00f6rg", |
| "middle": [], |
| "last": "Sander", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaowei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Proceedings of the Second International Conference on Knowledge Discovery and Data Mining, KDD'96", |
| "volume": "", |
| "issue": "", |
| "pages": "226--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin Ester, Hans-Peter Kriegel, J\u00f6rg Sander, and Xi- aowei Xu. 1996. A density-based algorithm for discovering clusters in large spatial databases with noise. In Proceedings of the Second International Conference on Knowledge Discovery and Data Min- ing, KDD'96, page 226-231. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Monash-summ@LongSumm 20", |
| "authors": [ |
| { |
| "first": "Jiaxin", |
| "middle": [], |
| "last": "Ju", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.sdp-1.37" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiaxin Ju, Ming Liu, Longxiang Gao, and Shirui Pan. 2020. Monash-summ@LongSumm 20", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "SciSummPip: An unsupervised scientific paper summarization pipeline", |
| "authors": [], |
| "year": null, |
| "venue": "Proceedings of the First Workshop on Scholarly Document Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "318--327", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.sdp-1.37" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "SciSummPip: An unsupervised scientific paper sum- marization pipeline. In Proceedings of the First Workshop on Scholarly Document Processing, pages 318-327, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Semisupervised classification with graph convolutional networks", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Kipf", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.02907" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas N Kipf and Max Welling. 2016. Semi- supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Talk-Summ: A dataset and scalable annotation method for scientific paper summarization based on conference talks", |
| "authors": [ |
| { |
| "first": "Guy", |
| "middle": [], |
| "last": "Lev", |
| "suffix": "" |
| }, |
| { |
| "first": "Michal", |
| "middle": [], |
| "last": "Shmueli-Scheuer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Herzig", |
| "suffix": "" |
| }, |
| { |
| "first": "Achiya", |
| "middle": [], |
| "last": "Jerbi", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Konopnicki", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2125--2131", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1204" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guy Lev, Michal Shmueli-Scheuer, Jonathan Herzig, Achiya Jerbi, and David Konopnicki. 2019. Talk- Summ: A dataset and scalable annotation method for scientific paper summarization based on confer- ence talks. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 2125-2131, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Automatic scientific document summarization", |
| "authors": [ |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Xie", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yafei", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Siya", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "" |
| }, |
| { |
| "first": "Xingyuan", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the First Workshop on Scholarly Document Processing", |
| "volume": "2020", |
| "issue": "", |
| "pages": "225--234", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.sdp-1.25" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lei Li, Yang Xie, Wei Liu, Yinan Liu, Yafei Jiang, Siya Qi, and Xingyuan Li. 2020. CIST@CL-SciSumm 2020, LongSumm 2020: Automatic scientific doc- ument summarization. In Proceedings of the First Workshop on Scholarly Document Processing, pages 225-234, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "ROUGE: A package for automatic evaluation of summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Text Summarization Branches Out", |
| "volume": "", |
| "issue": "", |
| "pages": "74--81", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. ROUGE: A package for auto- matic evaluation of summaries. In Text Summariza- tion Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "The automatic creation of literature abstracts", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "P" |
| ], |
| "last": "Luhn", |
| "suffix": "" |
| } |
| ], |
| "year": 1958, |
| "venue": "IBM Journal of Research and Development", |
| "volume": "2", |
| "issue": "2", |
| "pages": "159--165", |
| "other_ids": { |
| "DOI": [ |
| "10.1147/rd.22.0159" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. P. Luhn. 1958. The automatic creation of literature abstracts. IBM Journal of Research and Develop- ment, 2(2):159-165.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "TextRank: Bringing order into text", |
| "authors": [ |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Tarau", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "404--411", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rada Mihalcea and Paul Tarau. 2004. TextRank: Bringing order into text. In Proceedings of the 2004 Conference on Empirical Methods in Natural Lan- guage Processing, pages 404-411, Barcelona, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "26", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word rep- resentation. In Empirical Methods in Natural Lan- guage Processing (EMNLP), pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Exploring the limits of transfer learning with a unified text-totext transformer", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharan", |
| "middle": [], |
| "last": "Narang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Matena", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanqi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "J" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "21", |
| "issue": "140", |
| "pages": "1--67", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Kather- ine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to- text transformer. Journal of Machine Learning Re- search, 21(140):1-67.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Sentencerank -a graph based approach to summarize text", |
| "authors": [ |
| { |
| "first": "Animesh", |
| "middle": [], |
| "last": "Ramesh", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Srinivasa", |
| "suffix": "" |
| }, |
| { |
| "first": "Pramod", |
| "middle": [ |
| "N" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "5th International Conference on the Applications of Digital Information and Web Technologies, ICADIWT 2014", |
| "volume": "", |
| "issue": "", |
| "pages": "177--182", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICADIWT.2014.6814680" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Animesh Ramesh, K. Srinivasa, and Pramod .N. 2014. Sentencerank -a graph based approach to summa- rize text. In 5th International Conference on the Ap- plications of Digital Information and Web Technolo- gies, ICADIWT 2014, pages 177-182.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
| "authors": [ |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reimers", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Automatic analysis, theme generation, and summarization of machine-readable texts", |
| "authors": [ |
| { |
| "first": "Gerard", |
| "middle": [], |
| "last": "Salton", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Allan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Buckley", |
| "suffix": "" |
| }, |
| { |
| "first": "Amit", |
| "middle": [], |
| "last": "Singhal", |
| "suffix": "" |
| } |
| ], |
| "year": 1994, |
| "venue": "Science", |
| "volume": "264", |
| "issue": "5164", |
| "pages": "1421--1426", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gerard Salton, James Allan, Chris Buckley, and Amit Singhal. 1994. Automatic analysis, theme genera- tion, and summarization of machine-readable texts. Science, 264(5164):1421-1426.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Automatic text structuring and summarization", |
| "authors": [ |
| { |
| "first": "Gerard", |
| "middle": [], |
| "last": "Salton", |
| "suffix": "" |
| }, |
| { |
| "first": "Amit", |
| "middle": [], |
| "last": "Singhal", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Mitra", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Buckley", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Methods and Tools for the Automatic Construction of Hypertext", |
| "volume": "33", |
| "issue": "", |
| "pages": "193--207", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/S0306-4573(96)00062-3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gerard Salton, Amit Singhal, Mandar Mitra, and Chris Buckley. 1997. Automatic text structuring and sum- marization. Information Processing & Management, 33(2):193-207. Methods and Tools for the Auto- matic Construction of Hypertext.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, volume 30. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Graph Attention Networks", |
| "authors": [ |
| { |
| "first": "Petar", |
| "middle": [], |
| "last": "Veli\u010dkovi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillem", |
| "middle": [], |
| "last": "Cucurull", |
| "suffix": "" |
| }, |
| { |
| "first": "Arantxa", |
| "middle": [], |
| "last": "Casanova", |
| "suffix": "" |
| }, |
| { |
| "first": "Adriana", |
| "middle": [], |
| "last": "Romero", |
| "suffix": "" |
| }, |
| { |
| "first": "Pietro", |
| "middle": [], |
| "last": "Li\u00f2", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Li\u00f2, and Yoshua Bengio. 2018. Graph Attention Networks. International Conference on Learning Representations. Accepted as poster.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop Black-boxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "353--355", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5446" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Fe- lix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In Pro- ceedings of the 2018 EMNLP Workshop Black- boxNLP: Analyzing and Interpreting Neural Net- works for NLP, pages 353-355, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Paragraph-, word-, and coherence-based approaches to sentence ranking: A comparison of algorithm and human performance", |
| "authors": [ |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the 42nd Annual Meeting of the Association for Computational Linguistics (ACL-04)", |
| "volume": "", |
| "issue": "", |
| "pages": "383--390", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1218955.1219004" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Florian Wolf and Edward Gibson. 2004. Paragraph-, word-, and coherence-based approaches to sentence ranking: A comparison of algorithm and human per- formance. In Proceedings of the 42nd Annual Meet- ing of the Association for Computational Linguistics (ACL-04), pages 383-390, Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Discourse-aware neural extractive model for text summarization", |
| "authors": [ |
| { |
| "first": "Jiacheng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhe", |
| "middle": [], |
| "last": "Gan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingjing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiacheng Xu, Zhe Gan, Yu Cheng, and Jingjing Liu. 2019. Discourse-aware neural extractive model for text summarization. CoRR, abs/1910.14142.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Pegasus: Pre-training with extracted gap-sentences for abstractive summarization", |
| "authors": [ |
| { |
| "first": "Jingqing", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yao", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Saleh", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "J" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1912.08777" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Pe- ter J. Liu. 2019. Pegasus: Pre-training with ex- tracted gap-sentences for abstractive summarization. arXiv preprint arXiv:1912.08777.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Summpip: Unsupervised multidocument summarization with sentence graph compression", |
| "authors": [ |
| { |
| "first": "Jinming", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Longxiang", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Lan", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "He", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "He", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '20", |
| "volume": "", |
| "issue": "", |
| "pages": "1949--1952", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3397271.3401327" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinming Zhao, Ming Liu, Longxiang Gao, Yuan Jin, Lan Du, He Zhao, He Zhang, and Gholamreza Haffari. 2020. Summpip: Unsupervised multi- document summarization with sentence graph com- pression. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Devel- opment in Information Retrieval, SIGIR '20, page 1949-1952, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| } |
| }, |
| "ref_entries": {} |
| } |
| } |