{
"paper_id": "2020",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T10:23:43.553382Z"
},
"title": "Hierarchical summarization of financial reports with RUNNER",
"authors": [
{
"first": "Marina",
"middle": [],
"last": "Litvak",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Shamoon College of Engineering (SCE) Beer-Sheva",
"location": {
"country": "Israel"
}
},
"email": "marinal@ac.sce.ac.il"
},
{
"first": "Natalia",
"middle": [],
"last": "Vanetik",
"suffix": "",
"affiliation": {},
"email": "natalyav@sce.ac.il"
},
{
"first": "Tzvi",
"middle": [],
"last": "Puchinsky",
"suffix": "",
"affiliation": {},
"email": "tzvipu@ac.sce.ac.il"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "With the constantly growing amount of information, the need arises to automatically summarize this written information. One of the challenges in the summary is that it's difficult to generalize. For example, summarizing a news article is very different from summarizing a financial earnings report. This paper reports an approach for summarizing financial texts, which are different from the documents from other domains at least in three parameters: length, structure, and format. Our approach considers these parameters, it is adapted to hierarchical structure of sections, document length, and special \"language\". The approach builds a hierarchical summary, visualized as a tree with summaries under different discourse topics. The approach was evaluated using extrinsic and intrinsic automated evaluations, which are reported in this paper. As all participants of the Financial Narrative Summarisation (FNS 2020) shared task, we used FNS2020 dataset for evaluations.",
"pdf_parse": {
"paper_id": "2020",
"_pdf_hash": "",
"abstract": [
{
"text": "With the constantly growing amount of information, the need arises to automatically summarize this written information. One of the challenges in the summary is that it's difficult to generalize. For example, summarizing a news article is very different from summarizing a financial earnings report. This paper reports an approach for summarizing financial texts, which are different from the documents from other domains at least in three parameters: length, structure, and format. Our approach considers these parameters, it is adapted to hierarchical structure of sections, document length, and special \"language\". The approach builds a hierarchical summary, visualized as a tree with summaries under different discourse topics. The approach was evaluated using extrinsic and intrinsic automated evaluations, which are reported in this paper. As all participants of the Financial Narrative Summarisation (FNS 2020) shared task, we used FNS2020 dataset for evaluations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "The area of text summarization exists for several decades, since the first work of Luhn (Luhn, 1958) . Since then, the summarization approaches evolved from simple and straightforward extractive unsupervised approaches to abstractive supervised methods, using deep learning language models (Liu, 2019) . However, the most advanced seq2seq models (transformers) are very limited in input size and, therefore, are inapplicable to long texts. Also, only few of state-of-the-art summarizers consider hierarchical structure of the input documents (Yang and Wang, 2008; Zhang et al., 2019) , their key concepts (Ouyang et al., 2009) or topics (Wang et al., 2013; Akhtar, 2017) and build a hierarchical summary (Christensen et al., 2014; Akhtar et al., 2019) . Usually, hierarchical summary is built per document collection. The top level of hierarchy provides a general overview and users can navigate the hierarchy to drill down for more details on topics of interest.",
"cite_spans": [
{
"start": 88,
"end": 100,
"text": "(Luhn, 1958)",
"ref_id": "BIBREF20"
},
{
"start": 290,
"end": 301,
"text": "(Liu, 2019)",
"ref_id": "BIBREF18"
},
{
"start": 552,
"end": 563,
"text": "Wang, 2008;",
"ref_id": "BIBREF26"
},
{
"start": 564,
"end": 583,
"text": "Zhang et al., 2019)",
"ref_id": "BIBREF28"
},
{
"start": 605,
"end": 626,
"text": "(Ouyang et al., 2009)",
"ref_id": "BIBREF21"
},
{
"start": 637,
"end": 656,
"text": "(Wang et al., 2013;",
"ref_id": "BIBREF24"
},
{
"start": 657,
"end": 670,
"text": "Akhtar, 2017)",
"ref_id": "BIBREF1"
},
{
"start": 704,
"end": 730,
"text": "(Christensen et al., 2014;",
"ref_id": "BIBREF4"
},
{
"start": 731,
"end": 751,
"text": "Akhtar et al., 2019)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "There is a growing interest in the application of automatic and computer-aided approaches for extracting, summarising, and analysing both qualitative and quantitative financial data, as a series of FNP and related workshops (El-Haj, 2019; El-Haj et al., 2018) recently demonstrates. However, summarization of documents in financial domain is usually limited to summarization of financial news (Filippova et al., 2009; Yang and Wang, 2003; de Oliveira et al., 2002 ; Baralis et al., 2016; Zhang et al., 2018) which are not very different from the general news in length and format. Only few attempts were made to summarize financial reports (Isonuma et al., 2017) , which are different from the news articles in at least four parameters: length, structure, format, and lexicon.",
"cite_spans": [
{
"start": 224,
"end": 238,
"text": "(El-Haj, 2019;",
"ref_id": "BIBREF10"
},
{
"start": 239,
"end": 259,
"text": "El-Haj et al., 2018)",
"ref_id": "BIBREF8"
},
{
"start": 393,
"end": 417,
"text": "(Filippova et al., 2009;",
"ref_id": "BIBREF11"
},
{
"start": 418,
"end": 438,
"text": "Yang and Wang, 2003;",
"ref_id": "BIBREF25"
},
{
"start": 439,
"end": 463,
"text": "de Oliveira et al., 2002",
"ref_id": "BIBREF6"
},
{
"start": 466,
"end": 487,
"text": "Baralis et al., 2016;",
"ref_id": "BIBREF2"
},
{
"start": 488,
"end": 507,
"text": "Zhang et al., 2018)",
"ref_id": "BIBREF27"
},
{
"start": 640,
"end": 662,
"text": "(Isonuma et al., 2017)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "This paper reports an approach for hierarchical summarization of financial reports. Financial annual reports in the data of Financial Narrative Summarisation (FNS 2020) shared task 1 (El-Haj et al., 2020) are long, have many sections, and are written in \"financial\" language using many special terms, numerical data, and tables. Our system for hieRarchical sUmmarization fiNaNcial rEpoRts (shortly RUNNER) considers discourse and topic hierarchical structure and builds a hierarchical view of the summarized report with interactive user interface. In contrast with the previous works on hierarchical summarization, our approach considers the internal hierarchical structure of a document and its topics instead mapping it to a global hierarchy of entire corpus.",
"cite_spans": [
{
"start": 183,
"end": 204,
"text": "(El-Haj et al., 2020)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Figure 1: RUNNER pipeline RUNNER utilizes two main methods: topic modeling (TM) and discourse parsing (DP). The pipeline of the proposed methodology is depicted in Figure 1 and includes the following steps:",
"cite_spans": [],
"ref_spans": [
{
"start": 164,
"end": 172,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "Text preprocessing, that includes text cleaning, sentence splitting and tokenization. We developed our own tool that cleaned text before segmenting it to sentences and tokens. Financial reports usually contain a lot of sections, figures, and tables. Because the text files in the FNS-2020 dataset were obtained by converting pdf files to plain texts, these texts contain a lot of \"noise\" left from broken tables and meta-data such as section and page numbers. We cleaned the noise by measuring the ratio between text and numbers and ratio between number of words and whitespaces. Lines with ratio less than 0.4 were removed. Then, regular expressions were applied to find and mark such entities as URL, phone number, date, time, email. Finally, non-Unicode characters were filtered out. Figure 2 demonstrates the example of text before and after preprocessing. Section segmentation, where section headers are identified and a document is segmented into sections. The section titles were extracted following the heuristic rules saying that (1) each title appears in a separate line, (2) does not end with period mark, and (3) contains only few (up to 5) words with (4) each word either starting with capital case letter or containing only upper case letters. The extracted candidates were then compared against the list of 13 manually edited titles 2 . The candidate that obtained Jaccard similarity above 0.4 to one of the titles from the list was extracted as a title. The text body between two consequent titles was marked as a section. Discourse parsing of each section. For discourse parsing we used the CODRA parser (Joty et al., 2015) . CODRA parser performs two-part process: (1) a discourse segmenter creates a segmentation analysis on the sentence level and EDU's for the discourse parsing process and (2) a discourse parser parses the text on sentence level and document level to identify relations between parts of sentences and sentences in the document. \nFigure 3 shows an example discourse tree. Leaf node stands for a sentence or a part of a sentence. The rhetorical analysis of the parser starts from a breaking a text into Elementary Discourse Units (EDUs). Because EDUs do not span across multiple sentences, this segmentation task finds EDUs inside the sentence boundaries. As a result, some sentences (actually, most, according to our observations) are split into EDUs. Every EDU is marked as a nucleus (an essence part) or a satellite (a complementary part of the related nucleus), based on the relation that they are connected to. Internal (relation) nodes represent different inter-sentence relations: elaboration, same-unit, etc. Topic modeling. For topic modeling we applied Latent Dirichlet Allocation (LDA) model (Blei et al., 2003) . It was applied on all files in the FNS-2020 dataset with predefined number of topics 3 . Topic-to-text assignment, where each sentence (or sentence part) represented by a leaf node of the discourse tree, is assigned to one of the topics obtained by LDA. We refer topic probabilities p(t|w) for all sentence S words w \u2208 S as their topic-related importance scores. Therefore, we extract a dominant topic (t \u2208 T ) for each sentence S, as a topic with the maximal normalized sum of topic probabilities for all sentence words w \u2208 S: max t\u2208T w\u2208S p(t|w)",
"cite_spans": [
{
"start": 1621,
"end": 1640,
"text": "(Joty et al., 2015)",
"ref_id": "BIBREF14"
},
{
"start": 2739,
"end": 2758,
"text": "(Blei et al., 2003)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [
{
"start": 787,
"end": 795,
"text": "Figure 2",
"ref_id": "FIGREF0"
},
{
"start": 1967,
"end": 1975,
"text": "Figure 3",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "w\u2208S 1 . Figure 4 shows an example of a topic-to-sentence assignment. Topic distribution smoothing. We noticed that after single text nodes (that stand for sentences or 2 Titles that appear in almost every report in FNS-2020 dataset, such as: 'chairman statement', 'chief executive officer CEO review', 'chief executive officer CEO report', 'governance statement', 'remuneration report', 'business review', 'financial review', 'operating review', 'highlights', 'auditors report', 'risk management', 'chairman governance introduction', 'corporate social responsibility CSR disclosures'. 3 We experimented with 4, 6, and 10 topics, and finally decided to keep 10 topics as best performing value. After reviewing word clusters representing topics, we found that they most probably represent key information from the different sections of the financial report sentence parts) are assigned to topics, we can get unexpected topic distribution where two parts of the same sentence or two adjacent sentences inside the same paragraph and/or belonging to the same discourse relation are assigned to different topics, and transition from one topic to another is not coherent. 4 We decided to smooth topic distribution by extrapolating one dominant topic on entire block of adjacent sentences and sentence parts, connected by a direct discourse relation. We denote nodes with at least one leaf node as \"simple\" (see Figure 5 ) and all leaves in its sub-tree are finally assigned to one dominant topic, so that a \"random\" noise is left out. The implement this approach as follows. We know that all leaf nodes are arranged in the natural sequential order of their texts from right-to-left (top-down) in a discourse tree. \nWe assume that the important information usually comes first (important part of a sentence usually precedes its complementary part, and a sentence stating some fact usually precedes a sentence that elaborates more about this fact) and, therefore, upper right nodes and nucleuses should propagate their topics on their siblings. According to this assumption and our empirical observations on each parameter's influence, the final impact factor N I of node n is calculated as follows.",
"cite_spans": [
{
"start": 585,
"end": 586,
"text": "3",
"ref_id": null
},
{
"start": 1165,
"end": 1166,
"text": "4",
"ref_id": null
}
],
"ref_spans": [
{
"start": 8,
"end": 16,
"text": "Figure 4",
"ref_id": "FIGREF2"
},
{
"start": 1404,
"end": 1412,
"text": "Figure 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "N I(n) = 3 i=1 w i \u00d7 f i (n), where: \u2022 f 1 is a relative depth feature rd(n) = h(t)+1\u2212d(n) h(t)+1",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": ", h(t) is a tree height, d(n) is n's depth",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "\u2022 f 2 is a position feature pos(n) = 1, if n is on right 0, else",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "\u2022 f 3 is a discourse label feature l(n) = 1, if n is nucleus 0, else",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "\u2022 w 1 = 0.5, w 2 = 0.3, and w 3 = 0.2",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "Then, the final dominant topic for a \"simple\" sub-tree is calculated as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "max t\u2208T { n\u2208leaves N I(n) * score t,n }.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "After topic-to-sentence assignment (at previous stage), every leaf node has non-zero value for only one dominant topic, other topics have score t,n = 0.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "We also experimented with the second strategy of assignment topics to sentences, where we do not assign a dominant topic to each leaf but operate their vectors of topic weights",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "v n = (w t 1 , w t 2 , . . . , w t |T | ),",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "where w t i is a normalized sum of topic t i probabilities for all sentence words, as calculated in previous stage. The dominant topic is assigned to entire sub-tree (under the \"simple\" node) by summing the topic distribution vectors multiplied by the importance score of their nodes and choosing a topic with a maximal score. Formally, the dominant topic is assigned as follows: max t\u2208T n\u2208leaves v n \u00d7 N I(n).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "Figure 5: Example of \"simple\" sub-trees with initial LDA-based sentence-over-topics distribution. Based on this distribution and discourse structure, one dominant topic is finally calculated for each \"simple\" sub-tree. Given the initial topic assignment to leaves, the dominant topic of the first (upper) sub-tree is finally assigned to 9.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "Summarization of entire report (regardless visualization) and of each section (for visualization needs) was performed by two different greedy strategies:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "(1) All topics t are ranked by their importance T I(t) (normalized sum of their probabilities for all document/section words). Then, summaries are created by extraction of nucleuses from each topic, in the topics' importance order, until the maximum length limit is reached. As for entire report a summary should not exceed 1000 words according to the shared task instructions, we limit a section summary to 100 words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "(2) All nucleuses are ranked by their importance. An importance score of nucleus m, represented by a node n in the discourse tree, is calculated as N I(n) \u00d7 T I(dt), where dt is a dominant topic assigned to m. Then, in a greedy manner, summaries are created by extraction of nucleuses in their importance order, until the maximum length limit is reached.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "We report the results for both strategies in the Experiments section. Hierarchical visualization. At this stage RUNNER creates an interactive html file with the data from all the stages for a user to browse. The file contains the following sections: (1) original text;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "(2) processed XML text after cleaning and section segmentation;",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "(3) discourse trees for all the sections; (4) sentences (nodes) with assigned topics after smoothing; (5) the final hierarchical tree with the section summaries, and (6) a general report summary. For visualization and interactive user's navigation, the following tree structure of a document is built and present to a user: root represents an entire document and points to its sections, each section is split to major topics inside this section after smoothing, and each topic points to a summary of this particular section focused on the chosen topic. Visualization is performed in interactive manner, upon a user's request. Figure 6 shows an example of such a tree. Demo video 5 demonstrates all interactive options provided by the system. Figure 6 : Visualization summary tree.",
"cite_spans": [],
"ref_spans": [
{
"start": 626,
"end": 634,
"text": "Figure 6",
"ref_id": null
},
{
"start": 742,
"end": 750,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Hierarchical Summarization with RUNNER",
"sec_num": "2"
},
{
"text": "The Financial Narrative Summarisation (FNS 2020) shared task aims to demonstrate the value and challenges of applying automatic text summarisation to financial text written in English, usually referred to as financial narrative disclosures. The task dataset has been extracted from UK annual reports published in PDF file format. UK annual reports are lengthy documents with around 80 pages on average, some annual reports could span over more than 250 pages, while the summary length should not exceed 1000 words. The training set includes 3,000 annual reports, with 3-4 human-generated summaries as gold standard. For the evaluation process the test set of 500 files were provided. To address the time limitations and processing long files 7 the project reduced the length of the original files (to 15000 characters) to be able to process in feasible time limit (20 minutes per file at most). Table 1 contains the dataset statistics. Please note that our method is unsupervised and does not require a training set. Therefore, we calculated average statistics only for the documents of the test set.",
"cite_spans": [],
"ref_spans": [
{
"start": 895,
"end": 902,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Dataset",
"sec_num": "3.1"
},
{
"text": "# documents avg words avg sentences avg sections avg words/section 500 63583.7 40 5.3 13452.6 Table 1 : FNS 2020 dataset statistics. Test set.",
"cite_spans": [],
"ref_spans": [
{
"start": 94,
"end": 101,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Dataset",
"sec_num": "3.1"
},
{
"text": "We evaluate four variations of our approach (denoted by RUNNER ij ), which are combinations of two strategies for node importance calculation (i \u2208 {1, 2}) and two strategies of summarizaion (j \u2208 {1, 2}), and compare their results with two baseline methods-MUSE (Litvak et al., 2010) and POLY (Litvak and Vanetik, 2013) . MUSE is a supervised approach based on a genetic algorithm, it was trained on 30 randomly selected gold standard summaries provided with FNS-2020 dataset. POLY is unsupervised approach based on linear programming, it was applied with Maximal Weighted Term Sum (OBJ1 in (Litvak and Vanetik, 2013) ) objective function.",
"cite_spans": [
{
"start": 261,
"end": 282,
"text": "(Litvak et al., 2010)",
"ref_id": "BIBREF17"
},
{
"start": 292,
"end": 318,
"text": "(Litvak and Vanetik, 2013)",
"ref_id": "BIBREF16"
},
{
"start": 590,
"end": 616,
"text": "(Litvak and Vanetik, 2013)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluated methods",
"sec_num": "3.2"
},
{
"text": "We decided to utilize clustering as an evaluated task and see how similar the clustering of summaries is to that of the original reports. For this purpose, K-means (Lloyd, 1982) was applied on original reports and then on their summaries. Different clustering quality metrics were calculated on both clustering results and compared. As preprocessing before K-Means application, we performed corpus vectorization with tf-idf and Principal component analysis (Pearson, 1901) to reduce dimensionality. Number of clusters was set to three. In order to compare between clustering results we used the following metrics: Davies-Bouldin index (DBI) (Davies and Bouldin, 1979) , Dunn index (DI) (Dunn, 1974) , Silhouette coefficient (SC) (Rousseeuw, 1987), inter-cluster distance (inter-CD-sum of the square distance between each cluster centroid), intra cluster distance (intra-CD-sum of the square distance from the items of each cluster to its centroid), maximum radius (MR-largest distance from an instance to its cluster centroid), and average radius (AR-sum of the largest distances in each cluster divided by the number of clusters).",
"cite_spans": [
{
"start": 164,
"end": 177,
"text": "(Lloyd, 1982)",
"ref_id": "BIBREF19"
},
{
"start": 457,
"end": 472,
"text": "(Pearson, 1901)",
"ref_id": "BIBREF22"
},
{
"start": 641,
"end": 667,
"text": "(Davies and Bouldin, 1979)",
"ref_id": "BIBREF5"
},
{
"start": 686,
"end": 698,
"text": "(Dunn, 1974)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Extrinsic evaluation",
"sec_num": "3.3"
},
{
"text": "Since algorithms that produce clusters with high intra-cluster similarity and low inter-cluster similarity will have a low DBI, the clustering algorithm that produces a collection of clusters with the smallest DBI is considered the best algorithm. Dunn index is defined as the ratio between the minimal inter-cluster distance to maximal intra-cluster distance. Therefore, algorithms that produce clusters with high DI are more desirable. Silhouette coefficient contrasts the average distance to elements in the same cluster with the average distance to elements in other clusters. Objects with a high SC value are considered well clustered, objects with a low value may be outliers. We also measured Precision, Recall, and Purity for all clusters of summaries, assuming that clusters of reports are ground truth. Table 2 shows the comparative results. The best scores are marked in bold and the second best are marked by grey background. It can be seen that clustering of reports gains better scores than clustering of summaries in most metrics. However, smaller intra-cluster similarity and radius mean that clusters of summaries are smaller and more distant from each other. Also, as MUSE SC score shows, the summaries clusters may contain less outsiders. As it can be seen, RUNNER produces summaries with second best scores for sever (out of ten) metrics, meaning that it succeeds to keep the most representative information and filter out the redundant one in its summaries. The most important, that despite close results, clustering of summaries took much less time (16 times faster) than clustering of entire reports-2 versus 33 seconds for entire test set of 500 documents. ",
"cite_spans": [],
"ref_spans": [
{
"start": 813,
"end": 820,
"text": "Table 2",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Extrinsic evaluation",
"sec_num": "3.3"
},
{
"text": "Intrinsic evaluation was performed using ROUGE metrics (Lin, 2004) which work by comparing an automatically produced summary against a set of reference summaries (typically human-produced). We applied three ROUGE metrics-ROUGE-1, ROUGE-2, and ROUGE-L. Table 3 shows the results, with recall, precision, and F-measure for each metric. It can be seen that RUNNER performs better than POLY (both are unsupervised), and even outperforms MUSE (which is supervised) in one metric (ROUGE-L, Precision), meaning that its summaries are less \"scattered\" and more coherent (and therefore probably more readable) than other summaries. The comparative results with other systems participating in the FNS 2020 shared task can be seen in Appendix, Tables 4-7. 8",
"cite_spans": [
{
"start": 55,
"end": 66,
"text": "(Lin, 2004)",
"ref_id": "BIBREF15"
}
],
"ref_spans": [
{
"start": 252,
"end": 259,
"text": "Table 3",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Intrinsic evaluation",
"sec_num": "3.4"
},
{
"text": "For LDA, we used the Python gensim4 package. Corpus tf-idf vectorization and K-means clustering were performed by the Python sklearn package. For running Rouge, we used ROUGE 2.05 java pack-system R-1 R R-1 P R-1 F R-2 R R-2 P R-2 F R-L R R-L P R- age (Ganesan, 2018) . Our approach was implemented in Python and run on Intel Pentium Gold G5400 with 16GB memory server with 40GB swap file configured.",
"cite_spans": [
{
"start": 252,
"end": 267,
"text": "(Ganesan, 2018)",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Tools and runtime environment",
"sec_num": "3.5"
},
{
"text": "This paper describes a new method for hierarchical summarization of financial reports, based on integrating the discourse structure and topic modeling. In future, we intend to apply this method and its extension to educational materials, which also have highly hierarchical structure and an evolving flow of topics in a discourse. Hierarchical summarization can help to organize those materials in a hierarchical structure and provide users with interactive navigation to the topics of interest. RUNNER's source code is available 9 and can be run using the provided instructions 10 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions and Future Work",
"sec_num": "4"
},
{
"text": "We assume that in a natural topic distribution, that is usually observed in general domains, topics must flow from one paragraph (or sections or cluster of sentences) to another, without mix of topics inside clusters.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "ExperimentsWe performed two types of evaluation for our summarization method 6 : extrinsic and intrinsic. Extrinsic evaluation can help judge the quality of the summaries based on how they affect the completion of specific task, while intrinsic evaluation estimates the quality of the generated output directly, usually by comparing it to the human-generated content.5 https://drive.google.com/file/d/14qMRUhZIwaVoSltaLPSiH6NZx13M_9ue/view 6 Only general summaries were evaluated, due to limitations of gold standard summaries provided with a test set.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "mostly, due to a very time-consuming discourse parsing",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Only one variation of RUNNER was submitted to the task evaluations, which is the closest to RUNNER11. However, since then RUNNER's code was significantly updated, therefore the scores are not the same.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "We would like to thank Alla Kitaeva for supporting this project in a scope of the final undergraduate project.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgements",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Hierarchical summarization of text documents using topic modeling and formal concept analysis",
"authors": [
{
"first": "Nadeem",
"middle": [],
"last": "Akhtar",
"suffix": ""
},
{
"first": "Hira",
"middle": [],
"last": "Javed",
"suffix": ""
},
{
"first": "Tameem",
"middle": [],
"last": "Ahmad",
"suffix": ""
}
],
"year": 2019,
"venue": "Data Management, Analytics and Innovation",
"volume": "",
"issue": "",
"pages": "21--33",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nadeem Akhtar, Hira Javed, and Tameem Ahmad. 2019. Hierarchical summarization of text documents using topic modeling and formal concept analysis. In Data Management, Analytics and Innovation, pages 21-33. Springer.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Hierarchical summarization of news tweets with twitter-lda",
"authors": [
{
"first": "Nadeem",
"middle": [],
"last": "Akhtar",
"suffix": ""
}
],
"year": 2017,
"venue": "Applications of Soft Computing for the Web",
"volume": "",
"issue": "",
"pages": "83--98",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nadeem Akhtar. 2017. Hierarchical summarization of news tweets with twitter-lda. In Applications of Soft Computing for the Web, pages 83-98. Springer.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Supporting stock trading in multiple foreign markets: a multilingual news summarization approach",
"authors": [
{
"first": "Elena",
"middle": [],
"last": "Baralis",
"suffix": ""
},
{
"first": "Luca",
"middle": [],
"last": "Cagliero",
"suffix": ""
},
{
"first": "Tania",
"middle": [],
"last": "Cerquitelli",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the Second International Workshop on Data Science for Macro-Modeling",
"volume": "",
"issue": "",
"pages": "1--6",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Elena Baralis, Luca Cagliero, and Tania Cerquitelli. 2016. Supporting stock trading in multiple foreign markets: a multilingual news summarization approach. In Proceedings of the Second International Workshop on Data Science for Macro-Modeling, pages 1-6.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Latent dirichlet allocation",
"authors": [
{
"first": "M",
"middle": [],
"last": "David",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Blei",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Andrew",
"suffix": ""
},
{
"first": "Michael I Jordan",
"middle": [],
"last": "Ng",
"suffix": ""
}
],
"year": 2003,
"venue": "Journal of machine Learning research",
"volume": "3",
"issue": "",
"pages": "993--1022",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "David M Blei, Andrew Y Ng, and Michael I Jordan. 2003. Latent dirichlet allocation. Journal of machine Learning research, 3(Jan):993-1022.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Hierarchical summarization: Scaling up multidocument summarization",
"authors": [
{
"first": "Janara",
"middle": [],
"last": "Christensen",
"suffix": ""
},
{
"first": "Stephen",
"middle": [],
"last": "Soderland",
"suffix": ""
},
{
"first": "Gagan",
"middle": [],
"last": "Bansal",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 52nd annual meeting of the association for computational linguistics",
"volume": "1",
"issue": "",
"pages": "902--912",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Janara Christensen, Stephen Soderland, Gagan Bansal, et al. 2014. Hierarchical summarization: Scaling up multi- document summarization. In Proceedings of the 52nd annual meeting of the association for computational linguistics (volume 1: Long papers), pages 902-912.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "A cluster separation measure",
"authors": [
{
"first": "L",
"middle": [],
"last": "David",
"suffix": ""
},
{
"first": "Donald",
"middle": [
"W"
],
"last": "Davies",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Bouldin",
"suffix": ""
}
],
"year": 1979,
"venue": "IEEE transactions on pattern analysis and machine intelligence",
"volume": "",
"issue": "",
"pages": "224--227",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "David L Davies and Donald W Bouldin. 1979. A cluster separation measure. IEEE transactions on pattern analysis and machine intelligence, (2):224-227.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "A financial news summarization system based on lexical cohesion",
"authors": [
{
"first": "Paulo Cesar Fernandes De",
"middle": [],
"last": "Oliveira",
"suffix": ""
},
{
"first": "Khurshid",
"middle": [],
"last": "Ahmad",
"suffix": ""
},
{
"first": "Lee",
"middle": [],
"last": "Gillam",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the International Conference on Terminology and Knowledge Engineering",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Paulo Cesar Fernandes de Oliveira, Khurshid Ahmad, and Lee Gillam. 2002. A financial news summarization sys- tem based on lexical cohesion. In Proceedings of the International Conference on Terminology and Knowledge Engineering, Nancy, France.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Well-separated clusters and optimal fuzzy partitions",
"authors": [
{
"first": "C",
"middle": [],
"last": "Joseph",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Dunn",
"suffix": ""
}
],
"year": 1974,
"venue": "Journal of cybernetics",
"volume": "4",
"issue": "1",
"pages": "95--104",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Joseph C Dunn. 1974. Well-separated clusters and optimal fuzzy partitions. Journal of cybernetics, 4(1):95-104.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "The first financial narrative processing workshop (fnp 2018)",
"authors": [
{
"first": "Mahmoud",
"middle": [],
"last": "El-Haj",
"suffix": ""
},
{
"first": "Paul",
"middle": [],
"last": "Rayson",
"suffix": ""
},
{
"first": "Andrew",
"middle": [],
"last": "Moore",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the LREC 2018 Workshop",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mahmoud El-Haj, Paul Rayson, and Andrew Moore. 2018. The first financial narrative processing workshop (fnp 2018). In Proceedings of the LREC 2018 Workshop.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "The Financial Narrative Summarisation Shared Task (FNS 2020)",
"authors": [],
"year": 2020,
"venue": "The 1st Joint Workshop on Financial Narrative Processing and MultiLing Financial Summarisation (FNP-FNS 2020",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mahmoud El-Haj, Ahmed AbuRa'ed, Nikiforos Pittaras, and George Giannakopoulos. 2020. The Financial Nar- rative Summarisation Shared Task (FNS 2020). In The 1st Joint Workshop on Financial Narrative Processing and MultiLing Financial Summarisation (FNP-FNS 2020, Barcelona, Spain.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Multiling 2019: Financial narrative summarisation",
"authors": [
{
"first": "Mahmoud",
"middle": [],
"last": "El-Haj",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the Workshop MultiLing 2019: Summarization Across Languages, Genres and Sources",
"volume": "",
"issue": "",
"pages": "6--10",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mahmoud El-Haj. 2019. Multiling 2019: Financial narrative summarisation. In Proceedings of the Workshop MultiLing 2019: Summarization Across Languages, Genres and Sources, pages 6-10.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Company-oriented extractive summarization of financial news",
"authors": [
{
"first": "Katja",
"middle": [],
"last": "Filippova",
"suffix": ""
},
{
"first": "Mihai",
"middle": [],
"last": "Surdeanu",
"suffix": ""
},
{
"first": "Massimiliano",
"middle": [],
"last": "Ciaramita",
"suffix": ""
},
{
"first": "Hugo",
"middle": [],
"last": "Zaragoza",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 12th Conference of the European Chapter",
"volume": "",
"issue": "",
"pages": "246--254",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Katja Filippova, Mihai Surdeanu, Massimiliano Ciaramita, and Hugo Zaragoza. 2009. Company-oriented extrac- tive summarization of financial news. In Proceedings of the 12th Conference of the European Chapter of the ACL (EACL 2009), pages 246-254.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Rouge 2.0: Updated and improved measures for evaluation of summarization tasks",
"authors": [
{
"first": "Kavita",
"middle": [],
"last": "Ganesan",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1803.01937"
]
},
"num": null,
"urls": [],
"raw_text": "Kavita Ganesan. 2018. Rouge 2.0: Updated and improved measures for evaluation of summarization tasks. arXiv preprint arXiv:1803.01937.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Extractive summarization using multi-task learning with document classification",
"authors": [
{
"first": "Masaru",
"middle": [],
"last": "Isonuma",
"suffix": ""
},
{
"first": "Toru",
"middle": [],
"last": "Fujino",
"suffix": ""
},
{
"first": "Junichiro",
"middle": [],
"last": "Mori",
"suffix": ""
},
{
"first": "Yutaka",
"middle": [],
"last": "Matsuo",
"suffix": ""
},
{
"first": "Ichiro",
"middle": [],
"last": "Sakata",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "2101--2110",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Masaru Isonuma, Toru Fujino, Junichiro Mori, Yutaka Matsuo, and Ichiro Sakata. 2017. Extractive summarization using multi-task learning with document classification. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2101-2110.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Codra: A novel discriminative framework for rhetorical analysis",
"authors": [
{
"first": "Shafiq",
"middle": [],
"last": "Joty",
"suffix": ""
},
{
"first": "Giuseppe",
"middle": [],
"last": "Carenini",
"suffix": ""
},
{
"first": "Raymond T",
"middle": [],
"last": "Ng",
"suffix": ""
}
],
"year": 2015,
"venue": "Computational Linguistics",
"volume": "41",
"issue": "3",
"pages": "385--435",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Shafiq Joty, Giuseppe Carenini, and Raymond T Ng. 2015. Codra: A novel discriminative framework for rhetori- cal analysis. Computational Linguistics, 41(3):385-435.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Rouge: A package for automatic evaluation of summaries",
"authors": [
{
"first": "Chin-Yew",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2004,
"venue": "Text summarization branches out",
"volume": "",
"issue": "",
"pages": "74--81",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Mining the gaps: Towards polynomial summarization",
"authors": [
{
"first": "Marina",
"middle": [],
"last": "Litvak",
"suffix": ""
},
{
"first": "Natalia",
"middle": [],
"last": "Vanetik",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the Sixth International Joint Conference on Natural Language Processing",
"volume": "",
"issue": "",
"pages": "655--660",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marina Litvak and Natalia Vanetik. 2013. Mining the gaps: Towards polynomial summarization. In Proceedings of the Sixth International Joint Conference on Natural Language Processing, pages 655-660.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "A new approach to improving multilingual summarization using a genetic algorithm",
"authors": [
{
"first": "Marina",
"middle": [],
"last": "Litvak",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Last",
"suffix": ""
},
{
"first": "Menahem",
"middle": [],
"last": "Friedman",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 48th annual meeting of the association for computational linguistics",
"volume": "",
"issue": "",
"pages": "927--936",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marina Litvak, Mark Last, and Menahem Friedman. 2010. A new approach to improving multilingual summariza- tion using a genetic algorithm. In Proceedings of the 48th annual meeting of the association for computational linguistics, pages 927-936.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Fine-tune bert for extractive summarization",
"authors": [
{
"first": "Yang",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1903.10318"
]
},
"num": null,
"urls": [],
"raw_text": "Yang Liu. 2019. Fine-tune bert for extractive summarization. arXiv preprint arXiv:1903.10318.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Least squares quantization in PCM",
"authors": [
{
"first": "Stuart",
"middle": [],
"last": "Lloyd",
"suffix": ""
}
],
"year": 1982,
"venue": "IEEE transactions on information theory",
"volume": "28",
"issue": "2",
"pages": "129--137",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stuart Lloyd. 1982. Least squares quantization in PCM. IEEE transactions on information theory, 28(2):129-137.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "The automatic creation of literature abstracts",
"authors": [
{
"first": "Hans",
"middle": [],
"last": "Peter Luhn",
"suffix": ""
}
],
"year": 1958,
"venue": "IBM Journal of research and development",
"volume": "2",
"issue": "2",
"pages": "159--165",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hans Peter Luhn. 1958. The automatic creation of literature abstracts. IBM Journal of research and development, 2(2):159-165.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "An integrated multi-document summarization approach based on word hierarchical representation",
"authors": [
{
"first": "You",
"middle": [],
"last": "Ouyang",
"suffix": ""
},
{
"first": "Wenjie",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Qin",
"middle": [],
"last": "Lu",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the ACL-IJCNLP 2009 Conference Short Papers",
"volume": "",
"issue": "",
"pages": "113--116",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "You Ouyang, Wenjie Li, and Qin Lu. 2009. An integrated multi-document summarization approach based on word hierarchical representation. In Proceedings of the ACL-IJCNLP 2009 Conference Short Papers, pages 113-116.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Liii. on lines and planes of closest fit to systems of points in space. The London, Edinburgh, and Dublin Philosophical Magazine",
"authors": [
{
"first": "Karl",
"middle": [],
"last": "Pearson",
"suffix": ""
}
],
"year": 1901,
"venue": "Journal of Science",
"volume": "2",
"issue": "11",
"pages": "559--572",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karl Pearson. 1901. Liii. on lines and planes of closest fit to systems of points in space. The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science, 2(11):559-572.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Silhouettes: a graphical aid to the interpretation and validation of cluster analysis",
"authors": [
{
"first": "J",
"middle": [],
"last": "Peter",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Rousseeuw",
"suffix": ""
}
],
"year": 1987,
"venue": "Journal of computational and applied mathematics",
"volume": "20",
"issue": "",
"pages": "53--65",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Peter J Rousseeuw. 1987. Silhouettes: a graphical aid to the interpretation and validation of cluster analysis. Journal of computational and applied mathematics, 20:53-65.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Content coverage maximization on word networks for hierarchical topic summarization",
"authors": [
{
"first": "Chi",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Xiao",
"middle": [],
"last": "Yu",
"suffix": ""
},
{
"first": "Yanen",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Chengxiang",
"middle": [],
"last": "Zhai",
"suffix": ""
},
{
"first": "Jiawei",
"middle": [],
"last": "Han",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 22nd ACM international conference on Information & Knowledge Management",
"volume": "",
"issue": "",
"pages": "249--258",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chi Wang, Xiao Yu, Yanen Li, Chengxiang Zhai, and Jiawei Han. 2013. Content coverage maximization on word networks for hierarchical topic summarization. In Proceedings of the 22nd ACM international conference on Information & Knowledge Management, pages 249-258.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Automatic summarization for financial news delivery on mobile devices",
"authors": [
{
"first": "C",
"middle": [],
"last": "Christopher",
"suffix": ""
},
{
"first": "Fu Lee",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Wang",
"suffix": ""
}
],
"year": 2003,
"venue": "WWW (Posters)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christopher C Yang and Fu Lee Wang. 2003. Automatic summarization for financial news delivery on mobile devices. In WWW (Posters).",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Hierarchical summarization of large documents",
"authors": [
{
"first": "C",
"middle": [],
"last": "Christopher",
"suffix": ""
},
{
"first": "Fu Lee",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Wang",
"suffix": ""
}
],
"year": 2008,
"venue": "Journal of the American Society for Information Science and Technology",
"volume": "59",
"issue": "6",
"pages": "887--902",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Christopher C Yang and Fu Lee Wang. 2008. Hierarchical summarization of large documents. Journal of the American Society for Information Science and Technology, 59(6):887-902.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Extractive-abstractive summarization with pointer and coverage mechanism",
"authors": [
{
"first": "Yong",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Erdan",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Weidong",
"middle": [],
"last": "Xiao",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of 2018 International Conference on Big Data Technologies",
"volume": "",
"issue": "",
"pages": "69--74",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yong Zhang, Erdan Chen, and Weidong Xiao. 2018. Extractive-abstractive summarization with pointer and coverage mechanism. In Proceedings of 2018 International Conference on Big Data Technologies, pages 69- 74.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Hibert: Document level pre-training of hierarchical bidirectional transformers for document summarization",
"authors": [
{
"first": "Xingxing",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Furu",
"middle": [],
"last": "Wei",
"suffix": ""
},
{
"first": "Ming",
"middle": [],
"last": "Zhou",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1905.06566"
]
},
"num": null,
"urls": [],
"raw_text": "Xingxing Zhang, Furu Wei, and Ming Zhou. 2019. Hibert: Document level pre-training of hierarchical bidirec- tional transformers for document summarization. arXiv preprint arXiv:1905.06566.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"uris": null,
"num": null,
"type_str": "figure",
"text": "Text before and after preprocessing."
},
"FIGREF1": {
"uris": null,
"num": null,
"type_str": "figure",
"text": "Discourse tree for sentences from \"remuneration report\" section of document 17941.txt in FNS-2020 dataset."
},
"FIGREF2": {
"uris": null,
"num": null,
"type_str": "figure",
"text": "Topic-to-text assignment."
},
"TABREF1": {
"type_str": "table",
"text": "Clustering results.",
"html": null,
"content": "<table/>",
"num": null
},
"TABREF3": {
"type_str": "table",
"text": "Rouge results.",
"html": null,
"content": "<table/>",
"num": null
}
}
}
}