| { |
| "paper_id": "K19-1038", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T07:05:49.410040Z" |
| }, |
| "title": "Automated Pyramid Summarization Evaluation", |
| "authors": [ |
| { |
| "first": "Yanjun", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pennsylvania State University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pennsylvania State University", |
| "location": {} |
| }, |
| "email": "chensunx@gmail.com" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Pennsylvania State University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Pyramid evaluation was developed to assess the content of paragraph length summaries of source texts. A pyramid lists the distinct units of content found in several reference summaries, weights content units by how many reference summaries they occur in, and produces three scores based on the weighted content of new summaries. We present an automated method that is more efficient, more transparent, and more complete than previous automated pyramid methods. It is tested on a new dataset of student summaries, and historical NIST data from extractive summarizers.", |
| "pdf_parse": { |
| "paper_id": "K19-1038", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Pyramid evaluation was developed to assess the content of paragraph length summaries of source texts. A pyramid lists the distinct units of content found in several reference summaries, weights content units by how many reference summaries they occur in, and produces three scores based on the weighted content of new summaries. We present an automated method that is more efficient, more transparent, and more complete than previous automated pyramid methods. It is tested on a new dataset of student summaries, and historical NIST data from extractive summarizers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "During the 70's and 80's, educational pychologists studied human summarization skills, and their development throughout secondary school and beyond. Three separate skills are acquired in the following order: selection of important information, abstraction through vocabulary generalization and sentence fusion, and integration with background knowledge (van Dijk and Kintsch, 1977; Brown and Day, 1983) . A recent comparison of summaries from human experts versus extractive summarizers on forty-six topics from the TAC 2010 summarization challenge used automatic caseframe analysis, and found essentially these same properties in the human summaries, and not in the extractive ones (Cheung and Penn, 2013) . Abstractive summarizers, however, are beginning to replicate the first two of these behaviors, as illustrated in many published examples based on encoder-decoder and pointergenerator neural architectures (Nallapati et al., 2016; See et al., 2017; Hsu et al., 2018; Guo et al., 2018) . Summarization evaluation relies almost exclusively on ROUGE (Lin, 2004) , an automated tool that cannot directly assess importance of summary content, or novel wording for the same infor-Aligned PyrEval (W=5) and Manual (W=4) SCU RSUM1 For example, an art gallery in London held an exhibit. with digital curr. as the preferred . . . RSUM2 However, there has been some positive news as bus. such as a Scottish Hotel & a London Art Gallery are allowing cust. to pay with crypto currencies RSUM3 Cellan-Jones (2018) writes recent days both a London art gallery and a Scottish hotel . . . to allow their cust. to pay with crypto-currencies. RSUM4 by suggesting that {a London art gallery & Scottish hotel chain plan to . . . support for different crypto-currencies.} P araph . . . { that the London based art gallery would use only crypto currencies} P araph RSUM5 Businesses located in London and Scotland have made enquiries to allow payment from customers using cryptoc.", |
| "cite_spans": [ |
| { |
| "start": 353, |
| "end": 381, |
| "text": "(van Dijk and Kintsch, 1977;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 382, |
| "end": 402, |
| "text": "Brown and Day, 1983)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 683, |
| "end": 706, |
| "text": "(Cheung and Penn, 2013)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 913, |
| "end": 937, |
| "text": "(Nallapati et al., 2016;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 938, |
| "end": 955, |
| "text": "See et al., 2017;", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 956, |
| "end": 973, |
| "text": "Hsu et al., 2018;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 974, |
| "end": 991, |
| "text": "Guo et al., 2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1054, |
| "end": 1065, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Match to a student summary that used synonyms: a craftsmanship exhibition alongside a Scottish inn have plans for their clients to pay in digital currencies to a manual SCU of weight 4 from a dataset of student summaries. The manual and automated SCUs express the same content, and their weights differ only by one. For each of five reference summaries (RSUM1-RSUM5), exact matches of words between the PyrEval and manual contributor are in bold, text in plain font (RSUM2, RSUM4) appears only in the manual version, and text in italics appears only in the PyrEval version. Paraphrases of the same content from RSUM4 were identified by human annotators (plain font) and PyrEval (italics). Also shown is a matching segment from a student summary, where the student used synonyms of some of the words in the reference summaries.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "mation. We present an automated method to assess the importance of summary content, independent of wording, based on a widely used manual evaluation called pyramid (Nenkova et al., 2007) . The pyramid method and ROUGE both use multiple summaries written by humans as references to assess new summaries. The manual pyramid method requires human annotators to identify Summary Content Units (SCUs) by grouping phrases from different reference summaries into the same SCU if they express the same propositional content. Figure 1 illustrates an SCU from a manual pyramid applied to college student summaries of articles on cryptocurrency, with contributions from four of the five reference summaries (RSUM1-RSUM4). It is aligned to a nearly identical SCU constructed by PyrEval, with a contribution from the fifth reference (RSUM5). Previous work has shown that these kinds of discrepancies occur between human annotators, and have little effect on interannotator agreement or rankings of summarizers (Passonneau, 2010) . The importance of an SCU increases with the number of reference summaries that express it, as indicated by its weight. If an evaluation summary expresses the same content as an SCU, its score is increased by the SCU weight (details below). ROUGE allows the user to select among numerous ways to measure ngram overlap of a new summary to the references, e.g., for different ngram sizes with or without skips, and with or without stemming. ROUGE does not, however, consider the relative importance of content, or account for synonyms of words that appear in the reference summaries.", |
| "cite_spans": [ |
| { |
| "start": 164, |
| "end": 186, |
| "text": "(Nenkova et al., 2007)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 997, |
| "end": 1015, |
| "text": "(Passonneau, 2010)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 517, |
| "end": 525, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We present PyrEval, 1 which outperforms previous work on automated pyramid in accuracy and efficiency. It produces human-readable pyramids, and prints matches between SCUs and evaluation summaries, which can support feedback for educational applications. PyrEval performs well on a new dataset of student summaries, where we applied the pyramid annotation. We also present results for TAC 2010 automated summaries, one of the more recent years where NIST applied pyramid evaluation. While ROUGE-2 more accurately identifies system differences than PyrEval, its performance is more sensitive to different topics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The challenge in evaluation of summary content is that different equally good human summaries have only partial content overlap. van Halteren and Teufel (2003) annotated factoids (similar to FOL propositions, and to SCUs) for fifty summaries of a Dutch news article, and found a Zipfian distribution of factoid frequency: a small number of factoids represent 80% of the content in summaries, but a very long tail of rare content accounts for 20%. Pyramid annotation of ten summaries for a 1 Available at https://github.com/serenayj/ PyrEval few DUC 2003 topics had a similar a Zipfian distribution (Nenkova and Passonneau, 2004) .", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 159, |
| "text": "Halteren and Teufel (2003)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 598, |
| "end": 628, |
| "text": "(Nenkova and Passonneau, 2004)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pyramid Content Analysis", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Pyramid has had extensive reliability testing. A sensitivity analysis showed four reference summaries were sufficient for score reliability, and with probability of misranking errors below 0.01% (Nenkova and Passonneau, 2004; Nenkova et al., 2007) . Interannotator agreement using Krippendorff's alpha on ten pyramids ranged from 0.61 to 0.89, and averaged 0.78 on matching new summaries to pyramids for 16 systems on 3 topics each (Passonneau, 2010) . Comparison of two manual pyramid evaluations from distinct annotators showed that different pyramids for the same topic yield the same system rankings, even though SCUs from different pyramids typically do not align exactly (Passonneau, 2010) .", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 225, |
| "text": "(Nenkova and Passonneau, 2004;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 226, |
| "end": 247, |
| "text": "Nenkova et al., 2007)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 432, |
| "end": 450, |
| "text": "(Passonneau, 2010)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 677, |
| "end": 695, |
| "text": "(Passonneau, 2010)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pyramid Content Analysis", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The default size of a phrase that contributes to an SCU is a simple clause, but if it is clear from the context that a summary essentially expresses the same content expressed in other reference summaries, it is said to contribute to the same SCU, and the annotator must select at least a few contributing words. SCU weights reflect how many of N reference summaries express the SCU content. As such, SCUs are constrained to have no more than one contributor phrase from each reference summary. If a summary repeats the same information, the repetition will increment the count of total SCUs within one summary, but cannot be a distinct contributor. For example, the paraphrases from RSUM4 shown in Figure 1 add two to the total SCU size of the summary, but can only be used once to increment an SCU weight. Simple clauses in an evaluation summary that do not match pyramid SCUs add to the summary's SCU count, but have zero weight.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 699, |
| "end": 707, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Pyramid Content Analysis", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Summarization is an important component of strategy instruction in reading and writing skills (Graham and Perin, 2007) , but is used less than it could be due to the demands of manual grading and feedback. However, integration of NLP with rubric-based assessment has received increasing attention. and Gerard and Linn (2016) applied automated assessment using rubrics to successfully identify students who need the most help, and facilitate and meaningful classroom interactions. Agejev and\u0160najder (2017) used ROUGE and BLEU to identify col-lege students' L2 skills. Santamar\u00eda Lancho et al. (2018) used G-Rubric, an LSA-based tool, to help instructors grade short text answers to open-ended questions. Passonneau et al. (2018) found a high correlation of an automated pyramid with a manual rubric on a small set of summaries; see last paragraph of this section. ROUGE is the most prevalent method to assess automated summarization.", |
| "cite_spans": [ |
| { |
| "start": 94, |
| "end": 118, |
| "text": "(Graham and Perin, 2007)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 703, |
| "end": 727, |
| "text": "Passonneau et al. (2018)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In 39 long papers on summarization in ACL conferences from 2013 through 2018 (mostly abstractive), 87% used ROUGE-1, ROUGE-2 or other variants such as LCS (longest common subsequence). A few used POURPRE (question answering) (Lin and Demner-Fushman, 2006) , or METEOR (MT) (Denkowski and Lavie, 2014) to investigate scores based on weighted content or synonymy. POURPRE relies on string matching against reference units called answer facts, weighting matches by inverse document frequency. METEOR aligns words between reference and candidate, and can use relaxed word matching, such as WordNet synonymy. Despite its dominant use in previous work, Graham (2015) noted that the large range of ROUGE variants causes inconvenience and instability in evaluating performance. Graham's results from testing the 192 variants on DUC2004 data suggest that the ROUGE variants that correlate best with human evaluations are not often used.", |
| "cite_spans": [ |
| { |
| "start": 225, |
| "end": 255, |
| "text": "(Lin and Demner-Fushman, 2006)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 273, |
| "end": 300, |
| "text": "(Denkowski and Lavie, 2014)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 647, |
| "end": 660, |
| "text": "Graham (2015)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "PyrEval differs from other automated pyramid tools in its focus on accurately isolating and weighting the distinct SCUs in the reference summaries. Three previous semi-automated pyramid tools used dynamic programming to score summaries, given a manual pyramid (Harnly et al., 2005; Passonneau et al., 2013 Passonneau et al., , 2018 . The first of these used unigram overlap to compare summaries to a pyramid. Absolute scores were much lower than ground truth, but average system rankings across multiple tasks were accurate. A subsequent extension that used cosine similarity of latent vector representations of ngrams and SCUs, based on (Guo and Diab, 2012) , had much better performance (Passonneau et al., 2013) . This was extended further through use of a weighted set cover algorithm for scoring (Passonneau et al., 2018) . PEAK was the first fully automated approach to construct a pyramid and score summaries (Yang et al., 2016) . It uses OpenIE to extract subjectpredicate-object triples from references, then con-structs a hypergraph with triples as hyperedges. Semantic similarity between nodes from distinct hyperedges is measured using ADW's random walks over WordNet (Pilehvar et al., 2013) , to assign weights to triples. On a small set of summaries used here in Table 1 , PEAK raw scores had a high correlation with a manual summary rubric. PEAK was also tested on a single DUC 2006 topic, where the input text was first manually altered. Because PEAK is slow, Peyrard and Eckle-Kohler (2017) reimplemented its use of the Hungarian algorithm to optimize their summarizer. Because PEAK produces many noisy copies of the same SCU, its output cannot be used to justify scores based on the unique matches or misses of a student summary to SCUs. Its score normalizations are inaccurate, and the un-normalized scores are impractical for general-purpose evaluation.", |
| "cite_spans": [ |
| { |
| "start": 260, |
| "end": 281, |
| "text": "(Harnly et al., 2005;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 282, |
| "end": 305, |
| "text": "Passonneau et al., 2013", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 306, |
| "end": 331, |
| "text": "Passonneau et al., , 2018", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 638, |
| "end": 658, |
| "text": "(Guo and Diab, 2012)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 689, |
| "end": 714, |
| "text": "(Passonneau et al., 2013)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 801, |
| "end": 826, |
| "text": "(Passonneau et al., 2018)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 916, |
| "end": 935, |
| "text": "(Yang et al., 2016)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 1172, |
| "end": 1203, |
| "text": "WordNet (Pilehvar et al., 2013)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1277, |
| "end": 1284, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To construct a pyramid, humans identify contributor segments 2 and group them into SCUs. Evaluating a summary is a simpler process of matching phrases to existing SCUs. PyrEval performs analogous steps, as shown in Figure 2 . It first decomposes sentences of reference summaries (RSUM) into segments (DECOMP PARSE) and converts them into semantic vectors (LATENT SEM). It then applies EDUA to group the segment vectors into SCUs. EDUA (see below) is a novel restricted set partition algorithm that maximizes the semantic similarity within SCUs, subject to SCU constraints. Evaluation summaries (ESUM) are preprocessed in a similar fashion to convert them to segments represented as vectors. As in (Passonneau et al., 2018) , PyrEval applies WMIN (Sakai et al., 2003) to find the optimal set of matches with pyramid SCUs. The remainder of this section describes each step.", |
| "cite_spans": [ |
| { |
| "start": 697, |
| "end": 722, |
| "text": "(Passonneau et al., 2018)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 746, |
| "end": 766, |
| "text": "(Sakai et al., 2003)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 215, |
| "end": 223, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "PyrEval System", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The decomposition parser takes as input a phrase structure parse and dependency parse for each sentence, using Stanford CoreNLP . Every tensed verb phrase (VP) from the phrase structure parse initializes a new segment. The head verbs of tensed VPs are aligned to the dependency parse, and their dependent subjects are then attached to the segments. Words other than Figure 2 : PyrEval preprocessors segment sentences from reference (RSUM) and evaluation (ESUM) summaries into clause-like units, then convert them to latent vectors. EDUA constructs a pyramid from RSUM vectors (lower left): the horizontal bands of the pyramid represent SCUs of decreasing weight (shaded squares). WMIN matches SCUs to ESUM segments to produce a raw score, and three normalized scores. those in the VP and subject are reinserted in their original order. Every sentence has at least one default segmentation corresponding to the full sentence, possibly with one or more alternative segmentations of at least two segments each. It performs well for most cases apart from sentences with coordinate structures, which are notoriously difficult for conventional parsers. Figure 3 illustrates a sentence segmentation, with three alternatives. 3", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 366, |
| "end": 374, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1147, |
| "end": 1155, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentence Decomposition", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The second PyrEval preprocessing step converts segments to semantic vectors. We chose to avoid semantic representation that requires training, to make PyrEval a lightweight, standalone tool. Although recent contextualized representations perform very well on a variety of NLP tasks, they are typically intended as the basis for a transfer learning approach, or to initialize further task-specific 3 Segmentation 1.6.2 is the one EDUA-G selects for the pyramid.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Vectors for Segments", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Christopher Shake, the director of the London art gallery, suggests that this is not the case and that many different companies of different natures not just technology related are getting involved with cryptocurrencies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Vectors for Segments", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "1.6.1.0 that this is not the case 1.6.1.1 Christopher Shake, the director of the London art gallery, suggests and. 1.6.1.2 that many different companies of different natures not just technology related are getting involved with cryptocurrencies 1.6.2.0 Christopher Shake, the director of the London art gallery, suggests that this is not the case and. 1.6.2.1 that many different companies of different natures not just technology related are getting involved with cryptocurrencies 1.6.3.0 Christopher Shake, the director of the London art gallery, suggests and that many different companies of different natures not just technology related are getting involved with cryptocurrencies. 1.6.3.1 that this is not the case Figure 3 : Segmentation output for a sentence from a reference summary for the \"CryptoCurrencies\" topic of our student summaries. neural training (e.g., (Pagliardini et al., 2018; Peters et al., 2018; Devlin et al., 2018; Vaswani et al., 2017) ). The most practical way to rely on completely pre-trained representations is to use word embeddings along with a method to combine them into phrase embeddings. Here we report on a comparison of ELMo (Peters et al., 2018) and the Universal Sentence Encoder for English (USE) (Cer et al., 2018) with two conventional word embedding methods, GloVe (Pennington et al., 2014) and WTMF (Guo and Diab, 2012) . 4 ELMo is character-based rather than wordbased, relies on a many-layered bidirectional LSTM, and incorporates word sequence (language model) information. It was trained on billions of tokens of Wikipedia and news text. To create meaning vectors for strings of words, we use pretrained ELMo vectors, taking the weighted sum of 3 output layers as the word embeddings, then applying mean pooling. 5 USE is intended for transfer learning tasks, based on Transformer (Vaswani et al., 2017) or the (Iyyer et al., 2015) deep averaging network (DAN). \nWe create meaning vectors for word strings with the USE-DAN pretrained encoder. 6 We use the GloVe download for 100D vec- Figure 4 : Part of an EDUA solution graph. Each vertex is a segment vector from a reference summary, indexed by Summary.ID (si), Sentence.ID (sij), Segmentation.ID (s ijk ), Segment.ID (s ijkm ). All segments of all reference summaries have a corresponding node. All edges connect segments from different summaries with similarity \u2265 t edge . This schematic representation of a partial solution contains three fully connected subgraphs with attraction edges (solid lines), each representing an SCU, whose weight is the number of vertices (segments).", |
| "cite_spans": [ |
| { |
| "start": 872, |
| "end": 898, |
| "text": "(Pagliardini et al., 2018;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 899, |
| "end": 919, |
| "text": "Peters et al., 2018;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 920, |
| "end": 940, |
| "text": "Devlin et al., 2018;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 941, |
| "end": 962, |
| "text": "Vaswani et al., 2017)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 1164, |
| "end": 1185, |
| "text": "(Peters et al., 2018)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 1239, |
| "end": 1257, |
| "text": "(Cer et al., 2018)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1310, |
| "end": 1335, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 1345, |
| "end": 1365, |
| "text": "(Guo and Diab, 2012)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1368, |
| "end": 1369, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 1831, |
| "end": 1853, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 1861, |
| "end": 1881, |
| "text": "(Iyyer et al., 2015)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1992, |
| "end": 1993, |
| "text": "6", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 719, |
| "end": 727, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 2034, |
| "end": 2042, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Semantic Vectors for Segments", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "tors trained on the 840B Common Crawl. 7 To combine the GloVe word vectors into a phrase vector, we use the weighted averaging method from (Arora et al., 2016) . WTMF is a matrix factorization method. We use WTMF matrices trained on the Guo and Diab (2012) corpus (393K sentences, 81K vocabulary size) that consists of WordNet, Wiktionary, and the Brown corpus.", |
| "cite_spans": [ |
| { |
| "start": 39, |
| "end": 40, |
| "text": "7", |
| "ref_id": null |
| }, |
| { |
| "start": 139, |
| "end": 159, |
| "text": "(Arora et al., 2016)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Vectors for Segments", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We compare the four embedding methods on three datasets. Because our goal is to select a method that performs well on pyramid annotation, the first two datasets are human and machine summaries with manual pyramid annotations, with correlation of the manual pyramid and PyrEval scores as the metric. WIM (for What is Matter) is a dataset of student summaries with pyramid annotation from (Passonneau et al., 2018) with 20 student summaries on one topic. Note that PyrEval achieved a correlation of 0.85 on this data, compared with 0.82 for PEAK (Passonneau et al., 2018) . We also use a subset of data from the NIST TAC 2009 summarizer challenge. We use summaries from all 54 peer systems on 14 of the 44 topics. We also use the STS-14 benchmark dataset of semantic similarity judgements (3750 sentence pairs), as in (Guo and Diab, 2012) . Table 1 shows WTMF to perform best on the 1. Given a set of n reference summaries R, a preprocessing function (described in subsections 4.1-4.2) SEG returns segments as vectors: \u2200Ri \u2208 R, SEGS(Ri) = {seg ijk1 , seg ijk2 , . . . , seg ijkm } where seg ijkm is the mth segment of the kth segmentation of the jth sentence in the ith summary.", |
| "cite_spans": [ |
| { |
| "start": 387, |
| "end": 412, |
| "text": "(Passonneau et al., 2018)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 544, |
| "end": 569, |
| "text": "(Passonneau et al., 2018)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 816, |
| "end": 836, |
| "text": "(Guo and Diab, 2012)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 839, |
| "end": 846, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Semantic Vectors for Segments", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "2. A graph G is constructed from SEGS(Ri), where an edge connects segments seg ijkm , seg i j k m if (i = i , seg ijkm = seg i j k m , cosine(seg ijkm , seg i j k m ) \u2265 t edge ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Vectors for Segments", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Every fully connected subgraph (clique) is a candidate scu whose size is the number of nodes, which has a maximum of n.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Vectors for Segments", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "3. The attraction score of an scu z , AS(scu", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Vectors for Segments", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "z ) = 1 ( |scu z | 2 ) seg ijkm ,seg i j k m \u2208scu z ,seg ijkm =seg i j k m cosine(seg ijkm , seg i j k m ) if z > 1, else = 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Semantic Vectors for Segments", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "SCU x that is a covering of all sentences in R (meaning only one segmentation per sentence belongs to", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A candidate pyramid P is a set of equivalence classes", |
| "sec_num": "4." |
| }, |
| { |
| "text": "any P ), \u2200 x \u2208 [1, n] : (\u2203 SCU x \u2208 P ) \u2192 (x \u2208 [1, n], \u2200scu z \u2208 SCU x , x = z). An SCU x has an attraction class score AC(SCU x ) = 1 |SCU x | scu z \u2208SCU x AS(SCU z ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A candidate pyramid P is a set of equivalence classes", |
| "sec_num": "4." |
| }, |
| { |
| "text": "5. Finally, a pyramid P has an attraction score AP(P ) = SCU x \u2208P AS(SCU x ). 6. The optimal pyramid(R) = P that maximizes AP. three tasks by a large margin. We speculate this results from two factors. The lower dimensionality of WTMF vectors compared to ELMo or USE-DAN leads to higher maximum cosine values, thus better contrast between similar and dissimilar pairs. WTMF differs from similar matrix reduction methods in assigning a small weight to non-context words, which improves robustness for short phrases (fewer context words) Guo and Diab (2012) . The authors also claimed that a training corpus largely consisting of definitional statements leads to co-occurrence data that is less noisy than sentences found in the wild.", |
| "cite_spans": [ |
| { |
| "start": 536, |
| "end": 555, |
| "text": "Guo and Diab (2012)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A candidate pyramid P is a set of equivalence classes", |
| "sec_num": "4." |
| }, |
| { |
| "text": "EDUA (Emergent Discovery of Units of Attraction) is a restricted set partition algorithm. It constructs an optimal pyramid to achieve the highest attraction (semantic similarity of segments) in all SCUs. Figure 4 schematically represents the input graph to EDUA (see also item 1 in Figure 5) , whose nodes consist of the segment vectors described in the preceding section, and whose edges connect segments from different summaries whose cosine similarity \u2265 t edge . 8 A candidate SCU is a fully connected subgraph (clique; item 2 in Figure 5 ). Every candidate SCU has an attraction score AS equal to the average of the edge scores (item 3 in Figure 5) . A candidate pyramid is a set of SCUs that constitute a covering of all the sentences in the input reference summaries, with all segments for a given sentence coming from only one of its segmentations. The SCU weights for a pyramid, which are in [1, n] for n reference summaries, form a partition over its segments, and each equivalence class (all SCUs of the same weight) has a score AC that is the average of its SCU scores (item 4 in Figure 5 ). The score for a candidate pyramid AP is the sum of its AC scores (item 5 in Figure 5 ). We use the sum rather than the average for AP to favor the equivalence classes for higher weight SCUs. The optimal pyramid maximizes AP (item 6 in Figure 5 ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 204, |
| "end": 212, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 282, |
| "end": 291, |
| "text": "Figure 5)", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 533, |
| "end": 541, |
| "text": "Figure 5", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 643, |
| "end": 652, |
| "text": "Figure 5)", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1091, |
| "end": 1099, |
| "text": "Figure 5", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1179, |
| "end": 1187, |
| "text": "Figure 5", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 1338, |
| "end": 1346, |
| "text": "Figure 5", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "EDUA", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EDUA has two implementations. EDUA-C implements a complete solution based on depth first search (DFS) of candidate SCUs that guarantees the global optimum (max AP). EDUA-G is a greedy approximation. 9", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "EDUA", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EDUA-C constructs an adjacency list that for each clique (candidate SCU) in the input graph, identifies all the other SCUs that satisfy two constraints: 1) for any given sentence, all SCUs reference the same segmentation; 2) all segments in all SCUs are distinct. DFS search proceeds through the adjacency list, ordering the SCUs by weight, until a path is found through all SCUs that meets the constraints. The solution has the highest AP, or in the case of ties, the path found first. Figure 6 illustrates a toy EDUA-C DFS tree. Each node depicts a candidate SCU clique, labelled by the number of nodes in the clique (SCU weight) . No child node has a higher weight than its parent nodes. A child node is added to a search path (solid nodes) if it violates no constraints. Each of the six paths in the figure would receive an AP score. After DFS finds all legal paths, the one with highest AP is selected as the solution. Figure 4 , labeled by their weights. Each DFS path is a partition over one way to segment all the input summaries and group all segments into SCUs. The solution is the path with the highest AP. Table 2 compares the distribution of SCUs by weight of the two EDUA variants with manual pyramids on the student summary dataset discussed in the next section. EDUA-C produces a more skewed distribution than EDUA-G. Both variants suffer from the coarse-grained segmentation output from the decomposition parser, but EDUA-G compensates by enforcing the Zipfian distribution observed in most pyramids (see appendix A for details).", |
| "cite_spans": [ |
| { |
| "start": 619, |
| "end": 631, |
| "text": "(SCU weight)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 487, |
| "end": 495, |
| "text": "Figure 6", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 924, |
| "end": 932, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 1118, |
| "end": 1125, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "EDUA-C", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "To evaluate speed, we tested both variants on datasets with different numbers and lengths of reference summaries. TAC 2010 reference summaries (4 per topic) have on average 46 segments each, and 321 candidate SCUs. Pyramid construction for TAC 2010 takes less than 10 seconds with either variant on an Ubuntu machine with 4 Intel i5-6600 CPUs. EDUA-G's greater efficiency is more apparent for larger input. DUC 2005 has seven reference summaries per topic, and longer summaries than in TAC 2010; on five, EDUA-C takes 211 seconds, while EDUA-G is still only about ten seconds; on six, EDUA-C takes 20 minutes, compared to 5 minutes for EDUA-G. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison of EDUA variants", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "For automatic matching of phrases in evaluation summaries to SCUs in a manual pyramid, Passonneau et al. (2018) found good performance with WMIN (Sakai et al., 2003) , a greedy maximum weighted independent set algorithm. Because EDUA pyramids are analogous to manual pyramids, PyrEval also uses WMIN. The input to WMIN is a graph where each node is a tuple of a segmentation of an ESUM sentence with the sets of SCUs that give the highest average cosine similarity for that sentence. The node weight is the sum of SCU weights. Graph edges enforce constraints that only one segmentation for a sentence can be selected, and each pyramid SCU can be matched to an ESUM sentence at most once. WMIN selects the nodes that result in the maximum sum of SCU weights for the ESUM. Score computation is a function of the matched SCUs, as illustrated by the ESUM in the lower right of Figure 2 . This ESUM has five SCUS: two of weight 5, one of weight 4, one of weight 2, and one that does not match the pyramid (zero weight). The sum of SCU weights is 16. The original pyramid score, a precision analog, normalizes the raw sum by the maximum sum for the same SCU count given by the pyramid -(3 \u00d7 5) + (2 \u00d7 4) -indicating the degree to which the summary SCUs are as high weighted as possible. Following (Passonneau et al., 2018) , we use the term quality score. The average number of SCUs in the reference summaries is 15, whose maximum weight from this pyramid is 53. Normalizing the raw sum by 53 gives a coverage score of 0.30 (a recall analog). The harmonic mean of these scores gives an F score analog referred to as a comprehensive score.", |
| "cite_spans": [ |
| { |
| "start": 87, |
| "end": 111, |
| "text": "Passonneau et al. (2018)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 145, |
| "end": 165, |
| "text": "(Sakai et al., 2003)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 1291, |
| "end": 1316, |
| "text": "(Passonneau et al., 2018)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 873, |
| "end": 881, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "WMIN Scoring", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "As part of a collaboration with a researcher in educational technology, we collected a new data set of student summaries that were assigned in fall 2018 to computer science freshmen in a university in the United Kingdom (Gao et al., 2019) . Our immediate goal is to see how PyrEval could support instructors who assign summaries by providing an automated assessment that could be later corrected, but which provides scores and score justifications. PyrEval scores correlate well with manual pyramid scores on content, and the log output it produces provides a clear trace of score computation (see below). The class was an academic skills class that included instruction in academic reading and writing. For one assignment, they were instructed to select one of two current technology topics (three readings per topic), then to summarize it in 150 to 250 words. The two topics are shown below, with the number of student summaries per reading, and average number of words. To write reference summaries for both topics, the instructor recruited advanced students who had done well in her academic skills class in previous years. Three trained annotators applied manual pyramid annotation to the student summaries. As noted in section 2, pyramid annotation is highly reliable. Annotations of the student summaries were performed in two passes by different annotators. Table 3 reports the correlation between the manual pyramid scores and the PyrEval scores on the two sets of student summaries. For both AV and CC, EDUA-G performs better than EDUA-C and ROUGE-2, the best ROUGE variant on TAC10 (see below), and ROUGE-2 performs better than EDUA-C. We attribute the lower correlations on the quality score, and the lower performance on this dataset compared to WIM (see Table 1 ), to the greater challenges of the new dataset. WIM students read a single, middle school text, and average summary length was 109.02 words. 
For the new dataset, students read three advanced texts, and produced summaries that were over twice the length (see above). Error analysis shows complex sentence structure for the AV and CC data, with many constructions such as conjunctions and lists, that the decomposition parser cannot handle. As noted above, EDUA-G compensates due to a Zipfian constraint on the pyramid shape. Figure 1 compares a PyrEval SCU with a manual one for the cryptocurrency topic, and Single PyrEval SCU (W=3) about the relation of \"car accidents\" to \"insurance cost\" RSUM1 Also, as most collisions are due to human error, costs of insurance for self driving cars could fall by up to <NUM>. RSUM2 The cars themselves would also reduce insurance premiums; <NUM> percent of road accidents are caused by human error RSUM3 Shankleman does well to balance out the positives such as lower insurance , reduced traffic , savings on mechanical costs and lower chance of road accidents .", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 238, |
| "text": "(Gao et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1366, |
| "end": 1373, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 1768, |
| "end": 1775, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 2301, |
| "end": 2309, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Student Summaries", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Single manual SCU (W=4) on \"high car accidents\" Single manual SCU (W=4) on \"lower insurance\" RSUM1 Also, as most collisions are due to human costs of insurance for self-driving cars could fall by error up to 50% RSUM2 90 percent of road accidents are caused by human The cars themselves would also reduce insurance error premiums RSUM3 . . . lower chance of road accidents . . . . . . lower insurance . . . RSUM4 . . . he claims that over 90 percent of road traffic this would result in lower insurance premium for accidents occur as a result of human error owners of autonomous vehicles by up to 50 percent.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Student Summaries", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Student IDs Segments correctly matching this PyrEval SCU to students' summaries (from PyrEval log output) A The insurance industry is also going to experience great changes as the director insurer of AXA SA explains that more than <NUM> percent of road accidents are caused by human error. B", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Student Summaries", |
| "sec_num": "5" |
| }, |
| { |
| "text": "as <NUM> of the accidents are caused by human errors, also reducing the number of human drivers will contribute to cheap insurance premiums and efficient transport C Shankleman explains how problems with modern day transport such as high crash statistics and extortionate insurance costs will be eradicated with such computing capabilities. also illustrates issues that might explain the relatively poorer performance of ROUGE. We show a phrase that both the manual annotator and PyrEval matched to the SCU from one of the student summaries, where the student used near synonyms for terms in the articles and reference summaries: craftmanship exhibition for art gallery, and inn for hotel. ROUGE cannot match synonyms, and does not distinguish differences in content importance. Figure 7 shows an excerpt from PyrEval's log output on autonomous vehicle to illustrate the alignment of an SCU to three student summaries and comparison to two manual SCUs. 10 The PyrEval SCU captures a causal relation between \"car accidents due to human error\" and \"lower insurance costs.\" The two manual SCUs, however, show that the human annotators split this content into two SCUs, because the content is expressed in distinct clauses in RSUM1 and RSUM2. The same content is supported by the implicit contexts for the shorter RSUM3 contributing phrases. The RSUM4 contributor in the manual SCU about \"lower insurance\" illustrates another issue that PyrEval preprocessing cannot handle: resolution of the deictic pronoun subject in \"this would result . . . \".", |
| "cite_spans": [ |
| { |
| "start": 953, |
| "end": 955, |
| "text": "10", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 779, |
| "end": 787, |
| "text": "Figure 7", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Student Summaries", |
| "sec_num": "5" |
| }, |
| { |
| "text": "10 Preprocessing replaces numeric character strings with tags.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Student Summaries", |
| "sec_num": "5" |
| }, |
| { |
| "text": "NIST summarization challenges dealt exclusively with news, which is also the most prevalent genre for automated summarizers in our survey of 2013-2018 ACL publications (23/39 summarizers; see above). To evaluate ROUGE, NIST used two human gold standards in yearly challenges from 2005 through 2011, one of which was manual pyramid. Annotation was performed by volunteers among the challenge participants, using guidelines developed for DUC 2006. 11 In this section, we apply a method NIST helped develop to evaluate ROUGE against manual pyramid in an evaluation of PyrEval against manual pyramid. We selected TAC 2010 because summarizer performance was less good in the earlier years.", |
| "cite_spans": [ |
| { |
| "start": 436, |
| "end": 448, |
| "text": "DUC 2006. 11", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TAC 2010 Summaries", |
| "sec_num": "6" |
| }, |
| { |
| "text": "TAC 2010 had two 100-word summarization tasks on 10 documents for 46 topics. Task A summarization was guided by a query. Task B was an update to A, based on additional input. On inspection of the 92 pyramids (46 each for Tasks A and B), we found that roughly 27% had poor quality pyramids that did not follow the guidelines mentioned above. We assembled a team of five people familiar with manual pyramid to manually redo the twelve pyramids that were independently identified as the lowest quality. 12 Tests of the correlation of human scores as-11 http://www1.cs.columbia.edu/\u02dcbecky/ DUC2006/2006-pyramid-guidelines.html; we followed these guidelines for annotating the student signed to automated summaries with ROUGE (and other automated metrics) were found to be unreliable, because of high score variance resulting as much from properties of the input texts as from differences in summarization systems (Nenkova, 2005; Nenkova and Louis, 2008) . Analyses of over a decade of NIST data from automated summarizers that evaluate ROUGE against manual pyramid and another manual score led to a solution to this problem (Rankel et al., 2013; Owczarzak et al., 2012a,b; Rankel et al., 2011) . The solution is to use Wilcoxon signed rank tests, so that pairs of systems are compared on matched input in a way that tests for statistical significance. The outcome is either that one of the systems is significantly better than the other, or that the difference between them is not statistically significant. To determine if the automated metric accurately reflects the gold standard scores, the same Wilcoxon tests are performed using the manually assigned scores on all pairs of systems, matching each pair on the same inputs. A given automated metric is then compared to the human gold standard to determine how accurately the automated metric leads to the same set of significant differences between all pairs of systems. 
Table 4 presents bootstrapped accuracy results for ROUGE and PyrEval using 41 topics per bootstrap sample, along with absolute accuracy on all 46 topics. Each selection of 41 topics gives a gold standard set of system differences against which to compare a given metric. ROUGE 2 has the highest average accuracy on both Task A and B. ROUGE 1 performs nearly as well on Task A. PyrEval performs less well on average accuracy for all tasks, but similarly to ROUGE 1 in Task B. ROUGE-2 has greater sensitivity to topics, as shown by the higher deltas between the bootstrapped accuracy on 41 topics versus the accuracy on all 46. The differences in Table 4 between the bootstrapped summaries. 12 We plan to ask NIST if we can make this data available through them.", |
| "cite_spans": [ |
| { |
| "start": 500, |
| "end": 502, |
| "text": "12", |
| "ref_id": null |
| }, |
| { |
| "start": 909, |
| "end": 924, |
| "text": "(Nenkova, 2005;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 925, |
| "end": 949, |
| "text": "Nenkova and Louis, 2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1120, |
| "end": 1141, |
| "text": "(Rankel et al., 2013;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 1142, |
| "end": 1168, |
| "text": "Owczarzak et al., 2012a,b;", |
| "ref_id": null |
| }, |
| { |
| "start": 1169, |
| "end": 1189, |
| "text": "Rankel et al., 2011)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1921, |
| "end": 1928, |
| "text": "Table 4", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 2566, |
| "end": 2611, |
| "text": "Table 4 between the bootstrapped summaries.", |
| "ref_id": "TABREF7" |
| } |
| ], |
| "eq_spans": [], |
| "section": "TAC 2010 Summaries", |
| "sec_num": "6" |
| }, |
| { |
| "text": "averages across 41 topics, and the accuracy scores on all 46 topics, confirm the sensitivity of evaluation results to topics noted in (Nenkova, 2005; Nenkova and Louis, 2008) .", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 149, |
| "text": "(Nenkova, 2005;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 150, |
| "end": 174, |
| "text": "Nenkova and Louis, 2008)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "TAC 2010 Summaries", |
| "sec_num": "6" |
| }, |
| { |
| "text": "PyrEval outperforms previous automated pyramid methods in accuracy, efficiency, score normalization, and interpretability. It correlates with manual pyramid better than ROUGE on a new dataset of student summaries, and produces output that helps justify the scores (similar to the examples for Figures 1 and 7) . While it does not perform as well as ROUGE on extractive summarization, we speculate it would outperform ROUGE on abstractive summarizers. It relies on EDUA, a novel restricted set partition algorithm, that expects semantic vectors of sentence segments as input. The current rule-based method that identifies sentence substrings (the decomposition parser) is limited by the output of the constituency and dependency parsers it relies on. We are currently working on a neural architecture that simultaneously identifies simple clauses and produces semantic representations that could provide better input for both EDUA and WMIN, and thus improve PyrEval.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 293, |
| "end": 309, |
| "text": "Figures 1 and 7)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "This work was supported in part by a Fellowship from Teaching and Learning with Technology, Penn State University, and by NSF award IIS-1847842. We thank two Penn State undergraduate research assistants for their contributions to the code base: Andrew Warner, and Purushartha Singh. Brent Hoffert, who recently graduated from Penn State, developed the wrapper that simplifies the use of PyrEval. Several additional Penn State undergrads helped correct the TAC 10 pyramids: Brent Hoffert, Alex Driban, Sahil Mishra, Xuannan Su, and Kun Wang. Finally, we thank the reviewers for their helpful suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "A EDUA-G EDUA-G (Greedy) is a greedy approximation to EDUA-C with a backtracking algorithm adjusting the allocation of candidate SCUs, and enforcing the constraints. In this appendix, we use the same notation as in section 4.3. Instead of finding the solution globally with maximum AP across all possible pyramids, EDUA-G works on achieving the maximum AS for each set of SCUs of a given size locally, starting with the set C n with highest weight (number of nodes per subgraph c), then the rest in descending order. In addition to the constraints 1 and 2 in EDUA-C (mentioned in section 4.4), EDUA-G has a capacity constraint for each set C r during search, limiting the number of SCUs committed to the class. This constraint is determined by the length of all the reference summaries and exploits an empirical observation of pyramids: that SCUs have a Zipfian distribution of frequency across reference summaries: a few have the highest weight, and for each lower weight there are more in number, with a very long tail of SCUs of weight 1.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "To enforce the capacity constraint during search, we define the maximum number of SCUs y n of each equivalence class C n as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "y n = \u03b1 1 n \u03b2 (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "where n is the index of the equivalence class, \u03b1 is a constant related to the total number of segments from all reference summaries, and \u03b2 is a scaling parameter (Clauset et al., 2009) . Thus in addition to t edge , EDUA-G has the hyperparameters \u03b1 and \u03b2. The capacities of the equivalence classes are monotone increasing as n decreases:", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 184, |
| "text": "(Clauset et al., 2009)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "|C n | \u2264 |C n\u22121 |", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Summing over |C n | gives the size of the pyramid:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "N i |C n | \u2264 N i y i", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Algorithm 1 presents EDUA-G.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Initialization Similar to EDUA-C, a segment pool SP = SEGS(R 1 ) \u222a . . . \u222a SEGS(R n ) is first constructed from all the reference summaries to store segments and two status flags. The pool is accessible globally. For every segment seg ijkm , two status flags are set:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Algorithm 1: EDUA-G Data: Number of reference summaries n; a list CU of candidate SCUs ordered by weight r where 1 \u2264 r \u2264 n, then by attraction score AS(CU r ); capacity of each equivalence class y1 . . . yn by formula 1; a segment pool SP , residuals L1 Result: Pyramid P with equivalence classes C1 . . . Cn 1 Initialize r = n, 2 Cr = \u2205, P = \u2205, Dr as empty stack, 3 while (r > 1) \u2227 (|Cr| \u2264 yr) do 4 push all candidate CU r selected from CU into Dr sorted by attraction score in ascending order ; 1. segmentation status, denoted as seg ijkm .commit: for all seg \u2208 SP , seg.commit will be initialized as NotValid; during EDUA-G, if a sentence is first used by a segmentation seg ijk , all segments seg ijk * .commit are set to True, and all other seg ijk * from this sentence seg ij are set to False 2. segment status, denoted as seg ijkm .used:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "when initialized, seg ijkm .used is set to False; if segment seg ijkm is used in an SCU, the status seg ijkm .used is set to True A graph G is constructed from all segments. A list of candidate SCUs (fully connected subgraph) with weights r from n to 2 is exhaustively extracted from G. All the leftover segments with weight 1 are stored as residuals denoted as L 1 , by default sorted by the index.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Allocation The allocation process proceeds topdown, iterating over descending values of r from Figure 8 : Contour plot for score correlations with \u03b2 (X-axis) and t edge (Y-axis).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 95, |
| "end": 103, |
| "text": "Figure 8", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Parameters DF F-value P-value \u03b1 4 0.31 0.872 \u03b2 4 104.31 0.000 t edge 11 6.56 0.000 Table 5 : One-way ANOVA for hyperparameters, with degrees of freedom (DF), F value and P-value (significance level \u03b1=0.05, sample size N=300).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 83, |
| "end": 90, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "8" |
| }, |
| { |
| "text": "These can be discontinuous substrings, and can reuse words from other contributors, e.g., subjects of VP conjuncts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We do not show results for Word2Vec(Mikolov et al., 2013), where performance was similar to GloVe.5 We use ELMo module from https://github. com/allenai/allennlp/.6 https://tfhub.dev/google/ universal-sentence-encoder/2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://nlp.stanford.edu/projects/ glove/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "The value of t edge is automatically set to the 83rd percentile of all pairwise cosine similarities in the input data.9 See Appendix A for EDUA-G.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "t edge \u2208[60, 70], or [80, 87]. Higher t edge yields fewer edges in the graph, so for efficiency, we select \u03b2 = 2.5, and N = 83. (Depending on the dataset this corresponds to cosine similarities t edge of about 0.15 to 0.35.)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "n to 2. All candidate SCUs are ordered first by weight, then by descending AS. Each set C r is filled with all candidate SCUs of size r, where maximum AS(SCU r ) is selected greedily, until the capacity constraint is satisfied. Every SCU committed to C r requires the segment status to be checked and updated. Then the residual segments are allocated to C 1 as in EDUA-C if the status of the segments permits. For 1 < r < n, if the provisional pyramid violates any constraints, backtracking considers a provisional revision of C r+1 based on reallocating all the segments in each subset of C r+1 of size q, for q from 1 to the size of C r+1 , considering reallocations in order of descending values of AP. The algorithm terminates when all the constraints are satisfied and no segments remain whose segmentation status is True and whose segment status is False. Backtracking The backtracking algorithm proceeds bottom-up, from the current set C r to C n . Recall from section 4.3, every pair of segments in an SCU has an edge \u2265 t edge ; therefore an SCU with r + 1 contributors can be decomposed into SCUs with r contributors. We utilize this property to ensure every set C r satisfies the constraints. During the emergent search and allocation of SCUs, if a set C r does not meet the capacity constraint, the backtracking process will be initiated for re-allocation by re-using the segments committed to SCUs in C r+1 , to compose new SCUs in C r . As shown in Algorithm 2, while the allocation process selects SCUs with maximum attraction scores greedily, the backtracking takes a conservative approach of re-doing the commit decision by decomposing one SCU at a time in C r+1 with the least AS(SCU r ), and composing new SCUs with weight r for C r . It proceeds recursively from r to n until the resulting P satisfies the constraints. This is because every SCU in C r+1 has higher importance than in C r , and this minimizes the impact of the re-allocation step on AP. 
The backtracking algorithm terminates after all the constraints are satisfied.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "annex", |
| "sec_num": null |
| }, |
| { |
| "text": "Grid search was used to tune the EDUA-G hyperparameters. On DUC 2005 data, we used \u03b1 in the range [|seg|+10,|seg|+50] where |seg| is the number of input segments, and \u03b2 \u2208 [1, 3]. To set t edge , we compute pairwise similarities of all segment pairs from different summaries, and take t edge as the value at percentile N , for N \u2208 [60, 87] . The performance metric was correlation with manual pyramid on individual summarization tasks. Table 5 of ANOVA on the hyperparameters shows that \u03b2 and t edge have strong impact, while \u03b1 does not (we select \u03b1 = 10). A contour plot of all combinations of \u03b2 and t edge (Figure 8) gives two regions of high correlation: \u03b2 \u2208 [2.5, 3], and", |
| "cite_spans": [ |
| { |
| "start": 98, |
| "end": 117, |
| "text": "[|seg|+10,|seg|+50]", |
| "ref_id": null |
| }, |
| { |
| "start": 330, |
| "end": 334, |
| "text": "[60,", |
| "ref_id": null |
| }, |
| { |
| "start": 335, |
| "end": 338, |
| "text": "87]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 435, |
| "end": 442, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 607, |
| "end": 617, |
| "text": "(Figure 8)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "B Grid Search on Hyperparameters", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Using analytic scoring rubrics in the automatic assessment of college-level summary writing tasks in l2", |
| "authors": [ |
| { |
| "first": "Tamara", |
| "middle": [], |
| "last": "Sladoljev Agejev", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan\u0161najder", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "181--186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tamara Sladoljev Agejev and Jan\u0160najder. 2017. Using analytic scoring rubrics in the automatic assessment of college-level summary writing tasks in l2. In Pro- ceedings of the Eighth International Joint Confer- ence on Natural Language Processing (Volume 2: Short Papers), pages 181-186.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "A simple but tough-to-beat baseline for sentence embeddings", |
| "authors": [ |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingyu", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tengyu", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sanjeev Arora, Yingyu Liang, and Tengyu Ma. 2016. A simple but tough-to-beat baseline for sentence em- beddings. In International Conference on Learning Representations (ICLR 2017).", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Macrorules for summarizing texts: The development of expertise", |
| "authors": [ |
| { |
| "first": "Ann", |
| "middle": [ |
| "L" |
| ], |
| "last": "Brown", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeanne", |
| "middle": [ |
| "D" |
| ], |
| "last": "Day", |
| "suffix": "" |
| } |
| ], |
| "year": 1983, |
| "venue": "Journal of Verbal Learning and Verbal Behavior", |
| "volume": "22", |
| "issue": "", |
| "pages": "1--14", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ann L. Brown and Jeanne D. Day. 1983. Macrorules for summarizing texts: The development of exper- tise. Journal of Verbal Learning and Verbal Behav- ior, 22:1-14.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Universal sentence encoder for English", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Cer", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinfei", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Sheng-Yi", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "Nan", |
| "middle": [], |
| "last": "Hua", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicole", |
| "middle": [], |
| "last": "Limtiaco", |
| "suffix": "" |
| }, |
| { |
| "first": "Rhomni", |
| "middle": [], |
| "last": "St", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "Mario", |
| "middle": [], |
| "last": "Constant", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Guajardo-Cespedes", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Tar", |
| "suffix": "" |
| }, |
| { |
| "first": "Ray", |
| "middle": [], |
| "last": "Strope", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kurzweil", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "169--174", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Cer, Yinfei Yang, Sheng-yi Kong, Nan Hua, Nicole Limtiaco, Rhomni St. John, Noah Constant, Mario Guajardo-Cespedes, Steve Yuan, Chris Tar, Brian Strope, and Ray Kurzweil. 2018. Universal sentence encoder for English. In Proceedings of the 2018 Conference on Empirical Methods in Nat- ural Language Processing: System Demonstrations, pages 169-174, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Towards robust abstractive multi-document summarization: A caseframe analysis of centrality and domain", |
| "authors": [ |
| { |
| "first": "Jackie", |
| "middle": [ |
| "Chi" |
| ], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Kit", |
| "middle": [], |
| "last": "Cheung", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerald", |
| "middle": [], |
| "last": "Penn", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1233--1242", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jackie Chi Kit Cheung and Gerald Penn. 2013. To- wards robust abstractive multi-document summa- rization: A caseframe analysis of centrality and do- main. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 1233-1242. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Power-law distributions in empirical data", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Clauset", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Cosma Rohilla Shalizi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mark", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Newman", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "SIAM review", |
| "volume": "51", |
| "issue": "4", |
| "pages": "661--703", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Clauset, Cosma Rohilla Shalizi, and Mark EJ Newman. 2009. Power-law distributions in empiri- cal data. SIAM review, 51(4):661-703.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Meteor universal: Language specific translation evaluation for any target language", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Denkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the ninth workshop on statistical machine translation (ACL)", |
| "volume": "", |
| "issue": "", |
| "pages": "376--380", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Denkowski and Alon Lavie. 2014. Meteor universal: Language specific translation evaluation for any target language. In Proceedings of the ninth workshop on statistical machine translation (ACL), pages 376-380, Baltimore, MD.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of deep bidirectional transformers for language under- standing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Cognitive psychology and discourse: Recalling and summarizing stories", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Teun", |
| "suffix": "" |
| }, |
| { |
| "first": "Walter", |
| "middle": [], |
| "last": "Van Dijk", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kintsch", |
| "suffix": "" |
| } |
| ], |
| "year": 1977, |
| "venue": "Trends in textlinguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "61--80", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Teun A. van Dijk and Walter Kintsch. 1977. Cognitive psychology and discourse: Recalling and summariz- ing stories. In W. U. Dressier, editor, Trends in text- linguistics, pages 61-80. De Gruyter, New York.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Rubric reliability and annotation of content and argument in source-based argument essays", |
| "authors": [ |
| { |
| "first": "Yanjun", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Driban", |
| "suffix": "" |
| }, |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Brennan", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Mcmanus", |
| "suffix": "" |
| }, |
| { |
| "first": "Patricia", |
| "middle": [], |
| "last": "Musi", |
| "suffix": "" |
| }, |
| { |
| "first": "Smaranda", |
| "middle": [], |
| "last": "Davies", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Muresan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Passonneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "507--518", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yanjun Gao, Alex Driban, Brennan Xavier McManus, Elena Musi, Patricia Davies, Smaranda Muresan, and Rebecca J Passonneau. 2019. Rubric relia- bility and annotation of content and argument in source-based argument essays. In Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications, pages 507- 518.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Examining the impacts of annotation and automated guidance on essay revision and science learning", |
| "authors": [ |
| { |
| "first": "Libby", |
| "middle": [], |
| "last": "Gerard", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Marcia", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacquie", |
| "middle": [], |
| "last": "Linn", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Madhok", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transforming Learning, Empowering Learners: The International Conference of the Learning Sciences (ICLS) 2016. Singapore: International Society of the Learning Sciences", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Libby Gerard, Marcia C Linn, and Jacquie Madhok. 2016. Examining the impacts of annotation and automated guidance on essay revision and science learning. In C. K. Looi, J. L. Polman, U. Cress, and P. Reimann, editors, Transforming Learning, Em- powering Learners: The International Conference of the Learning Sciences (ICLS) 2016. Singapore: International Society of the Learning Sciences.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Using automated scores of student essays to support teacher guidance in classroom inquiry", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Libby", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcia C", |
| "middle": [], |
| "last": "Gerard", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Linn", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Journal of Science Teacher Education", |
| "volume": "27", |
| "issue": "1", |
| "pages": "111--129", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Libby F Gerard and Marcia C Linn. 2016. Using au- tomated scores of student essays to support teacher guidance in classroom inquiry. Journal of Science Teacher Education, 27(1):111-129.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A metaanalysis of writing instruction for adolescent students", |
| "authors": [ |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Dolores", |
| "middle": [], |
| "last": "Perin", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Journal of Educational Psychology", |
| "volume": "99", |
| "issue": "3", |
| "pages": "445--476", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steve Graham and Dolores Perin. 2007. A meta- analysis of writing instruction for adolescent students. Journal of Educational Psychology, 99(3):445-476.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Re-evaluating automatic summarization with BLEU and 192 shades of ROUGE", |
| "authors": [ |
| { |
| "first": "Yvette", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "128--137", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D15-1013" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yvette Graham. 2015. Re-evaluating automatic sum- marization with BLEU and 192 shades of ROUGE. In Proceedings of the 2015 Conference on Empiri- cal Methods in Natural Language Processing, pages 128-137, Lisbon, Portugal. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Soft layer-specific multi-task summarization with entailment and question generation", |
| "authors": [ |
| { |
| "first": "Han", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramakanth", |
| "middle": [], |
| "last": "Pasunuru", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "687--697", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Han Guo, Ramakanth Pasunuru, and Mohit Bansal. 2018. Soft layer-specific multi-task summarization with entailment and question generation. In Pro- ceedings of the 56th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 687-697. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Modeling sentences in the latent space", |
| "authors": [ |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Mona", |
| "middle": [], |
| "last": "Diab", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "864--872", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Weiwei Guo and Mona Diab. 2012. Modeling sen- tences in the latent space. In Proceedings of the 50th ACL, pages 864-872.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Examining the consensus between human summaries: Initial experiments with factoid analysis", |
| "authors": [ |
| { |
"first": "Hans",
"middle": [],
"last": "van Halteren",
"suffix": ""
},
{
"first": "Simone",
"middle": [],
"last": "Teufel",
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the HLT-NAACL 2003 Workshop on Text Summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "57--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hans van Halteren and Simone Teufel. 2003. Ex- amining the consensus between human summaries: Initial experiments with factoid analysis. In Pro- ceedings of the HLT-NAACL 2003 Workshop on Text Summarization, pages 57-64. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Automation of summary evaluation by the pyramid method", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Harnly", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Owen", |
| "middle": [], |
| "last": "Rambow", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the Conference of Recent Advances in Natural Language Processing (RANLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "226--232", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Harnly, Ani Nenkova, Rebecca J. Passonneau, and Owen Rambow. 2005. Automation of summary evaluation by the pyramid method. In Proceedings of the Conference of Recent Advances in Natural Language Processing (RANLP), pages 226-232.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A unified model for extractive and abstractive summarization using inconsistency loss", |
| "authors": [ |
| { |
| "first": "Wan-Ting", |
| "middle": [], |
| "last": "Hsu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chieh-Kai", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Ying", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kerui", |
| "middle": [], |
| "last": "Min", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "132--141", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wan-Ting Hsu, Chieh-Kai Lin, Ming-Ying Lee, Kerui Min, Jing Tang, and Min Sun. 2018. A unified model for extractive and abstractive summarization using inconsistency loss. In Proceedings of the 56th Annual Meeting of the Association for Computa- tional Linguistics (Volume 1: Long Papers), pages 132-141. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Deep unordered composition rivals syntactic methods for text classification", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Varun", |
| "middle": [], |
| "last": "Manjunatha", |
| "suffix": "" |
| }, |
| { |
| "first": "Jordan", |
| "middle": [], |
| "last": "Boyd-Graber", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "1681--1691", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Iyyer, Varun Manjunatha, Jordan Boyd-Graber, and Hal Daum\u00e9 III. 2015. Deep unordered compo- sition rivals syntactic methods for text classification. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers), pages 1681-1691, Beijing, China. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "ROUGE: A package for automatic evaluation of summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Text summarization branches out: Proceedings of the ACL-04 workshop", |
| "volume": "8", |
| "issue": "", |
| "pages": "74--81", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. ROUGE: A package for auto- matic evaluation of summaries. In Text summariza- tion branches out: Proceedings of the ACL-04 work- shop, volume 8, pages 74-81. Barcelona, Spain.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Methods for automatically evaluating answers to complex questions", |
| "authors": [ |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Dina", |
| "middle": [], |
| "last": "Demner-Fushman", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Information Retrieval", |
| "volume": "9", |
| "issue": "", |
| "pages": "565--587", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jimmy Lin and Dina Demner-Fushman. 2006. Meth- ods for automatically evaluating answers to complex questions. Information Retrieval, 9:565-587.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "The Stanford CoreNLP natural language processing toolkit", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenny", |
| "middle": [], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [ |
| "J" |
| ], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
"last": "McClosky",
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Association for Computational Linguistics (ACL) System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "55--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher D. Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven J. Bethard, and David Mc- Closky. 2014. The Stanford CoreNLP natural lan- guage processing toolkit. In Association for Compu- tational Linguistics (ACL) System Demonstrations, pages 55-60.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1301.3781" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Abstractive text summarization using sequence-tosequence RNNs and beyond", |
| "authors": [ |
| { |
| "first": "Ramesh", |
| "middle": [], |
| "last": "Nallapati", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "C\u00edcero", |
| "middle": [], |
| "last": "Nogueira", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Santos", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Aglar G\u00fcl\u00e7ehre", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "20th SIGNLL Conference on Computational Natural Language Learning (CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "280--290", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramesh Nallapati, Bowen Zhou, C\u00edcero Nogueira dos Santos, \u00c7 aglar G\u00fcl\u00e7ehre, and Bing Xiang. 2016. Abstractive text summarization using sequence-to- sequence RNNs and beyond. In 20th SIGNLL Con- ference on Computational Natural Language Learn- ing (CoNLL, pages 280-290. ACL.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Automatic text summarization of newswire: Lessons learned from the document understanding conference", |
| "authors": [ |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the 20th National Conference on Artificial Intelligence", |
| "volume": "3", |
| "issue": "", |
| "pages": "1436--1441", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ani Nenkova. 2005. Automatic text summarization of newswire: Lessons learned from the document un- derstanding conference. In Proceedings of the 20th National Conference on Artificial Intelligence -Vol- ume 3, AAAI'05, pages 1436-1441. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Can you summarize this? identifying correlates of input difficulty for multi-document summarization", |
| "authors": [ |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| }, |
| { |
| "first": "Annie", |
| "middle": [], |
| "last": "Louis", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08: HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "825--833", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ani Nenkova and Annie Louis. 2008. Can you sum- marize this? identifying correlates of input difficulty for multi-document summarization. In Proceedings of ACL-08: HLT, pages 825-833, Columbus, Ohio. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Evaluating content selection in summarization: The pyramid method", |
| "authors": [ |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ani Nenkova and Rebecca J. Passonneau. 2004. Evalu- ating content selection in summarization: The pyra- mid method. In HLT-NAACL 2004.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "The pyramid method: Incorporating human content selection variation in summarization evaluation", |
| "authors": [ |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathleen", |
| "middle": [], |
| "last": "Mckeown", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "ACM Transactions on Speech and Language Processing (TSLP)", |
| "volume": "4", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ani Nenkova, Rebecca J. Passonneau, and Kathleen McKeown. 2007. The pyramid method: Incorpo- rating human content selection variation in summa- rization evaluation. ACM Transactions on Speech and Language Processing (TSLP), 4(2):4.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "An assessment of the accuracy of automatic evaluation in summarization", |
| "authors": [ |
| { |
| "first": "Karolina", |
| "middle": [], |
| "last": "Owczarzak", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "M" |
| ], |
| "last": "Conroy", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoa", |
| "middle": [ |
| "Trang" |
| ], |
| "last": "Dang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karolina Owczarzak, John M. Conroy, Hoa Trang Dang, and Ani Nenkova. 2012a. An assessment of the accuracy of automatic evaluation in summa- rization. In Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Sum- marization, pages 1-9, Stroudsburg, PA, USA. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Assessing the effect of inconsistent assessors on summarization evaluation", |
| "authors": [ |
| { |
| "first": "Karolina", |
| "middle": [], |
| "last": "Owczarzak", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoa", |
| "middle": [ |
| "Trang" |
| ], |
| "last": "Dang", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "A" |
| ], |
| "last": "Rankel", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "M" |
| ], |
| "last": "Conroy", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics: Short Papers", |
| "volume": "2", |
| "issue": "", |
| "pages": "359--362", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karolina Owczarzak, Hoa Trang Dang, Peter A. Rankel, and John M. Conroy. 2012b. Assessing the effect of inconsistent assessors on summarization evaluation. In Proceedings of the 50th Annual Meet- ing of the Association for Computational Linguis- tics: Short Papers -Volume 2, ACL '12, pages 359- 362. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Unsupervised learning of sentence embeddings using compositional n-gram features", |
| "authors": [ |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Pagliardini", |
| "suffix": "" |
| }, |
| { |
| "first": "Prakhar", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Jaggi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "528--540", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1049" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matteo Pagliardini, Prakhar Gupta, and Martin Jaggi. 2018. Unsupervised learning of sentence embed- dings using compositional n-gram features. In Pro- ceedings of the 2018 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long Papers), pages 528-540, New Orleans, Louisiana. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Formal and functional assessment of the pyramid method for summary content evaluation", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Rebecca", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Passonneau", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Nat. Lang. Eng", |
| "volume": "16", |
| "issue": "2", |
| "pages": "107--131", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rebecca J. Passonneau. 2010. Formal and functional assessment of the pyramid method for summary content evaluation. Nat. Lang. Eng., 16(2):107-131.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Automated pyramid scoring of summaries using distributional semantics", |
| "authors": [ |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Dolores", |
| "middle": [], |
| "last": "Perin", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "143--147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rebecca J. Passonneau, Emily Chen, Weiwei Guo, and Dolores Perin. 2013. Automated pyramid scoring of summaries using distributional semantics. In ACL, pages 143-147.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Wise crowd content assessment and educational rubrics", |
| "authors": [ |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ananya", |
| "middle": [], |
| "last": "Poddar", |
| "suffix": "" |
| }, |
| { |
| "first": "Gaurav", |
| "middle": [], |
| "last": "Gite", |
| "suffix": "" |
| }, |
| { |
| "first": "Alisa", |
| "middle": [], |
| "last": "Krivokapic", |
| "suffix": "" |
| }, |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dolores", |
| "middle": [], |
| "last": "Perin", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Journal of Artificial Intelligence in Education", |
| "volume": "28", |
| "issue": "1", |
| "pages": "29--55", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rebecca J. Passonneau, Ananya Poddar, Gaurav Gite, Alisa Krivokapic, Qian Yang, and Dolores Perin. 2018. Wise crowd content assessment and educa- tional rubrics. International Journal of Artificial In- telligence in Education, 28(1):29-55.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 confer- ence on empirical methods in natural language pro- cessing (EMNLP), pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word rep- resentations. In Proceedings of the 2018 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Supervised learning of automatic pyramid for optimization-based multi-document summarization", |
| "authors": [ |
| { |
| "first": "Maxime", |
| "middle": [], |
| "last": "Peyrard", |
| "suffix": "" |
| }, |
| { |
| "first": "Judith", |
| "middle": [], |
| "last": "Eckle-Kohler", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "1084--1094", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maxime Peyrard and Judith Eckle-Kohler. 2017. Supervised learning of automatic pyramid for optimization-based multi-document summarization. In ACL 2017, pages 1084-1094.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Align, disambiguate and walk: A unified approach for measuring semantic similarity", |
| "authors": [ |
| { |
| "first": "Mohammad Taher", |
| "middle": [], |
| "last": "Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Jurgens", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1341--1351", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Taher Pilehvar, David Jurgens, and Roberto Navigli. 2013. Align, disambiguate and walk: A unified approach for measuring semantic similarity. In ACL, pages 1341-1351.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Ranking human and machine summarization systems", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Rankel", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "M" |
| ], |
| "last": "Conroy", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "V" |
| ], |
| "last": "Slud", |
| "suffix": "" |
| }, |
| { |
| "first": "Dianne", |
| "middle": [ |
| "P" |
| ], |
| "last": "O'leary", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "467--473", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Rankel, John M. Conroy, Eric V. Slud, and Di- anne P. O'Leary. 2011. Ranking human and ma- chine summarization systems. In Proceedings of the Conference on Empirical Methods in Natural Language Processing, EMNLP '11, pages 467-473, Stroudsburg, PA, USA. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "A decade of automatic content evaluation of news summaries: Reassessing the state of the art", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [ |
| "A" |
| ], |
| "last": "Rankel", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "M" |
| ], |
| "last": "Conroy", |
| "suffix": "" |
| }, |
| { |
| "first": "Hoa", |
| "middle": [ |
| "Trang" |
| ], |
| "last": "Dang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ani", |
| "middle": [], |
| "last": "Nenkova", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "131--136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter A. Rankel, John M. Conroy, Hoa Trang Dang, and Ani Nenkova. 2013. A decade of automatic con- tent evaluation of news summaries: Reassessing the state of the art. In Proceedings of the 51st Annual Meeting of the Association for Computational Lin- guistics (Volume 2: Short Papers), pages 131-136, Sofia, Bulgaria. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "A note on greedy algorithms for the maximum weighted independent set problem", |
| "authors": [ |
| { |
| "first": "Shuichi", |
| "middle": [], |
| "last": "Sakai", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitsunori", |
| "middle": [], |
| "last": "Togasaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Koichi", |
| "middle": [], |
| "last": "Yamazaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Discrete Applied Mathematics", |
| "volume": "126", |
| "issue": "2", |
| "pages": "313--322", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shuichi Sakai, Mitsunori Togasaki, and Koichi Ya- mazaki. 2003. A note on greedy algorithms for the maximum weighted independent set problem. Dis- crete Applied Mathematics, 126(2):313-322.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Using semantic technologies for formative assessment and scoring in large courses and MOOCs", |
| "authors": [ |
| { |
| "first": "Miguel", |
| "middle": [], |
| "last": "Santamar\u00eda Lancho", |
| "suffix": "" |
| }, |
| { |
| "first": "Mauro", |
| "middle": [], |
| "last": "Hern\u00e1ndez", |
| "suffix": "" |
| }, |
| { |
| "first": "Angeles", |
| "middle": [], |
| "last": "S\u00e1nchez-Elvira Paniagua", |
| "suffix": "" |
| }, |
| { |
| "first": "Jos\u00e9", |
| "middle": [ |
| "Mar\u00eda" |
| ], |
| "last": "Luz\u00f3n Encabo", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillermo", |
| "middle": [], |
| "last": "de Jorge-Botana", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Journal of Interactive Media in Education", |
| "volume": "", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Miguel Santamar\u00eda Lancho, Mauro Hern\u00e1ndez, Angeles S\u00e1nchez-Elvira Paniagua, Jos\u00e9 Mar\u00eda Luz\u00f3n Encabo, and Guillermo de Jorge-Botana. 2018. Using semantic technologies for formative as- sessment and scoring in large courses and MOOCs. Journal of Interactive Media in Education, 2018(1).", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Get to the point: Summarization with pointer-generator networks", |
| "authors": [ |
| { |
| "first": "Abigail", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "J" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1073--1083", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abigail See, Peter J. Liu, and Christopher D. Manning. 2017. Get to the point: Summarization with pointer- generator networks. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1073- 1083. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran As- sociates, Inc.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "PEAK: Pyramid evaluation via automated knowledge extraction", |
| "authors": [ |
| { |
| "first": "Qian", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [ |
| "J" |
| ], |
| "last": "Passonneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerard", |
| "middle": [], |
| "last": "De Melo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "2673--2680", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Qian Yang, Rebecca J. Passonneau, and Gerard de Melo. 2016. PEAK: Pyramid evaluation via auto- mated knowledge extraction. In AAAI, pages 2673- 2680.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "text": "Alignment of a single PyrEval SCU of weight 5", |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "uris": null, |
| "text": "Formal specification of EDUA's input graph G consisting of all segments from all segmentations of reference summary sentences (item 2), the objective (item 6), and three scores for defining the objective function that are assigned to candidate SCUs (item 3), sets of SCUs of the same weight (item 4), and a candidate pyramid (item 5).", |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "uris": null, |
| "text": "A directed Depth First Search tree for EDUA-C. Nodes are cliques representing candidate SCUs, as illustrated in", |
| "type_str": "figure" |
| }, |
| "FIGREF3": { |
| "num": null, |
| "uris": null, |
| "text": "1. Autonomous Vehicles (AV): 42 summaries, average words = 237.76 2. Cryptocurrency (CC): 37 summaries, average words = 245.84", |
| "type_str": "figure" |
| }, |
| "FIGREF4": { |
| "num": null, |
| "uris": null, |
| "text": "Alignment of a PyrEval SCU of weight 3 to segments from student summaries on autonomous vehicle.", |
| "type_str": "figure" |
| }, |
| "FIGREF5": { |
| "num": null, |
| "uris": null, |
| "text": "Dr is not empty do 6 pop e from Dr with maximum AS; 7 if notConflict(Cr, e), and \u2200 seg ijkm \u2208 e seg ijkm .commit == T rue or seg ijkm .commit == N otV alid, and seg ijkm .used == F alse then 8 to meet any of the constraints then 12 BackTrack(Cr, yr, Cr+1, P, SP ) Dr and repeat line 3 18 end 19 foreach segs ijkm \u2208 L1 do 20 if segs ijkm .commit == T rue or segs ijkm .commit == N otV alid then 21 C1 \u2190 C1 \u222a segs ijkm ; 22 segs ijk * .commit = T rue in SP ;", |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "num": null, |
| "text": "", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF3": { |
| "num": null, |
| "text": "", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF5": { |
| "num": null, |
| "text": "Pearson correlation of manual pyramid and PyrEval on four scores (raw/coverage, quality and comprehensive) compared with ROUGE-2 on coverage.", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF7": { |
| "num": null, |
| "text": "Mean accuracy, standard deviation and 95% confidence intervals on TAC 2010 Wilcoxon results for ROUGE-1, ROUGE-2 and PyrEval, using 100 bootstrapped samples of 41 of the 46 topics.", |
| "type_str": "table", |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |