| { |
| "paper_id": "N06-1048", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:45:35.773274Z" |
| }, |
| "title": "Nuggeteer: Automatic Nugget-Based Evaluation using Descriptions and Judgements", |
| "authors": [ |
| { |
| "first": "Gregory", |
| "middle": [], |
| "last": "Marton", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Infolab Group", |
| "institution": "MIT CSAIL Cambridge", |
| "location": { |
| "postCode": "02139", |
| "region": "MA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Alexey", |
| "middle": [], |
| "last": "Radul", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Infolab Group", |
| "institution": "MIT CSAIL Cambridge", |
| "location": { |
| "postCode": "02139", |
| "region": "MA" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The TREC Definition and Relationship questions are evaluated on the basis of information nuggets that may be contained in system responses. Human evaluators provide informal descriptions of each nugget, and judgements (assignments of nuggets to responses) for each response submitted by participants. While human evaluation is the most accurate way to compare systems, approximate automatic evaluation becomes critical during system development. We present Nuggeteer, a new automatic evaluation tool for nugget-based tasks. Like the first such tool, Pourpre, Nuggeteer uses words in common between candidate answer and answer key to approximate human judgements. Unlike Pourpre, but like human assessors, Nuggeteer creates a judgement for each candidatenugget pair, and can use existing judgements instead of guessing. This creates a more readily interpretable aggregate score, and allows developers to track individual nuggets through the variants of their system. Nuggeteer is quantitatively comparable in performance to Pourpre, and provides qualitatively better feedback to developers.", |
| "pdf_parse": { |
| "paper_id": "N06-1048", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The TREC Definition and Relationship questions are evaluated on the basis of information nuggets that may be contained in system responses. Human evaluators provide informal descriptions of each nugget, and judgements (assignments of nuggets to responses) for each response submitted by participants. While human evaluation is the most accurate way to compare systems, approximate automatic evaluation becomes critical during system development. We present Nuggeteer, a new automatic evaluation tool for nugget-based tasks. Like the first such tool, Pourpre, Nuggeteer uses words in common between candidate answer and answer key to approximate human judgements. Unlike Pourpre, but like human assessors, Nuggeteer creates a judgement for each candidatenugget pair, and can use existing judgements instead of guessing. This creates a more readily interpretable aggregate score, and allows developers to track individual nuggets through the variants of their system. Nuggeteer is quantitatively comparable in performance to Pourpre, and provides qualitatively better feedback to developers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The TREC Definition and Relationship questions are evaluated on the basis of information nuggets, abstract pieces of knowledge that, taken together, comprise an answer. Nuggets are described informally, with abbreviations, misspellings, etc., and each is associated with an importance judgement: 'vital' or 'okay'. 1 In some sense, nuggets are like WordNet synsets, and their descriptions are like glosses. Responses may contain more than one nugget-when they contain more than one piece of knowledge from the answer. The median scores of today's systems are frequently zero; most responses contain no nuggets (Voorhees, 2005) .", |
| "cite_spans": [ |
| { |
| "start": 315, |
| "end": 316, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 610, |
| "end": 626, |
| "text": "(Voorhees, 2005)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Human assessors decide what nuggets make up an answer based on some initial research and on pools of top system responses for each question. Answer keys list, for each nugget, its id, importance, and description; two example answer keys are shown in Figures 1 and 2. Assessors make binary decisions about each response, whether it contains each nugget. When multiple responses contain a nugget, the assessor gives credit only to the (subjectively) best response.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Using the judgements of the assessors, the final score combines the recall of the available vital nuggets, and the length (discounting whitespace) of the system response as a proxy for precision. Nuggets valued 'okay' contribute to precision by increasing the length allowance, but do not contribute to recall. The scoring formula is shown in Figure 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 343, |
| "end": 351, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Qid 87.8: 'other' question for target Enrico Fermi 1 vital belived in partical's existence and named it neutrino 2 vital Called the atomic Bomb an evil thing 3 okay Achieved the first controlled nuclear chain reaction 4 vital Designed and built the first nuclear reactor 5 okay Concluded that the atmosphere was in no real danger before Trinity test 6 okay co-developer of the atomic bomb 7 okay pointed out that the galaxy is 100,000 light years across Figure 1 : The \"answer key\" to an \"other\" question from 2005.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 454, |
| "end": 462, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The analyst is looking for links between Colombian businessmen and paramilitary forces. Specifically, the analyst would like to know of evidence that business interests in Colombia are still funding the AUC paramilitary organization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Commander of the national paramilitary umbrella organization claimed his group enjoys growing support from local and international businesses 2 vital Columbia's Chief prosecutor said he had a list of businessmen who supported right-wing paramilitary squads and warned that financing outlawed groups is a criminal offense 3 okay some landowners support AUC for protections services 4 vital Rightist militias waging a dirty war against suspected leftists in Colombia enjoy growing support from private businessmen 5 okay The AUC makes money by taxing Colombia's drug trade 6 okay The ACU is estimated to have 6000 combatants and has links to government security forces. 7 okay Many ACU fighters are former government soldiers Figure 2 : The \"answer key\" to a relationship question.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 724, |
| "end": 732, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "vital", |
| "sec_num": "1" |
| }, |
| { |
| "text": "r # of vital nuggets returned in a response a # of okay nuggets returned in a response R # of vital nuggets in the answer key l # of non-whitespace characters in the entire answer string Then \"recall\" R = r/R \"allowance\" \u03b1 = 100 \u00d7 (r + a)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "\"precision\" P = 1 if l < \u03b1 1 \u2212 l\u2212\u03b1 l otherwise Finally, the F (\u03b2) = (\u03b2 2 + 1) \u00d7 P \u00d7 R \u03b2 2 \u00d7 P + R Figure 3: Official definition of F-measure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "Automatic evaluation of systems is highly desirable. Developers need to know whether one system performs better or worse than another. Ideally, they would like to know which nuggets were lost or gained. Because there is no exhaustive list of snippets from the document collection that contain each nugget, an exact automatic solution is out of reach. Manual evaluation of system responses is too time consuming to be effective for a development cycle.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "The Qaviar system first described an approximate automatic evaluation technique using keywords, and Pourpre was the first publicly available implementation for these nugget-based tasks. (Breck et al., 2000; . Pourpre calculates an idf -or count-based, stemmed, unigram similarity between each nugget description and each tinction between 'vital' and 'okay'. candidate system response. If this similarity passes a threshold, then it uses this similarity to assign a partial value for recall and a partial length allowance, reflecting the uncertainty of the automatic judgement. Importantly, it yields a ranking of systems very similar to the official ranking (See Table 2 ).", |
| "cite_spans": [ |
| { |
| "start": 186, |
| "end": 206, |
| "text": "(Breck et al., 2000;", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 663, |
| "end": 670, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "Nuggeteer offers three important improvements:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 interpretability of the scores, as compared to official scores,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 use of known judgements for exact information about some responses, and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 information about individual nuggets, for detailed error analysis.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "Nuggeteer makes scores interpretable by making binary decisions about each nugget and each system response, just as assessors do, and then calculating the final score in the usual way. We will show that Nuggeteer's absolute error is comparable to human error, and that the 95% confidence intervals Nuggeteer reports are correct around 95% of the time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "Nuggeteer assumes that if a system response was ever judged by a human assessor to contain a particular nugget, then other identical responses also contain that nugget. When this is not true among the human judgements, we claim it is due to annotator error. This assumption allows developers to add their own judgements and have the responses they've adjudicated scored \"exactly\" by Nuggeteer.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "These features empower developers to track not only the numeric value of a change to their system, but also its effect on retrieval of each nugget.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Let", |
| "sec_num": null |
| }, |
| { |
| "text": "Nuggeteer builds one binary classifier per nugget for each question, based on n-grams (up to trigrams) in the description and optionally in any provided judgement files. The classifiers use a weight for each n-gram, an informativeness measure for each n-gram, and a threshold for accepting a response as bearing the nugget.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The idf -based weight for an n-gram w 1 ...w n is the sum of unigram idf counts from the AQUAINT corpus of English newspaper text, the corpus from which responses for the TREC tasks are drawn. We did not explore using n-gram idf s. A tf component is not meaningful because the data are so sparse.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "N -gram weight", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Let G be the set of nuggets for some question. Informativeness of an n-gram for a nugget g is calculated based on how many other nuggets in that question (\u2208 G) contain the n-gram. Let", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Informativeness", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "i(g, w 1 ...w n ) = 1 if count(g, w 1 ..w n ) > 0 0 otherwise (1) where count(g, w 1 ...w n )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Informativeness", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "is the number of occurrences of the n-gram in responses containing the nugget g.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Informativeness", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Then informativeness is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Informativeness", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "I(g, w 1 ...w n ) = 1 \u2212 g \u2208G i(g , w 1 ...w n ) |G| (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Informativeness", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "This captures the Bayesian intuition that the more outcomes a piece of evidence is associated with, the less confidence we can have in predicting the outcome based on that evidence.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Informativeness", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Nuggeteer does not guess on responses which have been judged by a human to contain a nugget, or those which have unambiguously judged not to, but assigns the known judgement. 2 For unseen responses, we determine the n-gram recall for each nugget g and candidate response w 1 ...w l by breaking the candidate into n-grams and finding the sum of scores:", |
| "cite_spans": [ |
| { |
| "start": 175, |
| "end": 176, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judgement", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "Recall(g, w 1 ...w l ) = (3) n\u22121 k=0 l\u2212k i=0 W (g, w i ...w i+k ) * I(g, w i ...w i+k )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judgement", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "The candidate is considered to contain all nuggets whose recall exceeds some threshold. Put another way, we build an n-gram language model for each nugget, and assign those nuggets whose predicted likelihood exceeds a threshold.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judgement", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "When several responses contain a nugget, Nuggeteer picks the first (instead of the best, as assessors can) for purposes of scoring.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Judgement", |
| "sec_num": "2.3" |
| }, |
| { |
| "text": "We explored a number of parameters in the scoring function: stemming, n-gram size, idf weights vs. count weights, and the effect of removing stopwords. We tested all 24 combinations, and for each experiment, we cross-validated by leaving out one submitted system, or where possible, one submitting institution (to avoid training and testing on potentially very similar systems). 3 Each experiment was performed using a range of thresholds for Equation 3 above, and we selected the best performing threshold for each data set. 4 Because the threshold was selected after crossvalidation, it is exposed to overtraining. We used a single global threshold to minimize this risk, but we have no reason to think that the thresholds for different nuggets are related.", |
| "cite_spans": [ |
| { |
| "start": 379, |
| "end": 380, |
| "text": "3", |
| "ref_id": null |
| }, |
| { |
| "start": 526, |
| "end": 527, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "Selecting thresholds as part of the training process can maximize accuracy while eliminating overtraining. We therefore explored Bayesian models for automatic threshold selection. We model assignment of nuggets to responses as caused by the scores according to a noisy threshold function, with separate false positive and false negative error rates. We varied thresholds and error rates by entire dataset, by question, or by individual nugget, evaluating them using Bayesian model selection.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Parameter Estimation", |
| "sec_num": "2.4" |
| }, |
| { |
| "text": "For our experiments, we used the definition questions from TREC2003, the 'other' questions from TREC2004 and TREC2005, and the relationship questions from TREC2005. (Voorhees, 2003; Voorhees, 2004; Voorhees, 2005) The distribution of nuggets and questions is shown for each data set in Figure 4: Percents of nuggets, binned by the number of systems that found each nugget.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 181, |
| "text": "(Voorhees, 2003;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 182, |
| "end": 197, |
| "text": "Voorhees, 2004;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 198, |
| "end": 213, |
| "text": "Voorhees, 2005)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "system responses assigned that nugget (difficulty of nuggets, in a sense) is shown in Figure 4 . More than a quarter of relationship nuggets were not found by any system. Among all data sets, many nuggets were found in none or just a few responses.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 86, |
| "end": 94, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We report correlation (R 2 ), and Kendall's \u03c4 b , following Lin and Demner-Fushman. Nuggeteer's scores are in the same range as real system scores, so we also report average root mean squared error from the official results. We 'corrected' the official judgements by assigning a nugget to a response if that response was judged to contain that nugget in any assessment for any system. Table 1 : For each data set (D=\"definition\", O=\"other\", R=\"relationship\"), the number of questions, the numbers of vital and okay nuggets, the average total number of nuggets per question, the number of participating systems, the average number of responses per system, and the average number of responses per question over all systems. Table 2 : Kendall's \u03c4 correlation between rankings generated by POURPRE/ROUGE/NUGGETEER and official scores, for each data set (D=\"definition\", O=\"other\", R=\"relationship\"). \u03c4 =1 means same order, \u03c4 =-1 means reverse order. Pourpre and Rouge scores reproduced from ). pected from the Kendall's \u03c4 comparisons, Pourpre's correlation is about the same or higher in 2003, but fares progressively worse in the subsequent tasks. To ensure that Pourpre scores correlated sufficiently with official scores, Lin and Demner-Fushman used the difference in official score between runs whose ranks Pourpre had swapped, and showed that the majority of swaps were between runs whose official scores were less than the 0.1 apart, a threshold for assessor agreement reported in (Voorhees, 2003) .", |
| "cite_spans": [ |
| { |
| "start": 1483, |
| "end": 1499, |
| "text": "(Voorhees, 2003)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 385, |
| "end": 392, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 722, |
| "end": 729, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Run R 2 R 2 \u221a", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "POURPRE NUGGETEER", |
| "sec_num": null |
| }, |
| { |
| "text": "Nuggeteer scores are not only correlated with, but actually meant to approximate, the assessment scores; thus we can use a stronger evaluation: root mean squared error of Nuggeteer scores against official scores. This estimates the average difference between the Nuggeteer score and the official score, and at 0.077, the estimate is below the 0.1 threshold. This evaluation is meant to show that the scores are \"good enough\" for experimental evaluation, and in Section 4.4 we will substantiate Lin and Demner-Fushman's observation that higher correlation scores may reflect overtraining rather than actual improvement. Accordingly, rather than reporting the best Nuggeteer scores (Kendall's \u03c4 and R 2 ) above, we follow Pourpre's lead in reporting a single variant (no stemming, bigrams) that performs well across the data sets. As with Pourpre's evaluation, the par- Figure 5 : Scatter graph of official scores plotted against Nuggeteer scores (idf term weighting, no stemming, bigrams) for each data set (all Fmeasures have \u03b2 = 3), with the Nuggeteer 95% confidence intervals on the score. Across the four datasets, 6 systems (3%) have an official score outside Nuggeteer's 95% confidence interval. ticular thresholds for each year are experimentally optimized. A scatter plot of Nuggeteer performance on the definition tasks is shown in Figure 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 868, |
| "end": 876, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 1340, |
| "end": 1348, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "POURPRE NUGGETEER", |
| "sec_num": null |
| }, |
| { |
| "text": "A hypothesis advanced with Pourpre is that bigrams, trigrams, and longer n-grams will primarily account for the fluency of an answer, rather than its semantic content, and thus not aid the scoring process. We included the option to use longer n-grams within Nuggeteer, and have found that using bigrams can yield very slightly better results than using unigrams. From inspection, bigrams sometimes capture named entity and grammatical order features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "N -gram size and stemming", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Experiments with Pourpre showed that stemming hurt slightly at peak performances. Nuggeteer has the same tendency at all n-gram sizes. Figure 6 compares Kendall's \u03c4 over the possible thresholds, n-gram lengths, and stemming. The choice of threshold matters by far the most.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 135, |
| "end": 143, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "N -gram size and stemming", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Removing stopwords or giving unit weight to all terms rather than an idf -based weight made no substantial difference in Nuggeteer's performance. Figure 6 : Fixed thresholds vs. Kendall's \u03c4 for unigrams, bigrams, or trigrams averaged over the three years of definition data using F (\u03b2 = 3).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 146, |
| "end": 154, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Term weighting and stopwords", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "log 10 P (Data|Model) optimally biased coin -2780 global threshold -2239 per-question thresholds -1977 per-nugget thresholds -1546 per-nugget errors and thr.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "-1595 Table 4 : The probabilities of the data given several models: a baseline coin, three models of different granularity with globally specified false positive and negative error rates, and a model with too many parameters, where even the error rates have per-nugget granularity. We select the most probable model, the per-nugget threshold model.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 6, |
| "end": 13, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": null |
| }, |
| { |
| "text": "We experimented with Bayesian models for automatic threshold selection. In the models, a system response contains or does not contain each nugget as a function of the response's Nuggeteer score plus noise. Table 4 shows that, as expected, the best models do not make assumptions about thresholds being equal within a question or dataset. It is interesting to note that Bayesian inference catches the overparametrization of the model where error rates vary per-nugget as well. In essence, we do not need those additional parameters to explain the variation in the data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 206, |
| "end": 213, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Thresholds", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The \u03c4 of the best selection of parameters on the 2003 data set using the model with one threshold per nugget and global errors is 0.837 ( \u221a mse=0.037).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Thresholds", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We have indeed overtrained the best threshold for this dataset (compare \u03c4 =0.879, \u221a mse=0.067 in Tables 2 and 3), suggesting that the numeric differences in Kendall's Tau shown between the Nuggeteer, Pourpre, and Rouge systems are not indicative of true performance. The Bayesian model promises settings free of overtraining, and thus more accurate judgements in terms of \u221a mse and individual nugget classification accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Thresholds", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Intuitively, if a fact is expressed by a system response, then another response with similar n-grams may also contain the same fact. To test this intuition, we tried expanding our judgement method (Equation 3) to select the maximum judgement score from among those of the nugget description and each of the system responses judged to contain that nugget. Unfortunately, the assessors did not mark which portion of a response expresses a nugget, so we also find spurious similarity, as shown in Figure 7 . The final results are not conclusively better or worse overall, and the process is far more expensive.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 494, |
| "end": 502, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Training on System Responses", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "We are currently exploring the same extension for multiple \"nugget descriptions\" generated by manually selecting the appropriate portions of system responses containing each nugget.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training on System Responses", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Because Nuggeteer makes a nugget classification for each system response, we can report precision and recall on the nugget assignments. Table 5 shows Nuggeteer's agreement rate with assessors on whether each response contains a nugget. 6", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 136, |
| "end": 143, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Judgment Precision and Recall", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "Approximate evaluation will tend to undervalue new results, simply because they may not have keyword overlap with existing nugget descriptions. We are therefore creating tools to help developers manually assess their system outputs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Novel Judgements", |
| "sec_num": "4.7" |
| }, |
| { |
| "text": "As a proof of concept, we ran Nuggeteer on the best 2005 \"other\" system (not giving Nuggeteer Table 5 : Nuggeteer agreement with official judgements, under best settings for each year, and under the default settings.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 94, |
| "end": 101, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Novel Judgements", |
| "sec_num": "4.7" |
| }, |
| { |
| "text": "the official judgements), and manualy corrected its guesses. 7 Assessment took about 6 hours, and our judgements had precision of 78% and recall of 90%, for F-measure 0.803\u00b1 0.065 (compare Table 5 ). The official score of .299 was still within the confidence interval, but now on the high side rather than the low (.257\u00b1 .07), because we found the answers quite good. In fact, we were often tempted to add new nuggets! We later learned that it was a manual run, produced by a student at the University of Maryland.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 189, |
| "end": 196, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Novel Judgements", |
| "sec_num": "4.7" |
| }, |
| { |
| "text": "Pourpre pioneered automatic nugget-based assessment for definition questions, and thus enabled a rapid experimental cycle of system development.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Nuggeteer improves on that functionality, and critically adds:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 an interpretable score, comparable to official scores, with near-human error rates,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 a reliable confidence interval on the estimated score,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 scoring known responses exactly, \u2022 support for improving the accuracy of the score through additional annotation, and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "\u2022 a more robust training process", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We have shown that Nuggeteer evaluates the definition and relationship tasks with comparable rank swap rates to Pourpre. We explored the effects of stemming, term weighting, n-gram size, stopword removal, and use of system responses for training, all with little effect. We showed that previous methods of selecting a threshold overtrained, and have question id 1901, response rank 2, response score 0.14 response text: best american classical music bears its stamp: witness aaron copland, whose \"american-sounding\" music was composed by a (the response was a sentence fragment) assigned nugget description: born brooklyn ny 1900 bigram matches: \"american classical\", \"american-sounding music\", \"best american\", \"whose american-sounding\", \"witness aaron\", \"copland whose\", \"stamp witness\", ... response containing the nugget: Even the best American classical music bears its stamp:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "witness Aaron Copland, whose ''American-sounding'' music was composed by a Brooklyn-born Jew of Russian lineage who studied in France and salted his scores with jazz-derived syncopations, Mexican folk tunes and cowboy ballads.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "NYT19981210.0106 Figure 7 : This answer to the definition question on Aaron Copland is assigned the nugget \"born brooklyn ny 1900\" at a recall score well above that of the background, despite containing none of those words.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 17, |
| "end": 25, |
| "text": "Figure 7", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "briefly described a promising way to select finer-grained thresholds automatically.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our experiences in using judgements of system responses point to the need for a better annotation of nugget content. It is possible to give Nuggeteer multiple nugget descriptions for each nugget. Manually extracting the relevant portions of correctly-judged system responses may not be an overly arduous task, and may offer higher accuracy. It would be ideal if the community\u2014including the assessors\u2014were able to create and promulgate a gold-standard set of nugget descriptions for previous years.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Nuggeteer currently supports evaluation for the TREC definition, 'other', and relationship tasks, for the AQUAINT opinion pilot 8 , and is under development for the DARPA GALE task 9 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Nuggeteer implements the pyramid scoring system from (Lin and Demner-Fushman, 2006), designed to soften the dis-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "If a response was submitted, and no response from the same system was judged to contain a nugget, then the response is considered to not contain the nugget. We normalized whitespace and case for matching previously seen responses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For TREC2003 and TREC2004, the run-tags indicate the submitting institution. For TREC2005 we did not run the nonanonymized data in time for this submission. In the TREC2005 Relationship task, RUN-1 was withdrawn.4 Thresholds for Pourpre were also selected this way.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We report only micro-averaged results, because we wish to emphasize the interpretability of Nuggeteer scores. While the correlations of macro-averaged scores with official scores may be higher (as seems to be the case for Pourpre), the actual values of the micro-averaged scores are more interpretable because they include a variance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Unlike human assessors, Nuggeteer is not able to pick the \"best\" response containing a nugget if multiple responses have it, and will instead pick the first, so these values are artifactually low. However, 2005 results may be high because these results reflect anonymized runs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We used a low threshold to make the task mostly correcting and less searching. This is clearly not how assessors should work, but is expedient for developers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank Jimmy Lin and Dina Demner-Fushman for valuable discussions, for Figure 3, and Table 2 , and for creating Pourpre. Thanks to Ozlem Uzuner and Sue Felshin for valuable comments on earlier drafts of this paper and to Boris Katz for his inspiration and support.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 87, |
| "end": 108, |
| "text": "Figure 3, and Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": "6" |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "How to evaluate your question answering system every day ... and still get real work done", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [ |
| "J" |
| ], |
| "last": "Breck", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Burger", |
| "suffix": "" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [], |
| "last": "Ferro", |
| "suffix": "" |
| }, |
| { |
| "first": "Lynette", |
| "middle": [], |
| "last": "Hirschman", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "House", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Light", |
| "suffix": "" |
| }, |
| { |
| "first": "Inderjeet", |
| "middle": [], |
| "last": "Mani", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Proceedings of the second international conference on Language Res ources and Evaluation (LREC2000)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric J. Breck, John D. Burger, Lisa Ferro, Lynette Hirschman, David House, Marc Light, and Inderjeet Mani. 2000. How to evaluate your question answer- ing system every day ... and still get real work done. In Proceedings of the second international conference on Language Res ources and Evaluation (LREC2000).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Automatically evaluating answers to definition questions", |
| "authors": [ |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Dina", |
| "middle": [], |
| "last": "Demner-Fushman", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of HLT-EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jimmy Lin and Dina Demner-Fushman. 2005. Automat- ically evaluating answers to definition questions. In Proceedings of HLT-EMNLP.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Will pyramids built of nuggets topple over?", |
| "authors": [ |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Dina", |
| "middle": [], |
| "last": "Demner-Fushman", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proceedings of HLT-NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jimmy Lin and Dina Demner-Fushman. 2006. Will pyra- mids built of nuggets topple over? In Proceedings of HLT-NAACL.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A menagerie of tracks at maryland: HARD, Enterprise, QA, and Genomics, oh my!", |
| "authors": [ |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Eileen", |
| "middle": [], |
| "last": "Abels", |
| "suffix": "" |
| }, |
| { |
| "first": "Dina", |
| "middle": [], |
| "last": "Demner-Fushman", |
| "suffix": "" |
| }, |
| { |
| "first": "Douglas", |
| "middle": [ |
| "W" |
| ], |
| "last": "Oard", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejun", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of TREC", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jimmy Lin, Eileen Abels, Dina Demner-Fushman, Dou- glas W. Oard, Philip Wu, and Yejun Wu. 2005. A menagerie of tracks at maryland: HARD, Enterprise, QA, and Genomics, oh my! In Proceedings of TREC.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Overview of the TREC 2003 question answering track", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [], |
| "last": "Voorhees", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen Voorhees. 2003. Overview of the TREC 2003 question answering track.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Overview of the TREC 2004 question answering track", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [], |
| "last": "Voorhees", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen Voorhees. 2004. Overview of the TREC 2004 question answering track.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Overview of the TREC 2005 question answering track", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [], |
| "last": "Voorhees", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen Voorhees. 2005. Overview of the TREC 2005 question answering track.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "html": null, |
| "content": "<table><tr><td>0.3</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">D2003 / 54</td><td/></tr><tr><td>0.25</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">O2004 / 63 O2005 / 72</td><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"2\">R2005 / 10</td><td/></tr><tr><td>0.2</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>0.15</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>0.1</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>0.05</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>0</td><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>0</td><td>2</td><td>4</td><td>6</td><td>8</td><td>10</td><td>12</td><td>14</td><td>16</td><td>18</td><td>20</td><td>22</td><td>24</td><td>26</td><td>28</td><td>!30</td></tr></table>", |
| "text": "The number of nuggets by number of", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF3": { |
| "html": null, |
| "content": "<table><tr><td>: Correlation (R 2 ) and Root Mean Squared \u221a mse) between scores generated by Pour-Error (</td></tr><tr><td>pre/Nuggeteer and official scores, for the same set-</td></tr><tr><td>tings as the \u03c4 comparison above.</td></tr></table>", |
| "text": "", |
| "type_str": "table", |
| "num": null |
| } |
| } |
| } |
| } |