| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T11:30:45.916446Z" |
| }, |
| "title": "\"This is a Problem, Don't You Agree?\" Framing and Bias in Human Evaluation for Natural Language Generation", |
| "authors": [ |
| { |
| "first": "Stephanie", |
| "middle": [], |
| "last": "Schoch", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Virginia", |
| "location": { |
| "postCode": "22904", |
| "settlement": "Charlottesville", |
| "region": "VA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Diyi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Georgia Institute of Technology", |
| "location": { |
| "postCode": "30332", |
| "settlement": "Atlanta", |
| "region": "GA" |
| } |
| }, |
| "email": "diyi.yang@cc.gatech.edu" |
| }, |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Virginia", |
| "location": { |
| "postCode": "22904", |
| "settlement": "Charlottesville", |
| "region": "VA" |
| } |
| }, |
| "email": "yangfeng@virginia.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Despite recent efforts reviewing current human evaluation practices for natural language generation (NLG) research, the lack of reported question wording and potential for framing effects or cognitive biases influencing results has been widely overlooked. In this opinion paper, we detail three possible framing effects and cognitive biases that could be imposed on human evaluation in NLG. Based on this, we make a call for increased transparency for human evaluation in NLG and propose the concept of human evaluation statements. We make several recommendations for design details to report that could potentially influence results, such as question wording, and suggest that reporting pertinent design details can help increase comparability across studies as well as reproducibility of results.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Despite recent efforts reviewing current human evaluation practices for natural language generation (NLG) research, the lack of reported question wording and potential for framing effects or cognitive biases influencing results has been widely overlooked. In this opinion paper, we detail three possible framing effects and cognitive biases that could be imposed on human evaluation in NLG. Based on this, we make a call for increased transparency for human evaluation in NLG and propose the concept of human evaluation statements. We make several recommendations for design details to report that could potentially influence results, such as question wording, and suggest that reporting pertinent design details can help increase comparability across studies as well as reproducibility of results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Human evaluation is widely considered the gold standard for evaluating natural language generation (NLG), in part because existing automatic metrics display low correlations with human judgments (Belz and Reiter, 2006; Liu et al., 2016; Reiter and Belz, 2009; Novikova et al., 2017) . As a result, human evaluation is frequently used to demonstrate state-of-the-art results for generative tasks. However, this has the potential to be problematic due to the lack of consistency in how human evaluation is carried out (Gkatzia and Mahamood, 2015; van der Lee et al., 2019) . Beyond producing variability in results, this has implications for validity of human evaluation results due to the influence of evaluation design choices. To address this, a number of papers have proposed recommended best practices for different aspects of NLG human evaluation (Amidei et al., 2019; van der Lee et al., 2019) . However, overlooked have been the issues of transparency and the potential for question framing effects and other cognitive biases influencing results.", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 218, |
| "text": "(Belz and Reiter, 2006;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 219, |
| "end": 236, |
| "text": "Liu et al., 2016;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 237, |
| "end": 259, |
| "text": "Reiter and Belz, 2009;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 260, |
| "end": 282, |
| "text": "Novikova et al., 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 516, |
| "end": 544, |
| "text": "(Gkatzia and Mahamood, 2015;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 545, |
| "end": 570, |
| "text": "van der Lee et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 851, |
| "end": 872, |
| "text": "(Amidei et al., 2019;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 873, |
| "end": 898, |
| "text": "van der Lee et al., 2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Cognitive biases refer to heuristics that arise in judgment or decision-making (Tversky and Kahneman, 1974) . Framing effects (Tversky and Kahneman, 1981) are types of cognitive biases that refer to how something is asked as opposed to what is asked. In the context of natural language generation research, these effects refer to the wording of questions asked and accompanying task descriptions and instructions, as opposed to what the target quality is that is being assessed.", |
| "cite_spans": [ |
| { |
| "start": 79, |
| "end": 107, |
| "text": "(Tversky and Kahneman, 1974)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 126, |
| "end": 154, |
| "text": "(Tversky and Kahneman, 1981)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this opinion paper, we demonstrate the lack of transparency in NLG human evaluation through empirically demonstrating the extent to which question wording is not included in evaluation design details, finding that only 15.68% of human evaluation studies in papers we surveyed explicitly reported the actual questions asked. We discuss three types of framing and cognitive biases that could influence results in NLG human evaluation: positive and negative framing, demand characteristics and response bias, and anchoring and adjusting. Using concrete examples from studies in human-computer interaction and psychology and hypothetical examples for NLG, we demonstrate the importance of including question wording when using human evaluation in NLG. Finally, we propose the concept of \"human evaluation statements\" and suggest a set of design parameters that should be included pertaining to human evaluation study design.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There is currently no standardized approach or consensus for how human evaluation for NLG should be carried out (Gkatzia and Mahamood, 2015; van der Lee et al., 2019) . As a result, it is currently very difficult to compare results across different studies due to the variability in evaluation design. Past efforts to address this have included overviews of evaluation design practices used during a particular time span (Amidei et al., 2018; Gkatzia and Mahamood, 2015; van der Lee et al., 2019) with corresponding recommendations for best practices (van der Lee et al., 2019) and empirical studies or overviews investigating the effects of different question types and scales (Amidei et al., 2019; Novikova et al., 2018) . Consistently, these studies have approached variability as a factor impacting the reliability of results.", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 140, |
| "text": "(Gkatzia and Mahamood, 2015;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 141, |
| "end": 166, |
| "text": "van der Lee et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 421, |
| "end": 442, |
| "text": "(Amidei et al., 2018;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 443, |
| "end": 470, |
| "text": "Gkatzia and Mahamood, 2015;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 471, |
| "end": 496, |
| "text": "van der Lee et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 551, |
| "end": 577, |
| "text": "(van der Lee et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 678, |
| "end": 699, |
| "text": "(Amidei et al., 2019;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 700, |
| "end": 722, |
| "text": "Novikova et al., 2018)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transparency in Human Evaluation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "However, yet to be addressed is the lack of transparency in how studies are designed and reported, which has implications for comparability across studies, as well as replicability and validity of results. While transparency has yet to be addressed in human evaluation, transparency of data, models, and automatic evaluations is a growing topic of concern in the machine learning and natural language processing communities. Bender and Friedman (2018) proposed the usage of \"data statements\" for mitigating bias and increasing transparency in natural language processing and Gebru et al. (2020) proposed \"datasheets for datasets\" for increased data transparency and accountability. Transparency in model reporting has also been advocated for. Mitchell et al. (2019) proposed the usage of \"model cards\" containing model performance characteristics for transparent model reporting. Pertaining to model evaluation, there have been numerous criticisms of task leaderboards (Linzen, 2020; Rogers, 2019) which has led to calls for transparency through reporting of a more informative suite of metrics (Dodge et al., 2019; Ethayarajh and Jurafsky, 2020) .", |
| "cite_spans": [ |
| { |
| "start": 575, |
| "end": 594, |
| "text": "Gebru et al. (2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 743, |
| "end": 765, |
| "text": "Mitchell et al. (2019)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1095, |
| "end": 1115, |
| "text": "(Dodge et al., 2019;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1116, |
| "end": 1146, |
| "text": "Ethayarajh and Jurafsky, 2020)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transparency in Human Evaluation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Driving the call for transparency has been the increased attention to issues of reproducibility. Crane (2018) identified a number of controllable environmental settings that are widely unreported in question answering research and demonstrated the impact they have on reproducibility of results, including whether or not a model would be considered state-of-the-art. When we consider the impact of environmental variables (Crane, 2018) , computational budget including number of hyperparameter search trials (Dodge et al., 2019) , and other factors that can impact results, we can draw comparisons to human evaluation design details that could similarly impact results.", |
| "cite_spans": [ |
| { |
| "start": 422, |
| "end": 435, |
| "text": "(Crane, 2018)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 508, |
| "end": 528, |
| "text": "(Dodge et al., 2019)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transparency in Human Evaluation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We suggest that the design details of human evaluations can be thought of analogously to model hyperparameters, in that careful tuning can directly influence results. It is currently an open question as to what parameters in human evaluation could influence results, but without reporting pertinent details, we cannot begin to make comparisons across studies, or reproduce results. For example, van der Lee et al. (2019) suggested their findings pertaining to sample sizes and demographics in a survey of 89 papers using human evaluation for NLG may not reflect reality, since only 55% of the papers reported the number of participants and 18% reported demographics. An additional design parameter that we believe is largely unreported but could have an immense impact on results is that of the actual wording of questions presented to participants. More specifically, if questions are framed in ways that elicit various cognitive biases such as framing effects, response biases, or anchoring and adjustment effects, results may reflect question design rather than model performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transparency in Human Evaluation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Empirical Analysis To identify the extent to which question wording is unreported in the details of human evaluation for NLG, we collected a set of 81 NLG papers published in ACL (n = 33), EMNLP (n = 30), NAACL (n = 11), and INLG (n = 7) in 2019 and 2020, randomly sampled from all papers containing the keyword \"generation\" in the title. 1 Of these, 51 (62.96%) included human evaluation as a means to assess model performance. However, only 8 of the 51 studies (15.68%) that included human evaluation reported the actual wording and setup of the questions that were asked, either written out (n = 4), included as a figure displaying the prompt (n = 3), or both (n = 1). Question wording does not only have implications for increasing transparency for the purposes of comparability of results across studies, but has further implications for the validity and reproducibility of results. In the following section, we bring attention to the potential of framing effects and other cognitive biases to impact the results of human evaluation for NLG, and use this to make a case for reporting question wording as part of study design.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Transparency in Human Evaluation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Framing (Tversky and Kahneman, 1981) refers to how something is asked as opposed to what is asked. In human evaluation for NLG, this would be reflected in the question wording or instructions provided to participants. In this section, we detail three possible framing effects and cognitive biases that could influence the results of human evaluation: positive and negative framing, demand characteristics, and anchoring and adjusting. As question wording is extensively not reported in human evaluation in NLG, rather than providing empirical examples we provide hypothetical examples of the forms these effects could take when question wording is not reported.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 36, |
| "text": "(Tversky and Kahneman, 1981)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Framing Effects and Cognitive Biases", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Seminal work on the influence of framing in decision-making by Tversky and Kahneman (1981) demonstrated that people are more likely to make choices that are framed positively (in terms of gains) as opposed to negatively (in terms of losses) due to the increased perceived risk associated with choosing potential losses. This effect has been extended and further demonstrated as \"loss aversion\" in the field of economics (Levin et al., 2002) . In our context, the concept of framing based on positive or negative aspects can be extended and viewed as the framing of questions to induce positive or negative priming effects, in which participants are primed to view a choice as having more positive aspects than another, i.e. as the better option. For example, if fluency is the target quality in an NLG evaluation, we can consider it the positive aspect.", |
| "cite_spans": [ |
| { |
| "start": 63, |
| "end": 90, |
| "text": "Tversky and Kahneman (1981)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 420, |
| "end": 440, |
| "text": "(Levin et al., 2002)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Positive and Negative Framing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We demonstrate the potential for the effects of imposing positive or negative framing and priming on questions in NLG human evaluation with the following example: Suppose a researcher is evaluating sentence A from their generative model against sentence B from a baseline model. The researcher asks participants to respond to the question:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Positive and Negative Framing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\"How much more fluent is sentence A versus sentence B?\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Positive and Negative Framing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Framing in this manner can prime participants to view sentence A as having more positive aspects, in this case, more fluency, as opposed to neutrally framed questions such as \"How do sentence A and sentence B compare in terms of fluency?\". Positive or negative framing could therefore have a direct impact on the results of the study, in other words, the results could reflect the framing rather than the actual model performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Positive and Negative Framing", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Demand characteristics are response biases that refer to cues in a study design that may reveal a researcher's hypothesis to the participants, resulting in adjusting responses to meet the expectations of the researcher (Orne, 1962) . Dell et al. (2012) demonstrated participant response bias due to interviewer demand characteristics in evaluating humancomputer interactive systems. Specifically, when participants knew which artifact was developed by an interviewer, they were consistently more likely to report preference for it, even when it was inferior. For human evaluation in NLG, if questions are framed in a way that cues the evaluators as to which output corresponds to the researcher's system, it is probable that similar response bias could be elicited. As an example, in the context of NLG, this could take form as follows:", |
| "cite_spans": [ |
| { |
| "start": 219, |
| "end": 231, |
| "text": "(Orne, 1962)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 234, |
| "end": 252, |
| "text": "Dell et al. (2012)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Demand Characteristics", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "A researcher has developed style transfer model A to generate formal sentences, and is evaluating sentence A from their generative model against sentence B from a baseline model. Unconsciously aware of model A's artifacts, in this case, as a system that only uses \".\" as end punctuation, the researcher states 'We consider sentences that end with \".\" as more formal than sentences that end with \"!\"' in the task description.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Demand Characteristics", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Framing the question in this manner subjects the responses to demand characteristics as the participants are aware of the researcher's expectations that they will rank sentences ending with \".\" as more formal than sentences with alternative end punctuation. Due to the fact that most studies are conducted via crowdsourcing platforms in which annotators receive compensation for responses, this adds an additional incentive to perform in accordance with the researcher's expectations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Demand Characteristics", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Anchoring and adjusting is a cognitive bias in which participants anchor their perceptions based on an initial value and adjust subsequent evaluations accordingly (Tversky and Kahneman, 1974) . Gehlbach and Barge (2012) demonstrated anchoring and adjustment effects on attitude-opinion questionnaires in which participants insufficiently adjusted responses on adjacent questionnaire items measuring similar constructs, which affected scale reliability. In the context of human evaluation for NLG, we present the following scenario in which we extend the concept of framing to include framing of task description and instructions displayed alongside questions to elicit advantageous anchoring effects:", |
| "cite_spans": [ |
| { |
| "start": 163, |
| "end": 191, |
| "text": "(Tversky and Kahneman, 1974)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Anchoring and Adjusting", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "A researcher has developed style transfer model A to generate formal sentences. As model design is an iterative process, the researcher has seen model A's outputs throughout the model design process. When selecting example formal sentences to include in the evaluation task description and instructions displayed to participants, the researcher inadvertently selects sentences that look similar to the types of outputs generated by model A. These examples become an anchor for participants in evaluating sentences generated by model A and model B.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Anchoring and Adjusting", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "By unintentionally framing the question instructions in a way that introduces an advantageous anchor, the results could reflect the overall framing and bias that is introduced rather than the objective model performance differences.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Anchoring and Adjusting", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Throughout the previous sections, we have provided examples demonstrating the potential question framing that could elicit human evaluation results for NLG that are biased in favor of a particular model. While these examples may at first glance seem implausible and only possible in cases of conscious (explicit) researcher bias in favor of a particular model, it is important to take into consideration the potential for researchers to possess unconscious (implicit) bias whether due to underlying expectations for a model's performance or due to influences of publication bias. During the peer review process reviewers may default to heuristics to simplify the task of review, including rejecting papers where models do not achieve SOTA results (Rogers and Augenstein, 2020) . This can implicitly motivate and incentivize researchers to show their model performs best on the gold standard of evaluation for NLG: human evaluation. We use this example to demonstrate the potential for the current lack of evaluation design details, in particular question wording, to leave the door open for results that have been subject to framing effects and bias which threatens the validity of the results.", |
| "cite_spans": [ |
| { |
| "start": 747, |
| "end": 776, |
| "text": "(Rogers and Augenstein, 2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation Design Statements", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We draw attention to these effects in an effort to both increase researcher awareness to their own evaluation study design, decrease the potential for questions framed in ways in which results reflect question framing rather than actual model performance, and increase the amount of transparency in human evaluation to aid in study replicability and comparability. We also suggest that the results for studies which do not include exact question wording should be viewed through a skeptical lens as though they could contain researcher imparted bias that could significantly impact results. Further, we use our demonstration of the potential for framing effects and biases in question wording as support for a call for transparency in human evaluation for NLG through the inclusion of study design details, which can aid in the development of more robust human evaluation guidelines.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation Design Statements", |
| "sec_num": "4" |
| }, |
| { |
| "text": "When guidelines exist that can reduce the complexity and time required to design human evaluation studies, they are used. For the evaluation of paraphrase generation, Li et al. (2018) included the human evaluation guidelines they used as an appendix, which have since been adopted by other studies (Qian et al., 2019) . This example shows that guidelines for human evaluation have value: guidelines make life easier and people often adopt those that are available. As such, we make the case for increased transparency in human evaluation with respect to design details that could potentially influence results. In an effort to take preliminary steps towards human evaluation guidelines, we propose the concept of \"human evaluation design statements\" akin to data statements (Bender and Friedman, 2018; Gebru et al., 2020) or model cards (Mitchell et al., 2019) . Determining what should be included on such statements will require additional input, perspectives, and empirical evidence. As a preliminary effort, we provide a list of design parameters that we believe could influence results and should therefore be included when describing human evaluation design setup:", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 183, |
| "text": "Li et al. (2018)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 298, |
| "end": 317, |
| "text": "(Qian et al., 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 774, |
| "end": 801, |
| "text": "(Bender and Friedman, 2018;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 802, |
| "end": 821, |
| "text": "Gebru et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 837, |
| "end": 860, |
| "text": "(Mitchell et al., 2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation Design Statements", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Question Design: Types, Scales, Wording Basic inclusions pertaining to question design are question type and corresponding scales due to the variability that can arise based on these design decisions (Novikova et al., 2018) . Further, as we demonstrated in this paper, question wording also has the potential to influence results. Because of the potential for empirical differences due to how questions are framed, it is imperative to report question wording as part of design details, especially in studies where researchers use human evaluation to claim state-of-the-art performance.", |
| "cite_spans": [ |
| { |
| "start": 200, |
| "end": 223, |
| "text": "(Novikova et al., 2018)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation Design Statements", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Question Presentation: Ordering, Questions per Annotator Ordering effects are influences on results that occur based on the order in which a sequence of questions is presented (Strack, 1992) .", |
| "cite_spans": [ |
| { |
| "start": 176, |
| "end": 190, |
| "text": "(Strack, 1992)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation Design Statements", |
| "sec_num": "4" |
| }, |
| { |
| "text": "As such, reporting question presentation order or balancing increases transparency as well as study comparability and reproducibility. In addition to ordering effects, response fatigue can occur when the quality and integrity of evaluations degrades as participants tire of a task (Lavrakas, 2008) . Due to the possibility of response fatigue effects, statistics regarding the number of questions per annotator should be reported to increase design transparency in terms of potential influences on variability in results.", |
| "cite_spans": [ |
| { |
| "start": 281, |
| "end": 297, |
| "text": "(Lavrakas, 2008)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation Design Statements", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Target Criteria: Definitions It makes intuitive sense that what is actually being measured in human evaluation would influence results, and further that measuring the same or different target criteria in different studies would impact the comparability of the results. However, naming conventions and definitions are inconsistent and may exhibit significant overlap, such as with naturalness, grammaticality, and fluency (Mir et al., 2019; Novikova et al., 2018) . As such, what is being measured should be compared across studies based on definition and the resulting participant understanding of the task, rather than simply based on naming convention: studies may measure the same aspect under different names or different aspects under the same name. Studies consistently reporting this detail in human evaluation is also a preliminary step towards agreed upon task definitions.", |
| "cite_spans": [ |
| { |
| "start": 421, |
| "end": 439, |
| "text": "(Mir et al., 2019;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 440, |
| "end": 462, |
| "text": "Novikova et al., 2018)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation Design Statements", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Annotators: Demographics, Background, Recruitment, Compensation Understanding and reporting the details of the human factor in human evaluation is intuitively one of the most important sets of details to include in terms of transparency and potential influence on results. Inclusions involve who annotators are in terms of demographics and background, how they were recruited, and whether or not annotators received fair compensation (Silberman et al., 2018) . As an example impact, annotator familiarity with the target language for a task might largely influence judgments towards biases, fluency, or grammatical correctness. The human factor in human evaluation, our annotators, is central to and interacts with every other detail of study design, and is therefore vital to report.", |
| "cite_spans": [ |
| { |
| "start": 434, |
| "end": 458, |
| "text": "(Silberman et al., 2018)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation Design Statements", |
| "sec_num": "4" |
| }, |
| { |
| "text": "While this list is not comprehensive, we believe these design details could have influences on evaluation results, and as such, are important details to consider and include.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation Design Statements", |
| "sec_num": "4" |
| }, |
| { |
| "text": "One of the factors that could limit the potential for widespread adoption of human evaluation statements that include human evaluation design details is the page limits imposed for many journal and conference papers. One approach to combat this is to include the details of human evaluation in Supplementary Material that accompanies papers. However, we suggest that many details in human evaluation design are central to understanding the meaningfulness of results, and further suggest that there will need to be community agreed upon guidelines for what details must be included within main papers. We further suggest that a complementary strategy would be the eventual development of comprehensive, agreed upon human evaluation guidelines that could operate similarly to \"long-form\" data statements (Bender and Friedman, 2018) . In this scenario, guidelines could be referenced, summarized briefly, and appended with pertinent additional study details as was proposed with \"short-form\" data statements (Bender and Friedman, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 802, |
| "end": 829, |
| "text": "(Bender and Friedman, 2018)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1005, |
| "end": 1032, |
| "text": "(Bender and Friedman, 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Other Considerations", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this paper, we demonstrate the extent to which including the details of human evaluation is limited in natural language generation. We further demonstrate the need for including design details such as question wording using existing work in psychology and human-computer interaction on framing and cognitive biases, and cite the recent push for transparency with datasets and model details, such as details of hyperparameter tuning, as support for similar efforts to increase transparency in human evaluation. Based on these observations, we propose working towards human evaluation statements and make several suggested inclusions, while noting the future need for additional perspectives and direct empirical support.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Proceedings of the 1st Workshop on Evaluating NLG Evaluation, pages 10-16, Online (Dublin, Ireland), December 2020.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Data is available at https://github.com/ stephanieschoch/framing-bias-nlg-eval", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Evaluation methodologies in automatic question generation", |
| "authors": [ |
| { |
| "first": "Jacopo", |
| "middle": [], |
| "last": "Amidei", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Piwek", |
| "suffix": "" |
| }, |
| { |
| "first": "Alistair", |
| "middle": [], |
| "last": "Willis", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 11th International Conference on Natural Language Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "307--317", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacopo Amidei, Paul Piwek, and Alistair Willis. 2018. Evaluation methodologies in automatic question generation 2013-2018. In Proceedings of the 11th International Conference on Natural Language Gen- eration, pages 307-317, Tilburg University, The Netherlands. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "The use of rating and Likert scales in natural language generation human evaluation tasks: A review and some recommendations", |
| "authors": [ |
| { |
| "first": "Jacopo", |
| "middle": [], |
| "last": "Amidei", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Piwek", |
| "suffix": "" |
| }, |
| { |
| "first": "Alistair", |
| "middle": [], |
| "last": "Willis", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "397--402", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacopo Amidei, Paul Piwek, and Alistair Willis. 2019. The use of rating and Likert scales in natural lan- guage generation human evaluation tasks: A review and some recommendations. In Proceedings of the 12th International Conference on Natural Language Generation, pages 397-402, Tokyo, Japan. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Comparing automatic and human evaluation of NLG systems", |
| "authors": [ |
| { |
| "first": "Anja", |
| "middle": [], |
| "last": "Belz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "11th Conference of the European Chapter", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anja Belz and Ehud Reiter. 2006. Comparing auto- matic and human evaluation of NLG systems. In 11th Conference of the European Chapter of the Association for Computational Linguistics, Trento, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Data statements for natural language processing: Toward mitigating system bias and enabling better science", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bender", |
| "suffix": "" |
| }, |
| { |
| "first": "Batya", |
| "middle": [], |
| "last": "Friedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "587--604", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily M. Bender and Batya Friedman. 2018. Data statements for natural language processing: Toward mitigating system bias and enabling better science. Transactions of the Association for Computational Linguistics, 6:587-604.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Questionable answers in question answering research: Reproducibility and variability of published results", |
| "authors": [ |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Crane", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "241--252", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matt Crane. 2018. Questionable answers in question answering research: Reproducibility and variability of published results. Transactions of the Association for Computational Linguistics, 6:241-252.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "yours is better!\": Participant response bias in hci", |
| "authors": [ |
| { |
| "first": "Nicola", |
| "middle": [], |
| "last": "Dell", |
| "suffix": "" |
| }, |
| { |
| "first": "Vidya", |
| "middle": [], |
| "last": "Vaidyanathan", |
| "suffix": "" |
| }, |
| { |
| "first": "Indrani", |
| "middle": [], |
| "last": "Medhi", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Cutrell", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Thies", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the SIGCHI Conference on Human Factors in Computing Systems, CHI '12", |
| "volume": "", |
| "issue": "", |
| "pages": "1321--1330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nicola Dell, Vidya Vaidyanathan, Indrani Medhi, Ed- ward Cutrell, and William Thies. 2012. \"yours is better!\": Participant response bias in hci. In Proceedings of the SIGCHI Conference on Hu- man Factors in Computing Systems, CHI '12, page 1321-1330, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Show your work: Improved reporting of experimental results", |
| "authors": [ |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Dodge", |
| "suffix": "" |
| }, |
| { |
| "first": "Suchin", |
| "middle": [], |
| "last": "Gururangan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dallas", |
| "middle": [], |
| "last": "Card", |
| "suffix": "" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2185--2194", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jesse Dodge, Suchin Gururangan, Dallas Card, Roy Schwartz, and Noah A. Smith. 2019. Show your work: Improved reporting of experimental results. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 2185- 2194, Hong Kong, China. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Utility is in the eye of the user: A critique of nlp leaderboards", |
| "authors": [ |
| { |
| "first": "Kawin", |
| "middle": [], |
| "last": "Ethayarajh", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2009.13888" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kawin Ethayarajh and Dan Jurafsky. 2020. Utility is in the eye of the user: A critique of nlp leaderboards. arXiv preprint arXiv:2009.13888.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Hal Daum\u00e9 III au2, and Kate Crawford. 2020. Datasheets for datasets", |
| "authors": [ |
| { |
| "first": "Timnit", |
| "middle": [], |
| "last": "Gebru", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Morgenstern", |
| "suffix": "" |
| }, |
| { |
| "first": "Briana", |
| "middle": [], |
| "last": "Vecchione", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [ |
| "Wortman" |
| ], |
| "last": "Vaughan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanna", |
| "middle": [], |
| "last": "Wallach", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timnit Gebru, Jamie Morgenstern, Briana Vec- chione, Jennifer Wortman Vaughan, Hanna Wal- lach, Hal Daum\u00e9 III au2, and Kate Crawford. 2020. Datasheets for datasets.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Anchoring and adjusting in questionnaire responses", |
| "authors": [ |
| { |
| "first": "Hunter", |
| "middle": [], |
| "last": "Gehlbach", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Barge", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Basic and Applied Social Psychology", |
| "volume": "34", |
| "issue": "5", |
| "pages": "417--433", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hunter Gehlbach and Scott Barge. 2012. Anchoring and adjusting in questionnaire responses. Basic and Applied Social Psychology, 34(5):417-433.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A snapshot of NLG evaluation practices 2005 -2014", |
| "authors": [ |
| { |
| "first": "Dimitra", |
| "middle": [], |
| "last": "Gkatzia", |
| "suffix": "" |
| }, |
| { |
| "first": "Saad", |
| "middle": [], |
| "last": "Mahamood", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 15th European Workshop on Natural Language Generation (ENLG)", |
| "volume": "", |
| "issue": "", |
| "pages": "57--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dimitra Gkatzia and Saad Mahamood. 2015. A snap- shot of NLG evaluation practices 2005 -2014. In Proceedings of the 15th European Workshop on Nat- ural Language Generation (ENLG), pages 57-60, Brighton, UK. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Encyclopedia of survey research methods", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lavrakas", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul J Lavrakas. 2008. Encyclopedia of survey re- search methods. Sage Publications.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Best practices for the human evaluation of automatically generated text", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Van Der Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Albert", |
| "middle": [], |
| "last": "Gatt", |
| "suffix": "" |
| }, |
| { |
| "first": "Sander", |
| "middle": [], |
| "last": "Emiel Van Miltenburg", |
| "suffix": "" |
| }, |
| { |
| "first": "Emiel", |
| "middle": [], |
| "last": "Wubben", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Krahmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "355--368", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chris van der Lee, Albert Gatt, Emiel van Miltenburg, Sander Wubben, and Emiel Krahmer. 2019. Best practices for the human evaluation of automatically generated text. In Proceedings of the 12th Interna- tional Conference on Natural Language Generation, pages 355-368, Tokyo, Japan. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A tale of two pizzas: Building up from a basic product versus scaling down from a fully-loaded product", |
| "authors": [ |
| { |
| "first": "Judy", |
| "middle": [], |
| "last": "Irwin P Levin", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Schreiber", |
| "suffix": "" |
| }, |
| { |
| "first": "Gary", |
| "middle": [ |
| "J" |
| ], |
| "last": "Lauriola", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gaeth", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Marketing Letters", |
| "volume": "13", |
| "issue": "4", |
| "pages": "335--344", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Irwin P Levin, Judy Schreiber, Marco Lauriola, and Gary J Gaeth. 2002. A tale of two pizzas: Building up from a basic product versus scaling down from a fully-loaded product. Marketing Letters, 13(4):335- 344.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Paraphrase generation with deep reinforcement learning", |
| "authors": [ |
| { |
| "first": "Zichao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lifeng", |
| "middle": [], |
| "last": "Shang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3865--3878", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zichao Li, Xin Jiang, Lifeng Shang, and Hang Li. 2018. Paraphrase generation with deep reinforce- ment learning. In Proceedings of the 2018 Confer- ence on Empirical Methods in Natural Language Processing, pages 3865-3878, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "How can we accelerate progress towards human-like linguistic generalization?", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tal Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5210--5217", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tal Linzen. 2020. How can we accelerate progress to- wards human-like linguistic generalization? In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5210- 5217, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "How NOT to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation", |
| "authors": [ |
| { |
| "first": "Chia-Wei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Noseworthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Charlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2122--2132", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chia-Wei Liu, Ryan Lowe, Iulian Serban, Mike Nose- worthy, Laurent Charlin, and Joelle Pineau. 2016. How NOT to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 2122-2132, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Evaluating style transfer for text", |
| "authors": [ |
| { |
| "first": "Remi", |
| "middle": [], |
| "last": "Mir", |
| "suffix": "" |
| }, |
| { |
| "first": "Bjarke", |
| "middle": [], |
| "last": "Felbo", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Obradovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Iyad", |
| "middle": [], |
| "last": "Rahwan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "495--504", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Remi Mir, Bjarke Felbo, Nick Obradovich, and Iyad Rahwan. 2019. Evaluating style transfer for text. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 495-504, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Model cards for model reporting", |
| "authors": [ |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Simone", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Zaldivar", |
| "suffix": "" |
| }, |
| { |
| "first": "Parker", |
| "middle": [], |
| "last": "Barnes", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucy", |
| "middle": [], |
| "last": "Vasserman", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Hutchinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Elena", |
| "middle": [], |
| "last": "Spitzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Deborah", |
| "middle": [], |
| "last": "Inioluwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Timnit", |
| "middle": [], |
| "last": "Raji", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gebru", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Conference on Fairness, Accountability, and Transparency, FAT* '19", |
| "volume": "", |
| "issue": "", |
| "pages": "220--229", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru. 2019. Model cards for model reporting. In Proceedings of the Conference on Fairness, Account- ability, and Transparency, FAT* '19, page 220-229, New York, NY, USA. Association for Computing Machinery.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Why we need new evaluation metrics for NLG", |
| "authors": [ |
| { |
| "first": "Jekaterina", |
| "middle": [], |
| "last": "Novikova", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Du\u0161ek", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanda", |
| "middle": [ |
| "Cercas" |
| ], |
| "last": "Curry", |
| "suffix": "" |
| }, |
| { |
| "first": "Verena", |
| "middle": [], |
| "last": "Rieser", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2241--2252", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, Amanda Cer- cas Curry, and Verena Rieser. 2017. Why we need new evaluation metrics for NLG. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2241-2252, Copenhagen, Denmark. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "RankME: Reliable human ratings for natural language generation", |
| "authors": [ |
| { |
| "first": "Jekaterina", |
| "middle": [], |
| "last": "Novikova", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Du\u0161ek", |
| "suffix": "" |
| }, |
| { |
| "first": "Verena", |
| "middle": [], |
| "last": "Rieser", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "72--78", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, and Verena Rieser. 2018. RankME: Reliable human ratings for natural language generation. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 72-78, New Orleans, Louisiana. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "On the social psychology of the psychological experiment: With particular reference to demand characteristics and their implications", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Martin T Orne", |
| "suffix": "" |
| } |
| ], |
| "year": 1962, |
| "venue": "American psychologist", |
| "volume": "17", |
| "issue": "11", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin T Orne. 1962. On the social psychology of the psychological experiment: With particular ref- erence to demand characteristics and their implica- tions. American psychologist, 17(11):776.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Exploring diverse expressions for paraphrase generation", |
| "authors": [ |
| { |
| "first": "Lihua", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Lin", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "Weinan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3173--3182", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lihua Qian, Lin Qiu, Weinan Zhang, Xin Jiang, and Yong Yu. 2019. Exploring diverse expressions for paraphrase generation. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3173-3182, Hong Kong, China. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "An investigation into the validity of some metrics for automatically evaluating natural language generation systems", |
| "authors": [ |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| }, |
| { |
| "first": "Anja", |
| "middle": [], |
| "last": "Belz", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Computational Linguistics", |
| "volume": "35", |
| "issue": "4", |
| "pages": "529--558", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ehud Reiter and Anja Belz. 2009. An investigation into the validity of some metrics for automatically evalu- ating natural language generation systems. Compu- tational Linguistics, 35(4):529-558.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "How the transformers broke nlp leaderboards", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [ |
| "Rogers" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Rogers. 2019. How the transformers broke nlp leaderboards.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "What can we do to improve peer review in nlp?", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabelle", |
| "middle": [], |
| "last": "Augenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Rogers and Isabelle Augenstein. 2020. What can we do to improve peer review in nlp?", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Responsible research with crowds: pay crowdworkers at least minimum wage", |
| "authors": [ |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Six Silberman", |
| "suffix": "" |
| }, |
| { |
| "first": "Rochelle", |
| "middle": [], |
| "last": "Tomlinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Laplante", |
| "suffix": "" |
| }, |
| { |
| "first": "Lilly", |
| "middle": [], |
| "last": "Ross", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Irani", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zaldivar", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Communications of the ACM", |
| "volume": "61", |
| "issue": "3", |
| "pages": "39--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M Six Silberman, Bill Tomlinson, Rochelle LaPlante, Joel Ross, Lilly Irani, and Andrew Zaldivar. 2018. Responsible research with crowds: pay crowdwork- ers at least minimum wage. Communications of the ACM, 61(3):39-41.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "order effects\" in survey research: Activation and information functions of preceding questions", |
| "authors": [ |
| { |
| "first": "Fritz", |
| "middle": [], |
| "last": "Strack", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Context effects in social and psychological research", |
| "volume": "", |
| "issue": "", |
| "pages": "23--34", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fritz Strack. 1992. \"order effects\" in survey research: Activation and information functions of preceding questions. In Context effects in social and psycho- logical research, pages 23-34. Springer.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Judgment under uncertainty: Heuristics and biases. science", |
| "authors": [ |
| { |
| "first": "Amos", |
| "middle": [], |
| "last": "Tversky", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Kahneman", |
| "suffix": "" |
| } |
| ], |
| "year": 1974, |
| "venue": "", |
| "volume": "185", |
| "issue": "", |
| "pages": "1124--1131", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amos Tversky and Daniel Kahneman. 1974. Judgment under uncertainty: Heuristics and biases. science, 185(4157):1124-1131.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "The framing of decisions and the psychology of choice. science", |
| "authors": [ |
| { |
| "first": "Amos", |
| "middle": [], |
| "last": "Tversky", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Kahneman", |
| "suffix": "" |
| } |
| ], |
| "year": 1981, |
| "venue": "", |
| "volume": "211", |
| "issue": "", |
| "pages": "453--458", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amos Tversky and Daniel Kahneman. 1981. The fram- ing of decisions and the psychology of choice. sci- ence, 211(4481):453-458.", |
| "links": null |
| } |
| }, |
| "ref_entries": {} |
| } |
| } |