| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:54:28.406029Z" |
| }, |
| "title": "A Comprehensive Assessment of Dialog Evaluation Metrics", |
| "authors": [ |
| { |
| "first": "Yi-Ting", |
| "middle": [], |
| "last": "Yeh", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "yitingye@cs.cmu.edu" |
| }, |
| { |
| "first": "Maxine", |
| "middle": [], |
| "last": "Eskenazi", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Shikib", |
| "middle": [], |
| "last": "Mehri", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "amehri@cs.cmu.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Automatic evaluation metrics are a crucial component of dialog systems research. Standard language evaluation metrics are known to be ineffective for evaluating dialog. As such, recent research has proposed a number of novel, dialog-specific metrics that correlate better with human judgements. Due to the fast pace of research, many of these metrics have been assessed on different datasets and there has as yet been no time for a systematic comparison between them. To this end, this paper provides a comprehensive assessment of recently proposed dialog evaluation metrics on a number of datasets. In this paper, 23 different automatic evaluation metrics are evaluated on 10 different datasets. Furthermore, the metrics are assessed in different settings, to better qualify their respective strengths and weaknesses. This comprehensive assessment offers several takeaways pertaining to dialog evaluation metrics in general. It also suggests how to best assess evaluation metrics and indicates promising directions for future work.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Automatic evaluation metrics are a crucial component of dialog systems research. Standard language evaluation metrics are known to be ineffective for evaluating dialog. As such, recent research has proposed a number of novel, dialog-specific metrics that correlate better with human judgements. Due to the fast pace of research, many of these metrics have been assessed on different datasets and there has as yet been no time for a systematic comparison between them. To this end, this paper provides a comprehensive assessment of recently proposed dialog evaluation metrics on a number of datasets. In this paper, 23 different automatic evaluation metrics are evaluated on 10 different datasets. Furthermore, the metrics are assessed in different settings, to better qualify their respective strengths and weaknesses. This comprehensive assessment offers several takeaways pertaining to dialog evaluation metrics in general. It also suggests how to best assess evaluation metrics and indicates promising directions for future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Evaluation is a crucial component of the research process. Evaluation metrics are used to shine light on the best models and thus they strongly influence the research directions of a field. Standard automatic language evaluation metrics (e.g., BLEU, METEOR) have been shown to be ineffective for evaluating dialog (Liu et al., 2016b; Deriu et al., 2021) . To this end, recent research has proposed a number of automatic metrics specifically designed to evaluate dialog (Tao et al., 2018; Ghazarian et al., 2019; Lan et al., 2020; Pang et al., 2020; Ghazarian et al., 2020; Sinha et al., 2020; Mehri and Eskenazi, 2020b,a; . These metrics address the weaknesses of the standard language evaluation metrics. They have also been shown to better correlate with human judgement. However, most of these metrics were devel-oped at about the same time and were evaluated on different datasets. As such, there has not yet been a consistent comparison amongst them. This paper describes an assessment of these metrics on several different datasets. It quantifies the relative performance and reveals strengths and weaknesses of each metric.", |
| "cite_spans": [ |
| { |
| "start": 314, |
| "end": 333, |
| "text": "(Liu et al., 2016b;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 334, |
| "end": 353, |
| "text": "Deriu et al., 2021)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 469, |
| "end": 487, |
| "text": "(Tao et al., 2018;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 488, |
| "end": 511, |
| "text": "Ghazarian et al., 2019;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 512, |
| "end": 529, |
| "text": "Lan et al., 2020;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 530, |
| "end": 548, |
| "text": "Pang et al., 2020;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 549, |
| "end": 572, |
| "text": "Ghazarian et al., 2020;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 573, |
| "end": 592, |
| "text": "Sinha et al., 2020;", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 593, |
| "end": 621, |
| "text": "Mehri and Eskenazi, 2020b,a;", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Due to the fact that standard automatic metrics have been shown to be ineffective for dialog evaluation (Liu et al., 2016b; Deriu et al., 2021) , dialog research typically relies on human evaluation. While useful and very informative, human evaluation is expensive and time-consuming. Given these considerations, at present it is usually only used for a final evaluation. Yet, during the development process, automatic evaluation metrics are essential since they are used to optimize the model design and the choice of hyperparameters. In order to be relevant and meaningful, these metrics must better correlate with human judgment so that they serve as a meaningful proxy for human judgements during the development process.", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 123, |
| "text": "(Liu et al., 2016b;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 124, |
| "end": 143, |
| "text": "Deriu et al., 2021)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This paper discusses the assessment of several recently-proposed automatic evaluation metrics for dialog over several datasets. These datasets have human annotations that measure the quality of the responses. Furthermore, this paper includes analysis experiments that qualify the performance of the metrics across different settings. This assessment measures the performance of the metrics (1) on both the turn level and the dialog level, (2) for different dialog lengths, (3) for different dialog qualities (e.g., coherence, engaging), (4) for different types of response generation models (i.e., generative, retrieval, simple models and state-ofthe-art models), and (5) exploring combinations of different metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "All of the code created for this assessment is open-sourced 1 to facilitate the easy assessment of 16 future evaluation metrics on a number of datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we studied 23 automatic metrics either designed for evaluating dialogues or general natural language generation (NLG) tasks. Table 1 presents an overview of metrics. Besides model designs and training objectives, the three aspects used to characterize them are: (1) Does the metric use a pretrained language model? (2) What data was used to train the metric? (3) Does the metric require a reference response or is it reference-free?", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 140, |
| "end": 147, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Overview of Automatic Metrics", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In most metrics, triplet ranking loss is used as a training objective, and it requires the model to give higher scores to appropriate responses. BERT (Devlin et al., 2019) and its variant RoBERTa (Liu et al., 2019) are the most popular pretrained language models utilized in state-of-the-art metrics, followed by GPT-2 language model (Radford et al., 2019) . DailyDialog (Li et al., 2017) and Per-sonaChat (Zhang et al., 2018) are two widely-used datasets for training metrics. Briefly, conversation topics of DailyDialog are about day-to-day life, while Personachat consists of dialogs where each participant is assigned a persona and the goal is to become familiar with the other individual. Wordoverlap metrics are ineffective for dialog (Liu et al., 2016b) largely due to the one-to-many nature of dialog (Zhao et al., 2017a) . Thus, reference-free metrics have been proposed to circumvent the oneto-many problem. Amongst the 24 metrics assessed here, we have 11 reference-free evaluation metrics.", |
| "cite_spans": [ |
| { |
| "start": 150, |
| "end": 171, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 196, |
| "end": 214, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 334, |
| "end": 356, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 371, |
| "end": 388, |
| "text": "(Li et al., 2017)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 406, |
| "end": 426, |
| "text": "(Zhang et al., 2018)", |
| "ref_id": null |
| }, |
| { |
| "start": 741, |
| "end": 760, |
| "text": "(Liu et al., 2016b)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 809, |
| "end": 829, |
| "text": "(Zhao et al., 2017a)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview of Automatic Metrics", |
| "sec_num": "2" |
| }, |
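| { |
| "text": "To make the triplet ranking objective concrete, the following is a minimal PyTorch sketch (not taken from any particular metric; the margin value and example scores are illustrative assumptions):\nimport torch\n\ndef triplet_ranking_loss(pos_scores, neg_scores, margin=0.5):\n    # Push the score of the appropriate response above the score of a\n    # randomly sampled negative response by at least `margin`.\n    return torch.clamp(margin - pos_scores + neg_scores, min=0).mean()\n\n# Usage with scores a metric model assigns to (context, response) pairs.\nloss = triplet_ranking_loss(torch.tensor([0.9, 0.7]), torch.tensor([0.2, 0.8]))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview of Automatic Metrics", |
| "sec_num": "2" |
| }, |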
| { |
| "text": "We refer readers to Appendix A for detailed discussion of aforementioned characteristics of assessed metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Overview of Automatic Metrics", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this work, we test metrics with the quality annotated datasets: USR (Mehri and Eskenazi, 2020b) , GRADE , HolisticEval (Pang et al., 2020) , FED (Mehri and Eskenazi, 2020a) , DSTC6 (Hori and Hori, 2017) and DSTC9 (Gunasekara et al., 2021) . Concretely, each sample in the dataset consists of a dialog context, a generated response and a quality score. Optionally, a groundtruth response may also be included. Due to the space constraint, in this section we only provide a high-level overview of the used datasets. The complete list and discussion of characteristics of used datasets is provided in Appendix B.", |
| "cite_spans": [ |
| { |
| "start": 71, |
| "end": 98, |
| "text": "(Mehri and Eskenazi, 2020b)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 122, |
| "end": 141, |
| "text": "(Pang et al., 2020)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 148, |
| "end": 175, |
| "text": "(Mehri and Eskenazi, 2020a)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 184, |
| "end": 205, |
| "text": "(Hori and Hori, 2017)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 216, |
| "end": 241, |
| "text": "(Gunasekara et al., 2021)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testing Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In general, these datasets are constructed using the following steps:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testing Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "1. Choose an existing dialog dataset. 2. Train a response generation model on the chosen dialog dataset. 3. Generate responses for the validation/test set of the chosen dataset 4. Collect human quality annotations for the generated responses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testing Datasets", |
| "sec_num": "3" |
| }, |
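| { |
| "text": "To illustrate the result of these steps, a quality-annotated sample can be represented roughly as follows (a hypothetical schema consistent with the description above; the field names are our assumptions):\nfrom dataclasses import dataclass\nfrom typing import List, Optional\n\n@dataclass\nclass QualitySample:\n    context: List[str]               # dialog history, one string per utterance\n    response: str                    # model-generated response (step 3)\n    quality: float                   # human quality annotation (step 4)\n    reference: Optional[str] = None  # ground-truth response, if available", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testing Datasets", |
| "sec_num": "3" |
| }, |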
| { |
| "text": "For data that does not contain referenced responses, only reference-free metrics were assessed. The characteristics of the quality-annotated data significantly influence the performance of the metrics since dialog metrics might be originally developed and trained on data in a very different domain than the test domain. For example, since many of these datasets were collected in different settings, they have diverse formality, complexity of the sentence structure, and the dialog length. Despite using the same underlying dialog dataset, the response generation model used would also influence the characteristics of responses. Distinguishing between responses with very different quality is easier, since the low quality response may not even follow grammar rules or the dialog context. On the other hand, it will be challenging to give appropriate scores to responses from state-of-the-art dialog systems. In the 10 datasets, responses in the data labeled by GRADE, FED, and DSTC9 data come from models with relatively better empirical performance such as Transformer Seq2Seq model (Vaswani et al., 2017) , Di-aloGPT (Zhang et al., 2020) , and Meena (Adiwardana et al., 2020b) .", |
| "cite_spans": [ |
| { |
| "start": 1087, |
| "end": 1109, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 1122, |
| "end": 1142, |
| "text": "(Zhang et al., 2020)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1155, |
| "end": 1181, |
| "text": "(Adiwardana et al., 2020b)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Testing Datasets", |
| "sec_num": "3" |
| }, |
| { |
| "text": "This section describes the assessment of the evaluation metrics. Wherever possible, the released pretrained model was used to reproduce results. Since RUBER, BERT-RUBER and PONE do not release their pretrained models, those models were trained on DailyDialog. For USR, we use the released model which is trained on TopicalChat. For Predic-tiveEngage, the original paper combined the model with RUBER for the best performance. In this assessment only the engagement score model is used for comparison to the other metrics. FlowScore only scores dialogs with more than 3 utterances, so it cannot be used on some of the quality-annotated data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Reference-Free? Objective Table 1 : Summary of the evaluation metrics assessed in this paper. The 'Pretrained Model' column indicates the specific pretrained language model used by the metric. The 'Training Dataset' and 'Objective' columns describe the dialog data and the objective used when training the metric. 'Reference-Free?' indicates whether the metric requires a reference response for evaluation. ED is the abbreviation of the EmpatheticDialogue dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 26, |
| "end": 33, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Metric Pretrained Model Training Dataset", |
| "sec_num": null |
| }, |
| { |
| "text": "BLEU (2002) X X X X METEOR (2005) X X X X ROUGE (2004) X X X X ADEM (2017) X Ubuntu Dialogue + Twitter X MSE BERTScore (2019) BERT X X X BLEURT (2020) BERT WMT Metrics Shared Task X MSE QuestEval (2021) T5 SQuAD-v2/NewsQA \u221a QA/QG RUBER (2018) X DailyDialog / PersonaChat X Triplet BERT-RUBER (2019) BERT DailyDialog / PersonaChat X Triplet PONE (2020) BERT DailyDialog X Triplet MAUDE (2020) BERT PersonaChat \u221a NCE DEB (2020) BERT Reddit/DailyDialog++ \u221a MLM/NSP GRADE (2020) BERT DailyDialog \u221a Triplet DynaEval (2021a) RoBERTa ED/ConvAI2/DailyDialog \u221a Triplet USR (2020b) RoBERTa TopicalChat / PersonaChat \u221a MLM/CrossEntropy USL-H (2020) BERT DailyDialog \u221a VUP/NSP/MLM DialogRPT (2020) GPT-2 Reddit \u221a CrossEntropy Deep AM-FM (2021b) Multilingual BERT Twitter X MLM HolisticEval (2020) BERT DailyDialog \u221a LM PredictiveEngage (2020) BERT ConvAI X CrossEntropy FED (2020a) DialoGPT X \u221a X FlowScore (2021b) Plato Reddit \u221a ContextFlow FBD (2021) RoBERTa X X X", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metric Pretrained Model Training Dataset", |
| "sec_num": null |
| }, |
| { |
| "text": "This section describes the performance of the metrics on the quality-annotated datasets that contain a human reference. This distinction is important, because the referenced metrics (e.g., BLEU, ME-TEOR, BERTScore) can only be assessed on these datasets. Table 2 present the correlations of the metrics on the USR, GRADE, and DSTC6 data. Due to the space constraint, we only present Spearman correlation, and the full table could be found in Appendix C. Rule-based metrics perform surprisingly well on USR-TopicalChat and USR-PersonaChat. However, they fall short on the GRADE data. This may be due to the fact that the responses in the GRADE data are produced by better NLG models, as described in Section 3. Since rule-based metrics calculate word-overlap metrics between the system responses and a human reference, they can consistently detect poor responses which only contain irrelevant words. However, the rule-based metrics struggle to score responses from state-of-the-art dialog systems since those responses require a deeper semantic understanding of the dialog context and the generated response.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 255, |
| "end": 262, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results on Datasets with Human Reference", |
| "sec_num": "4.1" |
| }, |
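| { |
| "text": "For reference, the turn-level correlation reported here can be computed with SciPy (a minimal sketch; the two lists are assumed to be aligned per response):\nfrom scipy.stats import spearmanr\n\ndef turn_level_correlation(metric_scores, human_scores):\n    # Spearman rank correlation between a metric's per-response scores\n    # and the corresponding human quality annotations.\n    rho, p_value = spearmanr(metric_scores, human_scores)\n    return rho, p_value", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on Datasets with Human Reference", |
| "sec_num": "4.1" |
| }, |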
| { |
| "text": "USR, GRADE, DEB, and USL-H are the best performing metrics for evaluating response generation (i.e., turn-level correlation). The USR and GRADE metrics have high performance on their respective datasets. This phenomenon probably occurs for several reasons. First, both metrics finetune pretrained models in a self-supervised manner on certain dialog datasets. It is expected that the metrics would perform better on quality-annotated datasets corresponding to the data they were finetuned on. For example, the choice of the topic graph of GRADE may have been influenced by phenomena observed in the GRADE data. Finally, metrics may be optimized specifically for the data (i.e., through hyperparameter tuning on a validation set). This observation stresses the importance of testing on a variety of different quality-annotated datasets in order to ensure the generality of an evaluation metric and to avoid over-optimizing to specific datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on Datasets with Human Reference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "The performance of DEB demonstrates the importance of training data in model-based metrics. DEB replaces the BERT pretraining data with a dialog corpus, and further finetunes the model on a manually-created dataset consisting of adversarial responses designed for the NSP objective. USL-H has a similar performance trend to USR, which is reasonable since the two metrics both combine response selection models and language models. The VUP model which validates whether a given response is grammatically correct helps USL-H outperforms USR on the USR-PersonaChat dataset.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on Datasets with Human Reference", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "This section describes the results for the datasets that do not contain a reference response. Only reference-free metrics can be evaluated on these quality-annotated datasets. metrics on HolisticEval-DailyDialog, FED, and DSTC9 data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on Datasets without Human Reference", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In the FED dataset, there are two types of annotations: turn-level and dialog-level. Turn-level annotations assess the quality of a single response, while dialog-level annotations assess the entire dialog. Similar to the GRADE data, the responses in the FED data are generated by state-of-the-art dialog systems (Adiwardana et al., 2020b) , which makes the data particularly challenging.", |
| "cite_spans": [ |
| { |
| "start": 312, |
| "end": 338, |
| "text": "(Adiwardana et al., 2020b)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on Datasets without Human Reference", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Many metrics which perform well on the USR and GRADE data, do not do as well on the FED data. This may be because of the longer dialog context in the FED data. While the average number of words in the context of GRADE-ConvAI2 is 23.4, the context of FED has on average 86.5 words and 11.8 utterances. If a model has not seen long contexts at training time, it will struggle to perform well on longer contexts at test time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on Datasets without Human Reference", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "FlowScore, PredictiveEngage, DynaEval and FED perform the best on dialog-level evaluation on the FED and DSTC9 datasets. This may occur for two reasons. First, these models are designed to capture properties that are better aligned with dialog-level annotations: FlowScore models the dynamic infor-mation flow in the dialog history, DynaEval measures coherence using a graph-based representation of the dialog, PredictiveEngage measures engagement, FED measures a number of different dialog-level qualities (e.g., coherence, topic depth, etc.). Second, the model architectures of PredictiveEngage and FED are relatively simple. This may make the two metrics less sensitive to longer dialog contexts. Though these metrics do well at assessing state-of-the-art dialog systems in the FED and DSTC9 data, they underperform on the GRADE and USR data. This suggests that these two metrics are optimal for dialog-level evaluation, but less so for turn-level evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on Datasets without Human Reference", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "This section presents system-level correlation on quality-annotated datasets. System-level correlation is a strong indication of the ability of metrics to rank several response generation models. GRADE-DailyDialog and GRADE-EmpatheticDialog only consist of 2 systems, which makes system-level correlation much less informative.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on System-Level Correlation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In general, metrics that have a higher turn-level correlation tend to have a higher system-level correlation. Interestingly, some metrics which perform poorly on turn-level correlation do much better on system-level correlation, suggesting that averaging out over all the examples produced by a dialog system reduces the noise in the metric's scores. For example, GRADE has a comparable system-level correlation to the best-performing Deep AM-FM on the DSTC6 data, despite having a lower turnlevel correlation. On the other hand, BERTScore, which has the highest turn-level correlation on the DSTC6 data, performs poorly on system-level correlation. This suggests that word-overlap metrics may struggle to accurately assess entire systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on System-Level Correlation", |
| "sec_num": "4.3" |
| }, |
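| { |
| "text": "Concretely, system-level correlation first averages a metric over all responses produced by each system (a minimal sketch under the assumption that scores are grouped per system):\nimport numpy as np\nfrom scipy.stats import spearmanr\n\ndef system_level_correlation(metric_by_system, human_by_system):\n    # Average per-response scores within each system, then correlate the\n    # system-level means; this averaging is what reduces per-example noise.\n    metric_means = [np.mean(s) for s in metric_by_system]\n    human_means = [np.mean(s) for s in human_by_system]\n    return spearmanr(metric_means, human_means)[0]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on System-Level Correlation", |
| "sec_num": "4.3" |
| }, |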
| { |
| "text": "System-level correlations are generally very strong across a variety of metrics. Most impressively are the high correlations of Deep AM-FM on the DSTC6 data (11 different systems) and of FlowScore on the DSTC9 data (20 different systems).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results on System-Level Correlation", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Since dialog quality is inherently multi-faceted (Walker et al., 1997; See et al., 2019) , it is inadequate to only evaluate dialog metrics on the overall response score. Therefore, this paper also presents the correlation of the metrics with various dialog qualities in Table 4 and Figure 1 .", |
| "cite_spans": [ |
| { |
| "start": 49, |
| "end": 70, |
| "text": "(Walker et al., 1997;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 71, |
| "end": 88, |
| "text": "See et al., 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 271, |
| "end": 278, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 283, |
| "end": 291, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance on Various Dialog Qualities", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "HolisticEval-DailyDialog annotates the context coherence of responses. For the FED data, we present results on 8 fine-grained turn-level qualitites, and results on dialog-level qualities could be found in Appendix. For metrics that produce fine-grained scores, we use the corresponding finegrained score to measure correlation. For example, because PredictiveEngage evaluates the engaging quality of the dialog, the engaging score, rather than the overall score, is used to measure correlation with the engaging quality in the FED dataset. USR and USL-H both do well predicting context coherence and engaging on the HolisticEval-DailyDialog and FED data, respectively. These two metrics also have good performance on the USR and GRADE data. In contrast, HolisticEval and PredictiveEngage do well on measuring coherence and engagement, but underperform on the USR and GRADE data. This suggests that while finegrained metrics are important qualities to measure, modelling only these qualities is insufficient. Instead, it is better to design a metric that measures many different qualities and can aggregate them to form an overall score, such as the combination of MLM/dialog retrieval in USR and MLM/VUP/NSP in USL-H. The evaluation of specific fine-grained qualities sheds much light on their underlying behavior. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance on Various Dialog Qualities", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Most reference-free metrics compute their scores by comparing the dialog context with the response. Therefore, it is interesting to determine if these metrics perform differently according to the length of the dialog context. The quality-annotated samples are grouped by their context lengths with a length interval of 10. Figure 2 shows performance differences of the reference-free metrics at different context lengths. Many metrics' performance decreases as the context lengthens. This may be due to the fact that those metrics are trained on shorter dialogs and struggle to understand longer dialog contexts. On the other hand, the performance of HolisticEval, Di-alogRPT, and FED increases as the context lengthens. These metrics incorporate GPT-2-based language models to score responses, while the other metrics mostly rely on BERT-based models. BERTbased models are optimized for local coherence on the BookCorpus and Wikipedia through the MLM objective and limit context length. Thus GPT-2based metrics perform better on longer dialogs.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 323, |
| "end": 331, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effect of Context Length", |
| "sec_num": "4.5" |
| }, |
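| { |
| "text": "The grouping used in this analysis can be sketched as follows (assuming aligned lists of context word counts and scores; the interval of 10 follows the text above):\nfrom collections import defaultdict\nfrom scipy.stats import spearmanr\n\ndef correlation_by_context_length(lengths, metric_scores, human_scores, interval=10):\n    # Bucket samples by context length, then compute Spearman correlation per bucket.\n    buckets = defaultdict(list)\n    for n, m, h in zip(lengths, metric_scores, human_scores):\n        buckets[n // interval].append((m, h))\n    return {b: spearmanr(*zip(*pairs))[0] for b, pairs in buckets.items()}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Context Length", |
| "sec_num": "4.5" |
| }, |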
| { |
| "text": "The performance of USL-H, DEB, and Predic-tiveEngage does not change much at different context lengths. For PredictiveEngage, this could be because PredictiveEngage uses several MLP layers during the pooling of the pretrained BERT embeddings to predict the output. USL-H uses the VUP model which aims to determine if a response is grammatically correct. The use of the VUP model may make USL-H robust to longer context lengths.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Context Length", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "The BERT model used in DEB is pretrained on the Reddit corpus which may improve its ability to model long-range correlations in the dialog. These different factors make metrics more robust to different context lengths.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effect of Context Length", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Many metrics rely on an ensemble of different models. Inspired by this, this section explores the possibility that a combination of metrics may be successful (Table 2 ). Since it is not feasible to exhaustively explore all possible combinations of the metrics, some combinations of the best-performing metrics were explored here. The metrics are combined through simple averaging. Future work should explore more sophisticated mechanisms for combining metrics.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 166, |
| "text": "(Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Combining Metrics", |
| "sec_num": "4.6" |
| }, |
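| { |
| "text": "The simple averaging used here can be sketched as follows (the min-max normalization step is our assumption to put differently-scaled metrics on a comparable range; the paper itself only specifies averaging):\nimport numpy as np\n\ndef combine_metrics(score_lists):\n    # score_lists: one array of per-response scores per metric.\n    normed = []\n    for s in map(np.asarray, score_lists):\n        # Min-max normalize each metric so its scores lie in [0, 1].\n        normed.append((s - s.min()) / (s.max() - s.min() + 1e-8))\n    return np.mean(normed, axis=0)  # averaged combined score per response", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Combining Metrics", |
| "sec_num": "4.6" |
| }, |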
| { |
| "text": "In the last three rows of Table 2 , we selectively present the three combinations of metrics with the best performance. The complete table and discussion could be found in Appendix C.3. In general, combinations of the best performing metrics USR, GRADE, USL-H, and DEB improve the performance. The last row shows results of taking the average of the scores of all of the metrics that were assessed in this paper. While the idea of an All metric is simple, and metrics are combined through simple averaging, the results are surprisingly good across the different referenced quality-annotated datasets. This result highlights the potential of combining various evaluation metrics in better manners.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 26, |
| "end": 33, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Combining Metrics", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "This paper provides a comprehensive assessment of various automatic evaluation metrics for dialog. Many different evaluation metrics have been proposed in recent years. The results indicate several important directions for future work on the assessment of dialog evaluation metrics: 1) Many metrics rely on pretrained language models, and the analysis in this paper highlights the impacts of them. Researchers could further explore the impact of choice of pretrianed models. 2) Unsurprisingly, Metrics generally perform better on the data they were trained on. Therefore, methods to adapt metrics to new domains should be an important research direction. In general, dialog evaluation metrics can be divided into rule-based and model-based metrics. Rulebased metrics use heuristic rules to evaluate the system response, conditioned on the dialog context and human reference(s). Model-based metrics are trained, often with self-supervised objectives, to measure the quality of the responses. BLEU (Papineni et al., 2002 ) is a popular rulebased metric often used to benchmark natural language generation (NLG) systems. BLEU computes the n-gram precision of the system responses using human references. While BLEU performs reasonably when evaluating NLG systems, it has issues reflecting grammaticality, use of semantically similar words, and meaning preservation (Novikova et al., 2017; Ananthakrishnan et al., 2007; Sulem et al., 2018; Reiter, 2018) .", |
| "cite_spans": [ |
| { |
| "start": 996, |
| "end": 1018, |
| "text": "(Papineni et al., 2002", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 1362, |
| "end": 1385, |
| "text": "(Novikova et al., 2017;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 1386, |
| "end": 1415, |
| "text": "Ananthakrishnan et al., 2007;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 1416, |
| "end": 1435, |
| "text": "Sulem et al., 2018;", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 1436, |
| "end": 1449, |
| "text": "Reiter, 2018)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
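| { |
| "text": "As a reminder of the mechanics, BLEU's modified n-gram precision clips hypothesis n-gram counts by their counts in the reference (a minimal single-reference sketch; full BLEU combines several n-gram orders and a brevity penalty):\nfrom collections import Counter\n\ndef modified_ngram_precision(hypothesis, reference, n=2):\n    hyp, ref = hypothesis.split(), reference.split()\n    hyp_ngrams = Counter(tuple(hyp[i:i + n]) for i in range(len(hyp) - n + 1))\n    ref_ngrams = Counter(tuple(ref[i:i + n]) for i in range(len(ref) - n + 1))\n    # Clip each hypothesis n-gram count by its count in the reference.\n    clipped = sum(min(c, ref_ngrams[g]) for g, c in hyp_ngrams.items())\n    return clipped / max(sum(hyp_ngrams.values()), 1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |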
| { |
| "text": "METEOR (Banerjee and Lavie, 2005 ) and ROUGE (Lin, 2004) have been proposed to address the shortcomings of BLEU. METEOR incorporates stems and synonyms into its calculation, while ROUGE focuses on n-gram recall instead of precision. Although these two metrics improve upon BLEU, they remain ineffective for dialog evaluation (Liu et al., 2016b) . In general, word-overlap metrics struggle to evaluate dialog responses because of the one-to-many nature of dialog (Zhao et al., 2017b). Concretely, as mentioned above, since there are many appropriate responses for a given dialog context it is unreasonable to penalize a valid system response that deviates from the ground truth.", |
| "cite_spans": [ |
| { |
| "start": 7, |
| "end": 32, |
| "text": "(Banerjee and Lavie, 2005", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 45, |
| "end": 56, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 325, |
| "end": 344, |
| "text": "(Liu et al., 2016b)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "ADEM (Lowe et al., 2017) is an early learningbased metric that uses a recurrent neural network (RNN) to predict the quality of system responses. ADEM uses quality-annotated training data and the model is trained to predict human quality annotations with a mean squared error loss (MSE).", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 24, |
| "text": "(Lowe et al., 2017)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "RUBER (Tao et al., 2018 ) uses a hybrid model consisting of both a referenced metric and an unreferenced metric. The referenced metric calculates the cosine similarity of word embeddings between a system response and a human reference. The unreferenced metric is trained with a triplet ranking loss to predict whether the generated response is appropriate for the dialog history. BERT-RUBER (Ghazarian et al., 2019) proposes to replace the RNN in RUBER with BERT (Devlin et al., 2019) to further improve the performance with contextualized word embeddings.", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 23, |
| "text": "(Tao et al., 2018", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 391, |
| "end": 415, |
| "text": "(Ghazarian et al., 2019)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 463, |
| "end": 484, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
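| { |
| "text": "The referenced half of RUBER can be sketched as follows (pooling word embeddings with max pooling is one common choice; the embedding source is abstracted away here):\nimport numpy as np\n\ndef ruber_referenced_score(response_embs, reference_embs):\n    # response_embs, reference_embs: (num_tokens, dim) word-embedding matrices.\n    r = np.max(response_embs, axis=0)  # pool token embeddings into one vector\n    g = np.max(reference_embs, axis=0)\n    return float(r @ g / (np.linalg.norm(r) * np.linalg.norm(g) + 1e-8))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |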
| { |
| "text": "Based on BERT-RUBER, PONE (Lan et al., 2020 ) uses a novel algorithm to sample negative examples during training, and trains the metric on a dataset augmented by other NLG models.", |
| "cite_spans": [ |
| { |
| "start": 26, |
| "end": 43, |
| "text": "(Lan et al., 2020", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "MAUDE (Sinha et al., 2020) is trained with Noise Contrastive Estimation (NCE) (Gutmann and Hyv\u00e4rinen, 2010 ) which requires the model to differentiate between a correct response and randomly sampled negative responses.", |
| "cite_spans": [ |
| { |
| "start": 6, |
| "end": 26, |
| "text": "(Sinha et al., 2020)", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 78, |
| "end": 106, |
| "text": "(Gutmann and Hyv\u00e4rinen, 2010", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
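| { |
| "text": "Framed as binary discrimination, the NCE objective can be sketched as follows (a minimal PyTorch sketch; the scoring model is abstracted into the logits):\nimport torch\nimport torch.nn.functional as F\n\ndef nce_loss(pos_logits, neg_logits):\n    # Label the correct response 1 and randomly sampled negatives 0,\n    # and train the scorer to tell them apart.\n    logits = torch.cat([pos_logits, neg_logits])\n    labels = torch.cat([torch.ones_like(pos_logits), torch.zeros_like(neg_logits)])\n    return F.binary_cross_entropy_with_logits(logits, labels)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |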
| { |
| "text": "DEB (Sai et al., 2020) constructs a dialog dataset which consists of manually-created relevant responses and adversarial irrelevant responses. DEB uses BERT for dialog evaluation by first pretraining on a large-scale dialog corpus, and then fine-tuning on the proposed dataset with a next sentence prediction (NSP) objective.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "GRADE ) models topic transition dynamics in dialog by constructing a graph representation of the dialog history. This graph is then passed as input to a model that is trained with the same triplet loss as RUBER.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "While GRADE is focused on turn-level topic transition dynamics in dialog, DynaEval ) leverages a graph structure to model the dialog-level interactions between a user and a system. Through this graph-based approach, Dy-naEval is trained to distinguish well-formed dialogs from carefully constructed negative samples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "USR (Mehri and Eskenazi, 2020b) trains several models to measure different qualities of dialogs. USR relies on three different models, (1) a language model, trained with the masked language modelling (MLM) objectives, measures fluency, (2) a dialog retrieval model determines the relevance of a response and (3) a fact-to-response selection model measures whether a response conditions on knowledge.", |
| "cite_spans": [ |
| { |
| "start": 4, |
| "end": 31, |
| "text": "(Mehri and Eskenazi, 2020b)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Similarly, USL-H (Phy et al., 2020) combines three models trained with different objectives: valid utterance prediction (VUP), next sentence prediction (NSP), and MLM. The VUP model determines whether a response is valid and grammatically correct. The NSP model and MLM models are trained with self-supervised objectives to evaluate the sensibleness and the likelihood of a given response.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "DialogRPT is an ensemble of multiple GPT-2 based models, which were finetuned on the Reddit human feedback data with different tasks. The tasks include predicting human feedback of responses and whether the response is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Reason for not using UNION (2020) Designed for story generation Embedding Methods (2015; 2016a; Computing the embedding similarity is identical to the referenced metric in RUBER. Language Model Evaluator 2020Using the language model probability is identical to the approach of USR and HolisticEval. Perplexity (2020a) No access to the NLG models that generated the quality-annotated data in order to obtain perplexity. Distinct N-grams (2016) This is not feasible for models trained on dialog corpora (Pang et al., 2020) . Spot The Bot 2020This still needs human annotation when evaluating models. Learning to Compare (2020) No released code, data, or pretrained models. RUSE 2018Designed for machine translation only. uBLEU 2020No released code. deltaBLEU 2015For each response, this requires multiple human references to calculate the score. Data-QuestEval 2021Designed for general NLU. not specifically for dialog. Topic-based Evaluation 2018No released code, data, or pretrained models. The Alexa Prize Evaluation Framework 2018No released code, data, or pretrained models. AIH 2021aNo released code, data, or pretrained models. human-like. The Deep AM-FM metric ) measures two aspects of dialog quality through the Adequacy Metric (AM) and the Fluency Metric (FM). AM assesses the semantic similarity of system responses and human references by comparing their BERT embeddings. FM compares the similarity of the language model probabilities for both the system response and the human reference, and produces a higher score if the probabilities are similar.", |
| "cite_spans": [ |
| { |
| "start": 74, |
| "end": 88, |
| "text": "Methods (2015;", |
| "ref_id": null |
| }, |
| { |
| "start": 89, |
| "end": 95, |
| "text": "2016a;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 310, |
| "end": 317, |
| "text": "(2020a)", |
| "ref_id": null |
| }, |
| { |
| "start": 428, |
| "end": 442, |
| "text": "N-grams (2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 501, |
| "end": 520, |
| "text": "(Pang et al., 2020)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 610, |
| "end": 624, |
| "text": "Compare (2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
| { |
| "text": "HolisticEval (Pang et al., 2020) evaluates several qualities of dialog: context coherence, language fluency, response diversity, and logical selfconsistency. The GPT-2 language model (Radford et al., 2019) and pretrained Natural Language Inference models are used to measure these qualities.", |
| "cite_spans": [ |
| { |
| "start": 13, |
| "end": 32, |
| "text": "(Pang et al., 2020)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 183, |
| "end": 205, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
| { |
| "text": "In addition to measuring the relevance of a response, PredictiveEngage (Ghazarian et al., 2020) incorporates an utterance-level engagement classifier to better assess the overall quality of a response.", |
| "cite_spans": [ |
| { |
| "start": 71, |
| "end": 95, |
| "text": "(Ghazarian et al., 2020)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
| { |
| "text": "FED (Mehri and Eskenazi, 2020a) is an unsupervised evaluation metric that uses DialoGPT (Zhang et al., 2020) to measure 18 fine-grained qualities of dialog. FED calculates the likelihood of manually designed follow-up utterances to measure multiple qualities of dialog without any supervision.", |
| "cite_spans": [ |
| { |
| "start": 4, |
| "end": 31, |
| "text": "(Mehri and Eskenazi, 2020a)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 88, |
| "end": 108, |
| "text": "(Zhang et al., 2020)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
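| { |
| "text": "A minimal sketch of this follow-up trick using the transformers library (the model name and follow-up utterance are illustrative; FED's actual follow-ups are manually designed per quality):\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\ntok = AutoTokenizer.from_pretrained('microsoft/DialoGPT-medium')\nmodel = AutoModelForCausalLM.from_pretrained('microsoft/DialoGPT-medium')\n\ndef followup_log_likelihood(dialog, followup='Wow! That is really interesting!'):\n    # A higher likelihood of a positive follow-up suggests a better dialog.\n    ctx = tok.encode(dialog + tok.eos_token)\n    ids = torch.tensor([ctx + tok.encode(followup)])\n    with torch.no_grad():\n        logp = torch.log_softmax(model(ids).logits[0, :-1], dim=-1)\n    targets = ids[0, 1:]\n    return logp[torch.arange(len(targets)), targets][len(ctx) - 1:].sum().item()", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |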
| { |
| "text": "FlowScore which is based on the DialoFlow model, models the dynamic information flow in the dialog history to evaluate the quality of a dialog. DialoFlow is a response generation model that is trained with three objectives CFM, SIM and RGM in order to condition the response generation on the context flow in the dialog. FlowScore uses the representations produced by DialoFlow to measure the dialog quality.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
| { |
| "text": "FBD (Xiang et al., 2021) computes the distribution-wise difference between the system generated conversations and the human-written conversations to evaluate the performance of a di-alog system. FBD focuses on assessing systemlevel performance and leverages the pretrained RoBERTa model without any additional finetuning.", |
| "cite_spans": [ |
| { |
| "start": 4, |
| "end": 24, |
| "text": "(Xiang et al., 2021)", |
| "ref_id": "BIBREF58" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
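| { |
| "text": "The distribution-wise difference in FBD can be sketched as a Fr\u00e9chet distance between Gaussian fits of the two embedding sets (a sketch by analogy with FID; the exact formulation follows Xiang et al. (2021)):\nimport numpy as np\nfrom scipy.linalg import sqrtm\n\ndef frechet_distance(system_embs, human_embs):\n    # system_embs, human_embs: (num_conversations, dim) embedding matrices.\n    mu1, mu2 = system_embs.mean(0), human_embs.mean(0)\n    s1 = np.cov(system_embs, rowvar=False)\n    s2 = np.cov(human_embs, rowvar=False)\n    covmean = sqrtm(s1 @ s2).real  # the matrix square root can be complex\n    return float(((mu1 - mu2) ** 2).sum() + np.trace(s1 + s2 - 2 * covmean))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |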
| { |
| "text": "In addition to automatic metrics specifically designed for dialog evaluation, this paper also evaluates the performance of BERTScore (Zhang et al., 2019) , QuestEval , and BLEURT (Sellam et al., 2020) which were originally designed for evaluating machine translation, summarization and general natural language generation. BERTScore computes the F1 score by matching token embeddings in the human reference and system response. BLEURT generates synthetic data to pre-train BERT and fine-tune the model to predict a human score with MSE loss. QuestEval, which is based on question generation (QG) and question answering (QA), accounts for factual consistency, relevance, and information selection of the generated response. While these three metrics were not specifically designed for dialog, it is interesting to observe how they perform on dialog data.", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 153, |
| "text": "(Zhang et al., 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |
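| { |
| "text": "For completeness, a minimal usage sketch of the released bert-score package (the call follows the package's public interface; the example strings are illustrative):\nfrom bert_score import score  # pip install bert-score\n\ncands = ['i am doing well, thanks for asking.']\nrefs = ['i am fine, thank you.']\n# P/R/F1 come from greedy matching of contextual token embeddings.\nP, R, F1 = score(cands, refs, lang='en')\nprint(F1.mean().item())", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": null |
| }, |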
| { |
| "text": "Large-scale pretrained language models (Devlin et al., 2019; Radford et al., 2019) have been ubiquitous in NLP models. Embeddings from pretrained language models have been shown to be particularly effective for a variety of NLP tasks. Pretrained models are now a commonly-used strategy in dialog evaluation metrics. However, since different pretrained models use different training data and objectives, the choice of language model might significantly influence the final performance and generalizability of the evaluation metrics. Future work should explore the impact of the different pretrained language models on the performance of the evaluation metric.", |
| "cite_spans": [ |
| { |
| "start": 39, |
| "end": 60, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 61, |
| "end": 82, |
| "text": "Radford et al., 2019)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Use of Pretrained Language Models", |
| "sec_num": null |
| }, |
| { |
| "text": "BERT (Devlin et al., 2019) is used in many of 27 the metrics that this paper assesses. BERT uses the Masked Language Modeling (MLM) and Next Sentence Prediction (NSP) objectives, and is trained on the BookCorpus (Zhu et al., 2015) and English Wikipedia. RoBERTa, which is employed in USR (Mehri and Eskenazi, 2020b) , improves the training techniques in BERT and trains the model on a much larger corpus which includes the CommonCrawl News dataset (Mackenzie et al., 2020) and text extracted from Reddit. Specifically, the full training data size of RoBERTa is 10 times larger than BERT, and empirically RoBERTa has better performance than BERT on common NLP tasks including the GLUE benchmark (Wang et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 5, |
| "end": 26, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 212, |
| "end": 230, |
| "text": "(Zhu et al., 2015)", |
| "ref_id": null |
| }, |
| { |
| "start": 288, |
| "end": 315, |
| "text": "(Mehri and Eskenazi, 2020b)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 448, |
| "end": 472, |
| "text": "(Mackenzie et al., 2020)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 694, |
| "end": 713, |
| "text": "(Wang et al., 2018)", |
| "ref_id": "BIBREF57" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Use of Pretrained Language Models", |
| "sec_num": null |
| }, |
| { |
| "text": "FED (Mehri and Eskenazi, 2020a) uses Di-aloGPT (Zhang et al., 2020) which is based on the GPT2 (Radford et al., 2019) architecture and trained with dialog data extracted from Reddit. Due to this, DialoGPT might better model human conversation, particularly open-domain chit-chat.", |
| "cite_spans": [ |
| { |
| "start": 4, |
| "end": 31, |
| "text": "(Mehri and Eskenazi, 2020a)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 47, |
| "end": 67, |
| "text": "(Zhang et al., 2020)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 95, |
| "end": 117, |
| "text": "(Radford et al., 2019)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Use of Pretrained Language Models", |
| "sec_num": null |
| }, |
| { |
| "text": "Deep AM-FM uses Multilingual BERT (Devlin et al., 2019) , which is BERT trained on multilingual datasets. The benefits of using multilingual language models to evaluate English data is unclear and is an interesting topic for future work.", |
| "cite_spans": [ |
| { |
| "start": 34, |
| "end": 55, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Use of Pretrained Language Models", |
| "sec_num": null |
| }, |
| { |
| "text": "T5 (Raffel et al., 2020) , which is used by QuestEval, is a Transformer Seq2Seq model pretrained on a massive text corpus. T5 achieved state-of-theart results on many text generation tasks including summarization, question answering, and text classification. The strong performance of T5 on NLG tasks, suggests that it may be valuable in the assessment of response generation models.", |
| "cite_spans": [ |
| { |
| "start": 3, |
| "end": 24, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.2 Use of Pretrained Language Models", |
| "sec_num": null |
| }, |
| { |
| "text": "The choice of training data is one of the most important factors in model-based metrics. The domain, quality, and conversation setting of the dataset all play important roles in the relative quality of the resulting metric. For example, a metric trained on Twitter data may also perform well at evaluating dialogs generated for Reddit data, since the two datasets are constructed from online forums. This section introduces the characteristics of the datasets used to train the various model-based metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.3 Training Data", |
| "sec_num": null |
| }, |
| { |
| "text": "DailyDialog (Li et al., 2017 ) is a human-written dialog dataset where the conversation topics are about day-to-day life.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 28, |
| "text": "(Li et al., 2017", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.3 Training Data", |
| "sec_num": null |
| }, |
| { |
| "text": "PersonaChat (Zhang et al., 2018 ) is a dataset that consists of persona-conditioned dialogs where each participant is assigned a persona and the goal is to become familiar with the other individual. In contrast to DailyDialog, dialogs in PersonaChat have a clear objective and are more engaging.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 31, |
| "text": "(Zhang et al., 2018", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.3 Training Data", |
| "sec_num": null |
| }, |
| { |
| "text": "The ConvAI dataset (Dinan et al., 2019 ) is based on PersonaChat with modifications to preprocessing and additional training examples.", |
| "cite_spans": [ |
| { |
| "start": 19, |
| "end": 38, |
| "text": "(Dinan et al., 2019", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.3 Training Data", |
| "sec_num": null |
| }, |
| { |
| "text": "TopicalChat (Gopalakrishnan et al., 2019) consists of knowledge-grounded human-human conversations, wherein two individuals have a conversation grounded on 'interesting facts'. Models trained on TopicalChat are expected to be able to use external knowledge and have realistic knowledgegrounded conversations.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 41, |
| "text": "(Gopalakrishnan et al., 2019)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.3 Training Data", |
| "sec_num": null |
| }, |
| { |
| "text": "Twitter (Ritter et al., 2011) and Ubuntu Dialogue (Lowe et al., 2015) both result from a crawl of the internet. Ubuntu Dialogue has very technical conversations regarding computer systems, while Twitter covers a broad, general set of topics.", |
| "cite_spans": [ |
| { |
| "start": 8, |
| "end": 29, |
| "text": "(Ritter et al., 2011)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 50, |
| "end": 69, |
| "text": "(Lowe et al., 2015)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.3 Training Data", |
| "sec_num": null |
| }, |
| { |
| "text": "These datasets were generally used to train the metrics through the use of self-supervised objectives.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.3 Training Data", |
| "sec_num": null |
| }, |
| { |
| "text": "Word-overlap metrics are ineffective for dialog (Liu et al., 2016b) largely due to the one-to-many nature of dialog (Zhao et al., 2017a) . While this could be mitigated by using multiple reference responses, it is infeasible to collect a sufficiently large dataset to thoroughly cover the space of potential responses. Thus, reference-free metrics have been proposed to circumvent the one-to-many problem. Amongst the metrics assessed here, there are several referencefree evaluation metrics: HolisticEval, MAUDE, GRADE, USR, FED, FlowSore, USL-H, QuestEval, DEB, DynaEval, PredictiveEngage and Dialo-gRPT.", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 67, |
| "text": "(Liu et al., 2016b)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 116, |
| "end": 136, |
| "text": "(Zhao et al., 2017a)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.4 Referenced vs Reference-Free", |
| "sec_num": null |
| }, |
| { |
| "text": "In contrast to the referenced metrics, which compare the generated response to the reference response, reference-free metrics model the semantics of the dialog context and the generated response in order to reason about the response within the context of the dialog history. Table 5 lists recently proposed dialog metrics that are not assessed in this paper. There are several reasons a metric was not assessed:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 275, |
| "end": 282, |
| "text": "Table 5", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A.4 Referenced vs Reference-Free", |
| "sec_num": null |
| }, |
| { |
| "text": "\u2022 The metric was not designed specifically for dialog. While most of these metrics were not included, a few metrics that fall into this 28 category (e.g., BERTScore, QuestEval, and BLEURT) were assessed here, as a baseline and to represent this category. \u2022 There was no released code, data, or pretrained model for reproducing their results. \u2022 The core idea of the metric is very similar to metrics that were assessed. \u2022 The metric is infeasible to assess in our experimental setting, as it requires additional annotations. This may include requiring human annotations or information about the response generation models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.5 Metrics not Assessed in this Paper", |
| "sec_num": null |
| }, |
| { |
| "text": "Some of the unassessed metrics that share ideas that are covered by other metrics assessed in this paper are:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.5 Metrics not Assessed in this Paper", |
| "sec_num": null |
| }, |
| { |
| "text": "Embedding Methods (Kiros et al., 2015; Liu et al., 2016a; Rodr\u00edguez-Cantelar et al., 2021) compute the similarity of system responses with the human reference through embeddings, which is equivalent to the approach used by RUBER and BERTScore.", |
| "cite_spans": [ |
| { |
| "start": 18, |
| "end": 38, |
| "text": "(Kiros et al., 2015;", |
| "ref_id": null |
| }, |
| { |
| "start": 39, |
| "end": 57, |
| "text": "Liu et al., 2016a;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 58, |
| "end": 90, |
| "text": "Rodr\u00edguez-Cantelar et al., 2021)", |
| "ref_id": "BIBREF47" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.5 Metrics not Assessed in this Paper", |
| "sec_num": null |
| }, |
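| { |
| "text": "As a sketch of this family (our simplification; the released BERTScore additionally uses contextual BERT token embeddings and IDF weighting), greedy embedding matching scores a hypothesis against a reference via best-matching token pairs:\nimport numpy as np\n\ndef greedy_match_f1(hyp_vecs, ref_vecs):\n    # hyp_vecs: (n_hyp, d) and ref_vecs: (n_ref, d) unit-normalized\n    # token embeddings; sim holds all pairwise cosine similarities.\n    sim = hyp_vecs @ ref_vecs.T\n    precision = sim.max(axis=1).mean()  # best ref match per hyp token\n    recall = sim.max(axis=0).mean()     # best hyp match per ref token\n    return 2 * precision * recall / (precision + recall + 1e-8)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.5 Metrics not Assessed in this Paper", |
| "sec_num": null |
| }, |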
| { |
| "text": "Language Model Evaluator (Nedelchev et al., 2020) evaluates dialog using the language model likelihood, which is identical to the approaches of USR and HolisticEval.", |
| "cite_spans": [ |
| { |
| "start": 25, |
| "end": 49, |
| "text": "(Nedelchev et al., 2020)", |
| "ref_id": "BIBREF36" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.5 Metrics not Assessed in this Paper", |
| "sec_num": null |
| }, |
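| { |
| "text": "A minimal sketch of likelihood-based scoring (our illustration; the choice of GPT-2 and the decision to score the concatenated context and response are assumptions rather than the exact setup of these metrics):\nimport torch\nfrom transformers import GPT2LMHeadModel, GPT2TokenizerFast\n\ntok = GPT2TokenizerFast.from_pretrained('gpt2')\nlm = GPT2LMHeadModel.from_pretrained('gpt2').eval()\n\ndef lm_score(context, response):\n    ids = tok(context + ' ' + response, return_tensors='pt').input_ids\n    with torch.no_grad():\n        out = lm(ids, labels=ids)  # loss is the mean token cross-entropy\n    return -out.loss.item()  # higher = more likely under the LM", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A.5 Metrics not Assessed in this Paper", |
| "sec_num": null |
| }, |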
| { |
| "text": "This section provides an overview of the different quality-annotated dialog datasets. Many of these datasets were collected in different settings. For example, as described in Section A.3, DailyDialog consists of casual conversations about daily life while TopicalChat consists of knowledge-grounded conversations. These differences influence various aspects of the data, such as the formality and complexity of the sentence structure. However, since it is not easy to quantify the complexity of the sentence structure, the average length and the number of distinct words in the dialog context (Ctx), human reference (Ref) and model hypothesis (Hyp) are used in Table 6 . The length of the context and reference response in DailyDialog is shorter than on TopicalChat, which is likely influenced by the simpler topics in DailyDialog.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 662, |
| "end": 669, |
| "text": "Table 6", |
| "ref_id": "TABREF8" |
| } |
| ], |
| "eq_spans": [], |
| "section": "B.1 Data Collection", |
| "sec_num": null |
| }, |
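| { |
| "text": "The Table 6 statistics can be reproduced in a few lines (a sketch assuming each example exposes ctx, ref and hyp strings; the field names are ours):\ndef text_stats(texts):\n    tokens = [t.lower().split() for t in texts]\n    avg_len = sum(len(ts) for ts in tokens) / max(len(tokens), 1)\n    n_distinct = len({w for ts in tokens for w in ts})\n    return avg_len, n_distinct\n\n# for field in ('ctx', 'ref', 'hyp'):\n#     print(field, text_stats([ex[field] for ex in data]))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Data Collection", |
| "sec_num": null |
| }, |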
| { |
| "text": "GRADE-DailyDialog has a significantly longer dialog history than the other two quality-annotated datasets that use DailyDialog. This is because those two datasets, HolisticEval and PredictiveEngage, only use one utterance of the dialog while GRADE uses two utterances. On the other hand, responses in PredictiveEngage-DailyDialog use a larger number of distinct words, which is due to the fact that PredictiveEngage-DailyDialog also contains human-written responses in addition to responses generated by NLG systems.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Data Collection", |
| "sec_num": null |
| }, |
| { |
| "text": "While most of the dialog data was collected by recruiting human annotators via Amazon Mechanical Turks (AMT), the DSTC6 data (Hori and Hori, 2017) uses dialogs from Twitter. Therefore, we can expect the DSTC6 data to be noisier and more realistic. It poses a challenge for both NLG models and evaluation metrics.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 146, |
| "text": "(Hori and Hori, 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Data Collection", |
| "sec_num": null |
| }, |
| { |
| "text": "In contrast to other quality-annotated datasets in which the dialog contexts are conversations between humans and only the responses are generated by the system, the DSTC9 and FED datasets provide human-system dialogs that were collected in an interactive setting. The DSTC9 data (Gunasekara et al., 2021) was collected on the Dial-Port platform through direct interaction between real users and open-domain chit-chat systems. The FED data (Mehri and Eskenazi, 2020a) contains both human-human and human-system conversations released by Adiwardana et al. (2020b) .", |
| "cite_spans": [ |
| { |
| "start": 280, |
| "end": 305, |
| "text": "(Gunasekara et al., 2021)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 440, |
| "end": 467, |
| "text": "(Mehri and Eskenazi, 2020a)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 537, |
| "end": 562, |
| "text": "Adiwardana et al. (2020b)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Data Collection", |
| "sec_num": null |
| }, |
| { |
| "text": "Despite using the same underlying dialog dataset (e.g., DailyDialog), the complexity and quality of the response may differ significantly across the quality-annotated datasets depending on the response generation model that was used. If responses in a quality-annotated dataset are from both simple sequence-to-sequence (Seq2Seq) models (Cho et al., 2014) and state-of-the-art language models such as DialoGPT, this quality-annotated dataset can assess whether the metric can distinguish between high quality and low quality responses.", |
| "cite_spans": [ |
| { |
| "start": 337, |
| "end": 355, |
| "text": "(Cho et al., 2014)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Data Collection", |
| "sec_num": null |
| }, |
| { |
| "text": "Distinguishing between responses produced by systems of very different quality is easier, since the low quality responses may not follow grammar rules and make simple mistakes. On the other hand, if responses are only generated by state-of-the-art systems, the task becomes harder because the metric needs to rank responses as to whether they are appropriate in a dialog context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Data Collection", |
| "sec_num": null |
| }, |
| { |
| "text": "Responses in USR-TopicalChat, USR-PersonaChat, HolisticEval-DailyDialog and the DSTC6 data are generated by a relatively simple model, such as an LSTM language model (LSTM LM), LSTM or Transformer Seq2Seq model, or Memory Network. On the other hand, responses in the qualityannotated data labeled by GRADE come from the Transformer Seq2Seq model, DialoGPT, and retrieval model using Transformer or BERT (Transformer/BERT Ranker), which have relatively better empirical performance. Furthermore, FED and DSTC9 data use state-of-the-art dialog systems to generate responses. Specifically, FED data incorporates two systems, Meena (Adiwardana et al., 2020b) and Mitsuku 2 , and the DSTC9 data uses dialog systems including PLATO (Bao et al., 2020) . These high quality responses make the data more challenging.", |
| "cite_spans": [ |
| { |
| "start": 628, |
| "end": 654, |
| "text": "(Adiwardana et al., 2020b)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 726, |
| "end": 744, |
| "text": "(Bao et al., 2020)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.1 Data Collection", |
| "sec_num": null |
| }, |
| { |
| "text": "After generating responses for each dataset, human annotators labeled the quality of each response. While most quality-annotated datasets provide annotations for the overall score of a given response, HolisticEval-DailyDialog only labels the context coherence of a response. 3 . In addition to the overall score, USR and FED data provide annotations for different dialog qualities such as whether the response is coherent or interesting. These finegrained annotations allow for a more comprehensive analysis of metrics.", |
| "cite_spans": [ |
| { |
| "start": 275, |
| "end": 276, |
| "text": "3", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "B.2 Quality Annotation", |
| "sec_num": null |
| }, |
| { |
| "text": "To analyze the similarities between the metrics, we plot the correlation between metric outputs in Figure 3 . As expected, there is a strong correlation between the outputs of BERTSCore, and BLEURT, the rule-based metrics. Interestingly, Deep AM-FM also is strongly correlated with these word-2 https://medium.com/pandorabots-blog/ mitsuku-wins-loebner-prize-2018-3e8d98c5f2a7", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 99, |
| "end": 107, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "C.1 Metric Output Similarities", |
| "sec_num": null |
| }, |
| { |
| "text": "3 HolisticEval also released the data for evaluating response fluency. However since the data does not clearly disambiguate between the dialog context and the corresponding system response, it was not used in this paper overlap metrics. This is surprising since Deep AM-FM was specifically designed for evaluating dialog while the others are intended for general-purpose NLG evaluation. This may be because the Deep AM-FM metric compares the generated response to the reference response and, as such, will favor generated responses that have high word-overlap with the reference response.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C.1 Metric Output Similarities", |
| "sec_num": null |
| }, |
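| { |
| "text": "The Figure 3 analysis amounts to a pairwise rank-correlation matrix over metric outputs on a shared set of responses (a sketch with made-up scores; only the procedure is from the paper):\nimport pandas as pd\n\nscores = pd.DataFrame({\n    'BERTScore':  [0.71, 0.42, 0.55, 0.63],\n    'BLEURT':     [0.65, 0.38, 0.60, 0.58],\n    'Deep AM-FM': [0.58, 0.33, 0.49, 0.61],\n})\nprint(scores.corr(method='spearman').round(2))", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C.1 Metric Output Similarities", |
| "sec_num": null |
| }, |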
| { |
| "text": "BERT-RUBER, PONE, GRADE, and USL-H are another group of metrics with similar behavior. Since BERT-RUBER, PONE, and GRADE share a common triplet rank loss and train on DailyDialog, it is not surprising that these three metrics behave similarly. Although USL-H is trained with different objectives, the same behavior might be due to the use of the same training data (DailyDialog) and the same pretrained model (BERT). Moreover, USR has a slightly higher correlation to this group. This is likely because USR aggregates multiple qualities similar to USL-H and models the relevance of responses, similar to the RUBER-based metrics and GRADE.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C.1 Metric Output Similarities", |
| "sec_num": null |
| }, |
| { |
| "text": "This section presents the correlations of the metrics on both generative and retrieval response generation models (see Table 10 ). Most of the metrics perform similarly on these two types of models. However, USR, GRADE, and FlowScore perform very differently. USR and GRADE are especially good at scoring responses from the retrieval model. USR performs significantly less well at evaluating generative models while GRADE still achieves the highest correlation. In contrast, FlowScore performs well at evaluating generative models but performs poorly on generative models. One reason for this may be that responses from generative models are longer and more complex than the ones from retrieval models. Figure 4 : Spearman correlation to dialog-level annotation qualities on the FED data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 119, |
| "end": 127, |
| "text": "Table 10", |
| "ref_id": "TABREF11" |
| }, |
| { |
| "start": 703, |
| "end": 711, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "C.2 Performance on Different Response Generation Models", |
| "sec_num": null |
| }, |
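| { |
| "text": "A sketch of the per-model-type breakdown (ours; the data layout is an assumption): group responses by the type of system that generated them, then correlate metric and human scores within each group.\nfrom scipy.stats import spearmanr\n\ndef per_type_correlation(rows):\n    # rows: iterable of (model_type, metric_score, human_score) triples.\n    by_type = {}\n    for t, m, h in rows:\n        by_type.setdefault(t, ([], []))\n        by_type[t][0].append(m)\n        by_type[t][1].append(h)\n    return {t: spearmanr(m, h).correlation for t, (m, h) in by_type.items()}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C.2 Performance on Different Response Generation Models", |
| "sec_num": null |
| }, |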
| { |
| "text": "Many metrics rely on an ensemble of different models. Inspired by this, this section explores the possibility that a combination of metrics may be successful (Table 11 ). Since it is not feasible to exhaustively explore all possible combinations of the metrics, some combinations of the best-performing metrics were explored here. The metrics are combined through simple averaging. Future work should explore more sophisticated mechanisms for combining metrics. First, PredictiveEngage, RUBER and PONE were combined, as proposed in (Ghazarian et al., 2020) . Indeed, the resulting combined metric shows significant improvement, especially on the USR-TopicalChat and USR-Personachat data, but correlation decreases on other datasets.", |
| "cite_spans": [ |
| { |
| "start": 532, |
| "end": 556, |
| "text": "(Ghazarian et al., 2020)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 158, |
| "end": 167, |
| "text": "(Table 11", |
| "ref_id": "TABREF12" |
| } |
| ], |
| "eq_spans": [], |
| "section": "C.3 Combining Metrics", |
| "sec_num": null |
| }, |
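| { |
| "text": "A sketch of the combination baseline (ours): per-response metric scores are simply averaged, as in Table 11; z-normalizing each metric before averaging would be one example of the more sophisticated mechanisms left to future work.\nimport numpy as np\nfrom scipy.stats import spearmanr\n\ndef combine(metric_scores):\n    # metric_scores: dict mapping metric name -> np.array of scores,\n    # one score per response, all arrays aligned to the same responses.\n    return np.mean(list(metric_scores.values()), axis=0)\n\n# combined = combine({'USR': usr, 'GRADE': grade, 'USL-H': uslh})\n# rho, _ = spearmanr(combined, human_scores)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "C.3 Combining Metrics", |
| "sec_num": null |
| }, |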
| { |
| "text": "Next, combinations of USR, GRADE, USL-H and DEB (the best performing metrics on the referenced quality-annotated datasets) were explored. The combination of USR with GRADE, USL-H, PONE and PredictiveEngage performs better on the USR data as well as on GRADE-ConvAI2. Given the strong performance of GRADE on the GRADE data, combining it with the other metrics negatively impacts its performance. However, combining GRADE and DEB does result in an improvement. Interestingly, combining DEB with USR and USL-H also negatively affects the results. In Figure 3, we observe that the outputs of GRADE and DEB have high correlation to each other, which is an indication of the similar behavior of the two metrics. The output similarities of metrics may be a good indicator for how to best combine metrics.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 548, |
| "end": 554, |
| "text": "Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "C.3 Combining Metrics", |
| "sec_num": null |
| }, |
| { |
| "text": "The last row of Table 11 , shows the result of taking the average of the scores of all of the metrics that were assessed in this paper. While the idea of an All metric is simple, and metrics are combined through simple averaging, the results are surprisingly good across the different referenced quality-annotated datasets. This result highlights the potential of combining various evaluation metrics in some smart manner.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 16, |
| "end": 24, |
| "text": "Table 11", |
| "ref_id": "TABREF12" |
| } |
| ], |
| "eq_spans": [], |
| "section": "C.3 Combining Metrics", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank the authors of the various metrics for releasing their code. This work is funded by National Science Foundation grant CNS-1512973. The opinions expressed in this paper do not necessarily reflect those of the National Science Foundation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "acknowledgement", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Towards a human-like opendomain chatbot", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Adiwardana", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "R" |
| ], |
| "last": "So", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Fiedel", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Thoppilan", |
| "suffix": "" |
| }, |
| { |
| "first": "Z", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Apoorv", |
| "middle": [], |
| "last": "Kulshreshtha", |
| "suffix": "" |
| }, |
| { |
| "first": "Gaurav", |
| "middle": [], |
| "last": "Nemade", |
| "suffix": "" |
| }, |
| { |
| "first": "Yifeng", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Adiwardana, Minh-Thang Luong, David R. So, J. Hall, Noah Fiedel, R. Thoppilan, Z. Yang, Apoorv Kulshreshtha, Gaurav Nemade, Yifeng Lu, and Quoc V. Le. 2020a. Towards a human-like open- domain chatbot. ArXiv, abs/2001.09977.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Towards a human-like open-domain chatbot", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Adiwardana", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "R" |
| ], |
| "last": "So", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Hall", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Fiedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Romal", |
| "middle": [], |
| "last": "Thoppilan", |
| "suffix": "" |
| }, |
| { |
| "first": "Zi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Apoorv", |
| "middle": [], |
| "last": "Kulshreshtha", |
| "suffix": "" |
| }, |
| { |
| "first": "Gaurav", |
| "middle": [], |
| "last": "Nemade", |
| "suffix": "" |
| }, |
| { |
| "first": "Yifeng", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2001.09977" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Adiwardana, Minh-Thang Luong, David R So, Jamie Hall, Noah Fiedel, Romal Thoppilan, Zi Yang, Apoorv Kulshreshtha, Gaurav Nemade, Yifeng Lu, et al. 2020b. Towards a human-like open-domain chatbot. arXiv preprint arXiv:2001.09977.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Some issues in automatic evaluation of english-hindi mt: more blues for bleu", |
| "authors": [ |
| { |
| "first": "Pushpak", |
| "middle": [], |
| "last": "R Ananthakrishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Bhattacharyya", |
| "suffix": "" |
| }, |
| { |
| "first": "Ritesh M", |
| "middle": [], |
| "last": "Sasikumar", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R Ananthakrishnan, Pushpak Bhattacharyya, M Sasiku- mar, and Ritesh M Shah. 2007. Some issues in auto- matic evaluation of english-hindi mt: more blues for bleu. ICON.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "METEOR: An automatic metric for MT evaluation with improved correlation with human judgments", |
| "authors": [ |
| { |
| "first": "Satanjeev", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "65--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An automatic metric for MT evaluation with im- proved correlation with human judgments. In Pro- ceedings of the ACL Workshop on Intrinsic and Ex- trinsic Evaluation Measures for Machine Transla- tion and/or Summarization, pages 65-72, Ann Ar- bor, Michigan. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "PLATO: Pre-trained dialogue generation model with discrete latent variable", |
| "authors": [ |
| { |
| "first": "Siqi", |
| "middle": [], |
| "last": "Bao", |
| "suffix": "" |
| }, |
| { |
| "first": "Huang", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Fan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Haifeng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "85--96", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.9" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siqi Bao, Huang He, Fan Wang, Hua Wu, and Haifeng Wang. 2020. PLATO: Pre-trained dialogue genera- tion model with discrete latent variable. In Proceed- ings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 85-96, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Learning phrase representations using RNN encoder-decoder for statistical machine translation", |
| "authors": [ |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Van Merri\u00ebnboer", |
| "suffix": "" |
| }, |
| { |
| "first": "Caglar", |
| "middle": [], |
| "last": "Gulcehre", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Fethi", |
| "middle": [], |
| "last": "Bougares", |
| "suffix": "" |
| }, |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Schwenk", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1724--1734", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/D14-1179" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kyunghyun Cho, Bart van Merri\u00ebnboer, Caglar Gul- cehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Nat- ural Language Processing (EMNLP), pages 1724- 1734, Doha, Qatar. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Survey on evaluation methods for dialogue systems", |
| "authors": [ |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Deriu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alvaro", |
| "middle": [], |
| "last": "Rodrigo", |
| "suffix": "" |
| }, |
| { |
| "first": "Arantxa", |
| "middle": [], |
| "last": "Otegi", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillermo", |
| "middle": [], |
| "last": "Echegoyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sophie", |
| "middle": [], |
| "last": "Rosset", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Cieliebak", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Artificial Intelligence Review", |
| "volume": "54", |
| "issue": "1", |
| "pages": "755--810", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jan Deriu, Alvaro Rodrigo, Arantxa Otegi, Guillermo Echegoyen, Sophie Rosset, Eneko Agirre, and Mark Cieliebak. 2021. Survey on evaluation methods for dialogue systems. Artificial Intelligence Review, 54(1):755-810.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Spot the bot: A robust and efficient framework for the evaluation of conversational dialogue systems", |
| "authors": [ |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Deriu", |
| "suffix": "" |
| }, |
| { |
| "first": "Don", |
| "middle": [], |
| "last": "Tuggener", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [ |
| "Ander" |
| ], |
| "last": "Pius Von D\u00e4niken", |
| "suffix": "" |
| }, |
| { |
| "first": "Alvaro", |
| "middle": [], |
| "last": "Campos", |
| "suffix": "" |
| }, |
| { |
| "first": "Thiziri", |
| "middle": [], |
| "last": "Rodrigo", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Belkacem", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "3971--3984", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.326" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jan Deriu, Don Tuggener, Pius von D\u00e4niken, Jon Ander Campos, Alvaro Rodrigo, Thiziri Belkacem, Aitor Soroa, Eneko Agirre, and Mark Cieliebak. 2020. Spot the bot: A robust and efficient framework for the evaluation of conversational dialogue systems. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 3971-3984, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "The second conversational intelligence challenge (convai2)", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Dinan", |
| "suffix": "" |
| }, |
| { |
| "first": "Varvara", |
| "middle": [], |
| "last": "Logacheva", |
| "suffix": "" |
| }, |
| { |
| "first": "Valentin", |
| "middle": [], |
| "last": "Malykh", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Kurt", |
| "middle": [], |
| "last": "Shuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Jack", |
| "middle": [], |
| "last": "Urbanek", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Arthur", |
| "middle": [], |
| "last": "Szlam", |
| "suffix": "" |
| }, |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1902.00098" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Dinan, Varvara Logacheva, Valentin Malykh, Alexander Miller, Kurt Shuster, Jack Urbanek, Douwe Kiela, Arthur Szlam, Iulian Serban, Ryan Lowe, et al. 2019. The second conversational intelligence challenge (convai2). arXiv preprint arXiv:1902.00098.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "deltaBLEU: A discriminative metric for generation tasks with intrinsically diverse targets", |
| "authors": [ |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Quirk", |
| "suffix": "" |
| }, |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "2", |
| "issue": "", |
| "pages": "445--450", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/P15-2073" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michel Galley, Chris Brockett, Alessandro Sordoni, Yangfeng Ji, Michael Auli, Chris Quirk, Mar- garet Mitchell, Jianfeng Gao, and Bill Dolan. 2015. deltaBLEU: A discriminative metric for generation tasks with intrinsically diverse targets. In Proceed- ings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th Interna- tional Joint Conference on Natural Language Pro- cessing (Volume 2: Short Papers), pages 445-450, Beijing, China. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Dialogue response rankingtraining with large-scale human feedback data", |
| "authors": [ |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yizhe", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiang Gao, Yizhe Zhang, Michel Galley, Chris Brock- ett, and Bill Dolan. 2020. Dialogue response rank- ingtraining with large-scale human feedback data. In EMNLP.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Better automatic evaluation of open-domain dialogue systems with contextualized embeddings", |
| "authors": [ |
| { |
| "first": "Johnny", |
| "middle": [], |
| "last": "Sarik Ghazarian", |
| "suffix": "" |
| }, |
| { |
| "first": "Aram", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Nanyun", |
| "middle": [], |
| "last": "Galstyan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Workshop on Methods for Optimizing and Evaluating Neural Language Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "82--89", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-2310" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarik Ghazarian, Johnny Wei, Aram Galstyan, and Nanyun Peng. 2019. Better automatic evaluation of open-domain dialogue systems with contextualized embeddings. In Proceedings of the Workshop on Methods for Optimizing and Evaluating Neural Lan- guage Generation, pages 82-89, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Predictive engagement: An efficient metric for automatic evaluation of opendomain dialogue systems", |
| "authors": [ |
| { |
| "first": "Sarik", |
| "middle": [], |
| "last": "Ghazarian", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Aram", |
| "middle": [], |
| "last": "Galstyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Nanyun", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "34", |
| "issue": "", |
| "pages": "7789--7796", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sarik Ghazarian, Ralph Weischedel, Aram Galstyan, and Nanyun Peng. 2020. Predictive engagement: An efficient metric for automatic evaluation of open- domain dialogue systems. In Proceedings of the AAAI Conference on Artificial Intelligence, vol- ume 34, pages 7789-7796.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Topical-Chat: Towards Knowledge-Grounded Open-Domain Conversations", |
| "authors": [ |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Gopalakrishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Behnam", |
| "middle": [], |
| "last": "Hedayatnia", |
| "suffix": "" |
| }, |
| { |
| "first": "Qinlang", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Gottardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Kwatra", |
| "suffix": "" |
| }, |
| { |
| "first": "Anu", |
| "middle": [], |
| "last": "Venkatesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Raefer", |
| "middle": [], |
| "last": "Gabriel", |
| "suffix": "" |
| }, |
| { |
| "first": "Dilek", |
| "middle": [], |
| "last": "Hakkani-T\u00fcr", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proc. Interspeech 2019", |
| "volume": "", |
| "issue": "", |
| "pages": "1891--1895", |
| "other_ids": { |
| "DOI": [ |
| "10.21437/Interspeech.2019-3079" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karthik Gopalakrishnan, Behnam Hedayatnia, Qin- lang Chen, Anna Gottardi, Sanjeev Kwatra, Anu Venkatesh, Raefer Gabriel, and Dilek Hakkani- T\u00fcr. 2019. Topical-Chat: Towards Knowledge- Grounded Open-Domain Conversations. In Proc. In- terspeech 2019, pages 1891-1895.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "UNION: An Unreferenced Metric for Evaluating Open-ended Story Generation", |
| "authors": [ |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Guan", |
| "suffix": "" |
| }, |
| { |
| "first": "Minlie", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference 22 on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "9157--9166", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.736" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jian Guan and Minlie Huang. 2020. UNION: An Un- referenced Metric for Evaluating Open-ended Story Generation. In Proceedings of the 2020 Conference 22 on Empirical Methods in Natural Language Process- ing (EMNLP), pages 9157-9166, Online. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Overview of the ninth dialog system technology challenge: Dstc9. Proceedings of the 9th Dialog System Technology Challenge Workshop in AAAI2021", |
| "authors": [ |
| { |
| "first": "Chulaka", |
| "middle": [], |
| "last": "Gunasekara", |
| "suffix": "" |
| }, |
| { |
| "first": "Seokhwan", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Fernando", |
| "suffix": "" |
| }, |
| { |
| "first": "D'", |
| "middle": [], |
| "last": "Haro", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhinav", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yun-Nung", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihail", |
| "middle": [], |
| "last": "Eric", |
| "suffix": "" |
| }, |
| { |
| "first": "Behnam", |
| "middle": [], |
| "last": "Hedayatnia", |
| "suffix": "" |
| }, |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Gopalakrishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chao-Wei", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chulaka Gunasekara, Seokhwan Kim, Luis Fernando D'Haro, Abhinav Rastogi, Yun-Nung Chen, Mihail Eric, Behnam Hedayatnia, Karthik Gopalakrishnan, Yang Liu, Chao-Wei Huang, et al. 2021. Overview of the ninth dialog system technology challenge: Dstc9. Proceedings of the 9th Dialog System Tech- nology Challenge Workshop in AAAI2021.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Topicbased evaluation for conversational bots", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Fenfei Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandra", |
| "middle": [], |
| "last": "Metallinou", |
| "suffix": "" |
| }, |
| { |
| "first": "Anirudh", |
| "middle": [], |
| "last": "Khatri", |
| "suffix": "" |
| }, |
| { |
| "first": "Anu", |
| "middle": [], |
| "last": "Raju", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Venkatesh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ram", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fenfei Guo, A. Metallinou, Chandra Khatri, Anirudh Raju, Anu Venkatesh, and A. Ram. 2018. Topic- based evaluation for conversational bots. ArXiv, abs/1801.03622.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Noisecontrastive estimation: A new estimation principle for unnormalized statistical models", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Gutmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Aapo", |
| "middle": [], |
| "last": "Hyv\u00e4rinen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics", |
| "volume": "", |
| "issue": "", |
| "pages": "297--304", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Gutmann and Aapo Hyv\u00e4rinen. 2010. Noise- contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceed- ings of the Thirteenth International Conference on Artificial Intelligence and Statistics, pages 297-304. JMLR Workshop and Conference Proceedings.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Endto-end conversation modeling track in dstc6", |
| "authors": [ |
| { |
| "first": "Chiori", |
| "middle": [], |
| "last": "Hori", |
| "suffix": "" |
| }, |
| { |
| "first": "Takaaki", |
| "middle": [], |
| "last": "Hori", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.07440" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chiori Hori and Takaaki Hori. 2017. End- to-end conversation modeling track in dstc6. arXiv:1706.07440.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "GRADE: Automatic graphenhanced coherence metric for evaluating opendomain dialogue systems", |
| "authors": [ |
| { |
| "first": "Lishan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng", |
| "middle": [], |
| "last": "Ye", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinghui", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodan", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "9230--9240", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.742" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lishan Huang, Zheng Ye, Jinghui Qin, Liang Lin, and Xiaodan Liang. 2020. GRADE: Automatic graph- enhanced coherence metric for evaluating open- domain dialogue systems. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9230-9240, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Raquel Urtasun, and Sanja Fidler. 2015. Skip-thought vectors", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Yukun", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Richard", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Zemel", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Torralba", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.06726" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Kiros, Yukun Zhu, Ruslan Salakhutdinov, Richard S Zemel, Antonio Torralba, Raquel Urtasun, and Sanja Fidler. 2015. Skip-thought vectors. arXiv preprint arXiv:1506.06726.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Pone: A novel automatic evaluation metric for open-domain generative dialogue systems", |
| "authors": [ |
| { |
| "first": "Tian", |
| "middle": [], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Xian-Ling", |
| "middle": [], |
| "last": "Mao", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyan", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Heyan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ACM Trans. Inf. Syst", |
| "volume": "39", |
| "issue": "1", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3423168" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tian Lan, Xian-Ling Mao, Wei Wei, Xiaoyan Gao, and Heyan Huang. 2020. Pone: A novel automatic eval- uation metric for open-domain generative dialogue systems. ACM Trans. Inf. Syst., 39(1).", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "A diversity-promoting objective function for neural conversation models", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "110--119", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-1014" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016. A diversity-promoting ob- jective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 110-119, San Diego, California. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "DailyDialog: A manually labelled multi-turn dialogue dataset", |
| "authors": [ |
| { |
| "first": "Yanran", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenjie", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziqiang", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuzi", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "986--995", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yanran Li, Hui Su, Xiaoyu Shen, Wenjie Li, Ziqiang Cao, and Shuzi Niu. 2017. DailyDialog: A manu- ally labelled multi-turn dialogue dataset. In Proceed- ings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Pa- pers), pages 986-995, Taipei, Taiwan. Asian Federa- tion of Natural Language Processing.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Addressing inquiries about history: An efficient and practical framework for evaluating open-domain chatbot consistency", |
| "authors": [ |
| { |
| "first": "Zekang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinchao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengcong", |
| "middle": [], |
| "last": "Fei", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Findings of Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zekang Li, Jinchao Zhang, Zhengcong Fei, Yang Feng, and Jie Zhou. 2021a. Addressing inquiries about his- tory: An efficient and practical framework for evalu- ating open-domain chatbot consistency. In Findings of Proceedings of the 59th Annual Meeting of the As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Conversations are not flat: Modeling the intrinsic information flow between dialogue utterances", |
| "authors": [ |
| { |
| "first": "Zekang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jinchao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengcong", |
| "middle": [], |
| "last": "Fei", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Feng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zekang Li, Jinchao Zhang, Zhengcong Fei, Yang Feng, and Jie Zhou. 2021b. Conversations are not flat: Modeling the intrinsic information flow between dia- logue utterances. In Proceedings of the 59th Annual Meeting of the Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "ROUGE: A package for automatic evaluation of summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Text Summarization Branches Out", |
| "volume": "", |
| "issue": "", |
| "pages": "74--81", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. ROUGE: A package for auto- matic evaluation of summaries. In Text Summariza- tion Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "How NOT to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation", |
| "authors": [ |
| { |
| "first": "Chia-Wei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Noseworthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Charlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2122--2132", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1230" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chia-Wei Liu, Ryan Lowe, Iulian Serban, Mike Nose- worthy, Laurent Charlin, and Joelle Pineau. 2016a. How NOT to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 2122-2132, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "How not to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation", |
| "authors": [ |
| { |
| "first": "Chia-Wei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Iulian", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Noseworthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Charlin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1603.08023" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chia-Wei Liu, Ryan Lowe, Iulian V Serban, Michael Noseworthy, Laurent Charlin, and Joelle Pineau. 2016b. How not to evaluate your dialogue system: An empirical study of unsupervised evaluation met- rics for dialogue response generation. arXiv preprint arXiv:1603.08023.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Towards an automatic Turing test: Learning to evaluate dialogue responses", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Noseworthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Vlad Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Angelard-Gontier", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1116--1126", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1103" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Lowe, Michael Noseworthy, Iulian Vlad Ser- ban, Nicolas Angelard-Gontier, Yoshua Bengio, and Joelle Pineau. 2017. Towards an automatic Tur- ing test: Learning to evaluate dialogue responses. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1116-1126, Vancouver, Canada. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "The Ubuntu dialogue corpus: A large dataset for research in unstructured multi-turn dialogue systems", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "Nissan", |
| "middle": [], |
| "last": "Pow", |
| "suffix": "" |
| }, |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "285--294", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W15-4640" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Lowe, Nissan Pow, Iulian Serban, and Joelle Pineau. 2015. The Ubuntu dialogue corpus: A large dataset for research in unstructured multi-turn dia- logue systems. In Proceedings of the 16th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 285-294, Prague, Czech Re- public. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Cc-news-en: A large english news corpus", |
| "authors": [ |
| { |
| "first": "Joel", |
| "middle": [], |
| "last": "Mackenzie", |
| "suffix": "" |
| }, |
| { |
| "first": "Rodger", |
| "middle": [], |
| "last": "Benham", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthias", |
| "middle": [], |
| "last": "Petri", |
| "suffix": "" |
| }, |
| { |
| "first": "Johanne", |
| "middle": [ |
| "R" |
| ], |
| "last": "Trippas", |
| "suffix": "" |
| }, |
| { |
| "first": "Shane", |
| "middle": [], |
| "last": "Culpepper", |
| "suffix": "" |
| }, |
| { |
| "first": "Alistair", |
| "middle": [], |
| "last": "Moffat", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 29th ACM International Conference on Information & Knowledge Management", |
| "volume": "", |
| "issue": "", |
| "pages": "3077--3084", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joel Mackenzie, Rodger Benham, Matthias Petri, Jo- hanne R Trippas, J Shane Culpepper, and Alistair Moffat. 2020. Cc-news-en: A large english news corpus. In Proceedings of the 29th ACM Inter- national Conference on Information & Knowledge Management, pages 3077-3084.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Unsupervised evaluation of interactive dialog with Di-aloGPT", |
| "authors": [ |
| { |
| "first": "Shikib", |
| "middle": [], |
| "last": "Mehri", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxine", |
| "middle": [], |
| "last": "Eskenazi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "225--235", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shikib Mehri and Maxine Eskenazi. 2020a. Unsu- pervised evaluation of interactive dialog with Di- aloGPT. In Proceedings of the 21th Annual Meet- ing of the Special Interest Group on Discourse and Dialogue, pages 225-235, 1st virtual meeting. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "USR: An unsupervised and reference free evaluation metric for dialog generation", |
| "authors": [ |
| { |
| "first": "Shikib", |
| "middle": [], |
| "last": "Mehri", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxine", |
| "middle": [], |
| "last": "Eskenazi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "681--707", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.64" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shikib Mehri and Maxine Eskenazi. 2020b. USR: An unsupervised and reference free evaluation metric for dialog generation. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 681-707, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Language model transformers as evaluators for open-domain dialogues", |
| "authors": [ |
| { |
| "first": "Rostislav", |
| "middle": [], |
| "last": "Nedelchev", |
| "suffix": "" |
| }, |
| { |
| "first": "Jens", |
| "middle": [], |
| "last": "Lehmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Ricardo", |
| "middle": [], |
| "last": "Usbeck", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6797--6808", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.coling-main.599" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rostislav Nedelchev, Jens Lehmann, and Ricardo Us- beck. 2020. Language model transformers as eval- uators for open-domain dialogues. In Proceedings of the 28th International Conference on Compu- tational Linguistics, pages 6797-6808, Barcelona, Spain (Online). International Committee on Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Why we need new evaluation metrics for NLG", |
| "authors": [ |
| { |
| "first": "Jekaterina", |
| "middle": [], |
| "last": "Novikova", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Du\u0161ek", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanda", |
| "middle": [ |
| "Cercas" |
| ], |
| "last": "Curry", |
| "suffix": "" |
| }, |
| { |
| "first": "Verena", |
| "middle": [], |
| "last": "Rieser", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2241--2252", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1238" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, Amanda Cer- cas Curry, and Verena Rieser. 2017. Why we need new evaluation metrics for NLG. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2241-2252, Copenhagen, Denmark. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Towards holistic and automatic evaluation of open-domain dialogue generation", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Erik", |
| "middle": [], |
| "last": "Nijkamp", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenjuan", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Linqi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yixian", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kewei", |
| "middle": [], |
| "last": "Tu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3619--3629", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.333" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang, Erik Nijkamp, Wenjuan Han, Linqi Zhou, Yixian Liu, and Kewei Tu. 2020. Towards holistic and automatic evaluation of open-domain dialogue generation. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 3619-3629, Online. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/1073083.1073135" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Deconstruct to reconstruct a configurable evaluation metric for open-domain dialogue systems", |
| "authors": [ |
| { |
| "first": "Vitou", |
| "middle": [], |
| "last": "Phy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Akiko", |
| "middle": [], |
| "last": "Aizawa", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4164--4178", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.coling-main.368" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vitou Phy, Yang Zhao, and Akiko Aizawa. 2020. Deconstruct to reconstruct a configurable evalua- tion metric for open-domain dialogue systems. In Proceedings of the 28th International Conference on Computational Linguistics, pages 4164-4178, Barcelona, Spain (Online). International Committee on Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Exploring the limits of transfer learning with a unified text-totext transformer", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharan", |
| "middle": [], |
| "last": "Narang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Matena", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanqi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "J" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "21", |
| "issue": "140", |
| "pages": "1--67", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Kather- ine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to- text transformer. Journal of Machine Learning Re- search, 21(140):1-67.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Towards empathetic opendomain conversation models: A new benchmark and dataset", |
| "authors": [ |
| { |
| "first": "Hannah", |
| "middle": [], |
| "last": "Rashkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "Michael" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Y-Lan", |
| "middle": [], |
| "last": "Boureau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5370--5381", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1534" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hannah Rashkin, Eric Michael Smith, Margaret Li, and Y-Lan Boureau. 2019. Towards empathetic open- domain conversation models: A new benchmark and dataset. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguis- tics, pages 5370-5381, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Data-questeval: A referenceless metric for data to text semantic evaluation", |
| "authors": [ |
| { |
| "first": "Cl\u00e9ment", |
| "middle": [], |
| "last": "Rebuffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Scialom", |
| "suffix": "" |
| }, |
| { |
| "first": "Laure", |
| "middle": [], |
| "last": "Soulier", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Piwowarski", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Lamprier", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacopo", |
| "middle": [], |
| "last": "Staiano", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Scoutheeten", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Gallinari", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "ArXiv", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cl'ement Rebuffel, Thomas Scialom, Laure Soulier, Benjamin Piwowarski, Sylvain Lamprier, Jacopo Staiano, Geoffrey Scoutheeten, and P. Gallinari. 2021. Data-questeval: A referenceless metric for data to text semantic evaluation. ArXiv, abs/2104.07555.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "A structured review of the validity of BLEU", |
| "authors": [ |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Reiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Computational Linguistics", |
| "volume": "44", |
| "issue": "3", |
| "pages": "393--401", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/coli_a_00322" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ehud Reiter. 2018. A structured review of the validity of BLEU. Computational Linguistics, 44(3):393- 401.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Data-driven response generation in social media", |
| "authors": [ |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Cherry", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "B" |
| ], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "583--593", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan Ritter, Colin Cherry, and William B. Dolan. 2011. Data-driven response generation in social media. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 583-593, Edinburgh, Scotland, UK. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Automatic evaluation of nontask oriented dialog systems by using sentence embeddings projections and their dynamics", |
| "authors": [ |
| { |
| "first": "Mario", |
| "middle": [], |
| "last": "Rodr\u00edguez-Cantelar", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [ |
| "Fernando" |
| ], |
| "last": "D'Haro", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Mat\u00eda", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Conversational Dialogue Systems for the Next Decade", |
| "volume": "", |
| "issue": "", |
| "pages": "71--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mario Rodr\u00edguez-Cantelar, Luis Fernando D'Haro, and Fernando Mat\u00eda. 2021. Automatic evaluation of non- task oriented dialog systems by using sentence em- beddings projections and their dynamics. In Con- versational Dialogue Systems for the Next Decade, pages 71-84. Springer.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Improving dialog evaluation with a multi-reference adversarial dataset and large scale pretraining", |
| "authors": [ |
| { |
| "first": "Ananya", |
| "middle": [ |
| "B" |
| ], |
| "last": "Sai", |
| "suffix": "" |
| }, |
| { |
| "first": "Akash", |
| "middle": [ |
| "Kumar" |
| ], |
| "last": "Mohankumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Siddhartha", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitesh", |
| "middle": [ |
| "M" |
| ], |
| "last": "Khapra", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "810--827", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00347" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ananya B. Sai, Akash Kumar Mohankumar, Sid- dhartha Arora, and Mitesh M. Khapra. 2020. Im- proving dialog evaluation with a multi-reference ad- versarial dataset and large scale pretraining. Trans- actions of the Association for Computational Lin- guistics, 8:810-827.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Questeval: Summarization asks for fact-based evaluation", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Scialom", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul-Alexis", |
| "middle": [], |
| "last": "Dray", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Gallinari", |
| "suffix": "" |
| }, |
| { |
| "first": "Sylvain", |
| "middle": [], |
| "last": "Lamprier", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Piwowarski", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacopo", |
| "middle": [], |
| "last": "Staiano", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2103.12693" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Scialom, Paul-Alexis Dray, Patrick Gallinari, Sylvain Lamprier, Benjamin Piwowarski, Jacopo Staiano, and Alex Wang. 2021. Questeval: Sum- marization asks for fact-based evaluation. arXiv preprint arXiv:2103.12693.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "751--758", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "751-758, Belgium, Brussels. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Learning an unreferenced metric for online dialogue evaluation", |
| "authors": [ |
| { |
| "first": "Koustuv", |
| "middle": [], |
| "last": "Sinha", |
| "suffix": "" |
| }, |
| { |
| "first": "Prasanna", |
| "middle": [], |
| "last": "Parthasarathi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jasmine", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "L" |
| ], |
| "last": "Hamilton", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2430--2441", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.220" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Koustuv Sinha, Prasanna Parthasarathi, Jasmine Wang, Ryan Lowe, William L. Hamilton, and Joelle Pineau. 2020. Learning an unreferenced metric for online dialogue evaluation. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 2430-2441, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "BLEU is not suitable for the evaluation of text simplification", |
| "authors": [ |
| { |
| "first": "Elior", |
| "middle": [], |
| "last": "Sulem", |
| "suffix": "" |
| }, |
| { |
| "first": "Omri", |
| "middle": [], |
| "last": "Abend", |
| "suffix": "" |
| }, |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Rappoport", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "738--744", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D18-1081" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elior Sulem, Omri Abend, and Ari Rappoport. 2018. BLEU is not suitable for the evaluation of text sim- plification. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Process- ing, pages 738-744, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Ruber: An unsupervised method for automatic evaluation of open-domain dialog systems", |
| "authors": [ |
| { |
| "first": "Chongyang", |
| "middle": [], |
| "last": "Tao", |
| "suffix": "" |
| }, |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Mou", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongyan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "32", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chongyang Tao, Lili Mou, Dongyan Zhao, and Rui Yan. 2018. Ruber: An unsupervised method for au- tomatic evaluation of open-domain dialog systems. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "On evaluating and comparing conversational agents", |
| "authors": [ |
| { |
| "first": "Anu", |
| "middle": [], |
| "last": "Venkatesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandra", |
| "middle": [], |
| "last": "Khatri", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashwin", |
| "middle": [], |
| "last": "Ram", |
| "suffix": "" |
| }, |
| { |
| "first": "Fenfei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Raefer", |
| "middle": [], |
| "last": "Gabriel", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Nagar", |
| "suffix": "" |
| }, |
| { |
| "first": "Rohit", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Behnam", |
| "middle": [], |
| "last": "Hedayatnia", |
| "suffix": "" |
| }, |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Metallinou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "4", |
| "issue": "", |
| "pages": "60--68", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1801.03625" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anu Venkatesh, Chandra Khatri, Ashwin Ram, Fen- fei Guo, Raefer Gabriel, Ashish Nagar, Rohit Prasad, Ming Cheng, Behnam Hedayatnia, Ange- liki Metallinou, et al. 2018. On evaluating and comparing conversational agents. arXiv preprint arXiv:1801.03625, 4:60-68.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "PARADISE: A framework for evaluating spoken dialogue agents", |
| "authors": [ |
| { |
| "first": "Marilyn", |
| "middle": [ |
| "A" |
| ], |
| "last": "Walker", |
| "suffix": "" |
| }, |
| { |
| "first": "Diane", |
| "middle": [ |
| "J" |
| ], |
| "last": "Litman", |
| "suffix": "" |
| }, |
| { |
| "first": "Candace", |
| "middle": [ |
| "A" |
| ], |
| "last": "Kamm", |
| "suffix": "" |
| }, |
| { |
| "first": "Alicia", |
| "middle": [], |
| "last": "Abella", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "35th Annual Meeting of the Association for Computational Linguistics and 8th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "271--280", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/976909.979652" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marilyn A. Walker, Diane J. Litman, Candace A. Kamm, and Alicia Abella. 1997. PARADISE: A framework for evaluating spoken dialogue agents. In 35th Annual Meeting of the Association for Com- putational Linguistics and 8th Conference of the European Chapter of the Association for Computa- tional Linguistics, pages 271-280, Madrid, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "GLUE: A multi-task benchmark and analysis platform for natural language understanding", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanpreet", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop Black-boxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "353--355", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W18-5446" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Fe- lix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A multi-task benchmark and analysis plat- form for natural language understanding. In Pro- ceedings of the 2018 EMNLP Workshop Black- boxNLP: Analyzing and Interpreting Neural Net- works for NLP, pages 353-355, Brussels, Belgium. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Assessing dialogue systems with distribution distances", |
| "authors": [ |
| { |
| "first": "Jiannan", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yahui", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Deng", |
| "middle": [], |
| "last": "Cai", |
| "suffix": "" |
| }, |
| { |
| "first": "Huayang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Defu", |
| "middle": [], |
| "last": "Lian", |
| "suffix": "" |
| }, |
| { |
| "first": "Lemao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2105.02573" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiannan Xiang, Yahui Liu, Deng Cai, Huayang Li, Defu Lian, and Lemao Liu. 2021. Assessing dialogue systems with distribution distances. arXiv preprint arXiv:2105.02573.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "2020. uBLEU: Uncertainty-aware automatic evaluation method for open-domain dialogue systems", |
| "authors": [ |
| { |
| "first": "Tsuta", |
| "middle": [], |
| "last": "Yuma", |
| "suffix": "" |
| }, |
| { |
| "first": "Naoki", |
| "middle": [], |
| "last": "Yoshinaga", |
| "suffix": "" |
| }, |
| { |
| "first": "Masashi", |
| "middle": [], |
| "last": "Toyoda", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "199--206", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-srw.27" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsuta Yuma, Naoki Yoshinaga, and Masashi Toyoda. 2020. uBLEU: Uncertainty-aware automatic evalua- tion method for open-domain dialogue systems. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics: Student Re- search Workshop, pages 199-206, Online. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Dynaeval: Unifying turn and dialogue level evaluation", |
| "authors": [ |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [ |
| "Fernando" |
| ], |
| "last": "D'Haro", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Friedrichs", |
| "suffix": "" |
| }, |
| { |
| "first": "Grandee", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "The Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (ACL-IJCNLP 2021)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen Zhang, Yiming Chen, Luis Fernando D'Haro, Yan Zhang, Thomas Friedrichs, Grandee Lee, and Haizhou Li. 2021a. Dynaeval: Unifying turn and di- alogue level evaluation. In The Joint Conference of the 59th Annual Meeting of the Association for Com- putational Linguistics and the 11th International Joint Conference on Natural Language Processing (ACL-IJCNLP 2021), Online.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Deep am-fm: Toolkit for automatic dialogue evaluation", |
| "authors": [ |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [ |
| "Fernando" |
| ], |
| "last": "D'Haro", |
| "suffix": "" |
| }, |
| { |
| "first": "Rafael", |
| "middle": [ |
| "E" |
| ], |
| "last": "Banchs", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Friedrichs", |
| "suffix": "" |
| }, |
| { |
| "first": "Haizhou", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Conversational Dialogue Systems for the Next Decade", |
| "volume": "", |
| "issue": "", |
| "pages": "53--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chen Zhang, Luis Fernando D'Haro, Rafael E Banchs, Thomas Friedrichs, and Haizhou Li. 2021b. Deep am-fm: Toolkit for automatic dialogue evaluation. In Conversational Dialogue Systems for the Next Decade, pages 53-69. Springer.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Spearman correlation to different turn-level annotation qualities on the FED data. Spearman correlation when varying the dialog context length.", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| }, |
| "TABREF0": { |
| "text": "", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>presents results of the reference-free</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "text": "", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td colspan=\"11\">: Results on datasets with human reference. All values are statistically significant to p < 0.05, unless</td></tr><tr><td colspan=\"11\">marked by *. GRADE-DD and GRADE-ED indicate the GRADE-DailyDialog and GRADE-EmpatheticDialogue</td></tr><tr><td colspan=\"7\">dataset, respectively. PE is the abbreviation of PredictiveEngage metric.</td><td/><td/><td/><td/></tr><tr><td/><td colspan=\"2\">PE-DailyDialog</td><td/><td>FED</td><td/><td/><td/><td colspan=\"2\">DSTC9</td><td/></tr><tr><td/><td colspan=\"2\">Turn-Level</td><td colspan=\"2\">Turn-Level</td><td colspan=\"2\">Dialog-Level</td><td colspan=\"2\">Dialog-Level</td><td colspan=\"2\">System-Level</td></tr><tr><td/><td>P</td><td>S</td><td>P</td><td>S</td><td>P</td><td>S</td><td>P</td><td>S</td><td>P</td><td>S</td></tr><tr><td>QuestEval</td><td>0.296</td><td>0.341</td><td>0.037*</td><td>0.093*</td><td>-0.032*</td><td>0.080*</td><td>0.026*</td><td>0.043</td><td>0.604</td><td>0.527*</td></tr><tr><td>MAUDE</td><td>0.104</td><td>0.060*</td><td>0.018*</td><td>-0.094*</td><td>-0.047*</td><td>-0.280</td><td>0.059</td><td>0.042*</td><td>0.224*</td><td>0.045*</td></tr><tr><td>DEB</td><td>0.516</td><td>0.580</td><td>0.230</td><td>0.187</td><td>-0.130*</td><td>0.006*</td><td>0.085</td><td>0.131</td><td>0.683</td><td>0.473*</td></tr><tr><td>GRADE</td><td>0.600</td><td>0.622</td><td>0.134</td><td>0.118</td><td>-0.034*</td><td>-0.065*</td><td>-0.078</td><td>-0.070</td><td>-0.674</td><td>-0.482*</td></tr><tr><td>DynaEval</td><td>0.167</td><td>0.160</td><td>0.319</td><td>0.323</td><td>0.503</td><td>0.547</td><td>0.093</td><td>0.101</td><td>0.652</td><td>0.727</td></tr><tr><td>USR</td><td>0.582</td><td>0.640</td><td>0.114</td><td>0.117</td><td>0.093*</td><td>0.062*</td><td>0.019*</td><td>0.020*</td><td>0.149*</td><td>0.127*</td></tr><tr><td>USL-H</td><td>0.688</td><td>0.699</td><td>0.201</td><td>0.189</td><td>0.073*</td><td>0.152*</td><td>0.105</td><td>0.105</td><td>0.566*</td><td>0.755</td></tr><tr><td>DialogRPT</td><td>0.489</td><td>0.533</td><td>-0.118</td><td>-0.086*</td><td>-0.221</td><td>-0.214</td><td>0.076</td><td>0.069</td><td>0.685</td><td>0.555*</td></tr><tr><td>HolisticEval</td><td>0.368</td><td>0.365</td><td>0.122</td><td>0.125</td><td>-0.276</td><td>-0.304</td><td>0.015*</td><td>0.002*</td><td>-0.019*</td><td>-0.100*</td></tr><tr><td>PredictiveEngage</td><td>0.429</td><td>0.414</td><td>0.024*</td><td>0.094*</td><td>0.026*</td><td>0.155*</td><td>0.114</td><td>0.115</td><td>0.809</td><td>0.664</td></tr><tr><td>FED</td><td>0.164</td><td>0.159</td><td>0.120</td><td>0.095</td><td>0.222</td><td>0.320</td><td>0.128</td><td>0.120</td><td>0.559*</td><td>0.391*</td></tr><tr><td>FlowScore</td><td>-</td><td>-</td><td>-0.065*</td><td>-0.055*</td><td>-0.073*</td><td>-0.003*</td><td>0.147</td><td>0.140</td><td>0.907</td><td>0.900</td></tr><tr><td>FBD</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-0.669</td><td>-0.627</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "text": "", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "text": "Results on HolisticEval-DailyDialog. All values are statistically significant to p < 0.05, unless marked by *.", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>Interesting</td><td>Engaging</td><td>Specific</td><td>Relevant</td><td>Correct</td><td>Semantically appropriate Understandable</td><td>Fluent</td><td>Overall</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "text": "Wangchunshu Zhou and Ke Xu. 2020. Learning to compare for better training and evaluation of open domain natural language generation models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 9717-9724.", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>6 Acknowledgements</td><td/></tr><tr><td colspan=\"2\">Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur</td></tr><tr><td colspan=\"2\">Szlam, Douwe Kiela, and Jason Weston. 2018. Per-</td></tr><tr><td colspan=\"2\">sonalizing dialogue agents: I have a dog, do you</td></tr><tr><td>have pets too?</td><td>In Proceedings of the 56th An-</td></tr><tr><td colspan=\"2\">nual Meeting of the Association for Computational</td></tr><tr><td colspan=\"2\">Linguistics (Volume 1: Long Papers), pages 2204-</td></tr><tr><td colspan=\"2\">2213, Melbourne, Australia. Association for Com-</td></tr><tr><td colspan=\"2\">putational Linguistics.</td></tr><tr><td colspan=\"2\">Abigail See, Stephen Roller, Douwe Kiela, and Ja-</td></tr><tr><td colspan=\"2\">son Weston. 2019. What makes a good conver-</td></tr><tr><td colspan=\"2\">sation? how controllable attributes affect human</td></tr><tr><td colspan=\"2\">judgments. In Proceedings of the 2019 Conference</td></tr><tr><td colspan=\"2\">of the North American Chapter of the Association</td></tr><tr><td colspan=\"2\">for Computational Linguistics: Human Language</td></tr><tr><td colspan=\"2\">Technologies, Volume 1 (Long and Short Papers),</td></tr><tr><td colspan=\"2\">pages 1702-1723, Minneapolis, Minnesota. Associ-</td></tr><tr><td colspan=\"2\">ation for Computational Linguistics.</td></tr><tr><td colspan=\"2\">Thibault Sellam, Dipanjan Das, and Ankur Parikh.</td></tr><tr><td colspan=\"2\">2020. BLEURT: Learning robust metrics for text</td></tr><tr><td colspan=\"2\">generation. In Proceedings of the 58th Annual Meet-</td></tr><tr><td colspan=\"2\">ing of the Association for Computational Linguistics,</td></tr><tr><td colspan=\"2\">pages 7881-7892, Online. Association for Computa-</td></tr><tr><td>tional Linguistics.</td><td/></tr><tr><td/><td>3) Simple combinations</td></tr><tr><td colspan=\"2\">of metrics yield promising results. Future work</td></tr><tr><td colspan=\"2\">should explore more sophisticated ways for com-</td></tr><tr><td>bining metrics.</td><td/></tr><tr><td>21</td><td/></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF6": { |
| "text": "The metrics that were not assessed in this paper and the reasons behind that choice.", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF8": { |
| "text": "Statistics of quality-annotated datasets. Ctx, Ref, and Hyp indicate dialog context, human reference, and model hypothesis, respectively. Trans and ED are the abbreviation of the Transformer and EmpatheticDialogue respectively.", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF10": { |
| "text": "Results on the DSTC6 data. All values are statistically significant to p < 0.05, unless marked by *.", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>ADEM BERT-RUBER BERTScore BLEU-4 BLEURT DEB Deep AM-FM DialogRPT DynaEval FED FlowScore GRADE HolisticEval MAUDE METEOR PONE PredictiveEngage QuestEval ROUGE-L RUBER USL-H USR</td><td>0.4 0.2 0.0 0.2 0.4 0.6 0.8 1.0</td><td>ADEM USR USL-H RUBER ROUGE-L QuestEval PredictiveEngage PONE METEOR MAUDE HolisticEval GRADE FlowScore FED DynaEval DialogRPT Deep AM-FM DEB BLEURT BLEU-4 BERTScore BERT-RUBER</td><td>0.4 0.2 0.0 0.2 0.4 0.6 0.8 1.0</td></tr><tr><td>ADEM BERT-RUBER BERTScore BLEU-4 BLEURT DEB Deep AM-FM DialogRPT DynaEval FED FlowScore GRADE HolisticEval MAUDE METEOR PONE PredictiveEngage QuestEval ROUGE-L RUBER USL-H USR</td><td/><td>ADEM BERT-RUBER BERTScore BLEU-4 BLEURT DEB Deep AM-FM DialogRPT DynaEval FED FlowScore GRADE HolisticEval MAUDE METEOR PONE PredictiveEngage QuestEval ROUGE-L RUBER USL-H USR</td><td/></tr><tr><td>(a) Pearson</td><td/><td>(b) Spearman</td><td/></tr><tr><td colspan=\"4\">Figure 3: Pearson and Spearman correlation between different system outputs. We use datasets with human</td></tr><tr><td>references to compute scores.</td><td/><td/><td/></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF11": { |
| "text": "Results on evaluating different response generation models. All values are statistically significant to p < 0.05, unless marked by *.", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td/><td colspan=\"2\">USR-TopicalChat</td><td colspan=\"2\">USR-PersonaChat</td><td colspan=\"2\">GRADE-ConvAI2</td><td colspan=\"2\">GRADE-DailyDialog</td><td colspan=\"2\">GRADE-EmpatheticDialogue</td><td colspan=\"2\">DSTC6</td></tr><tr><td/><td>P</td><td>S</td><td>P</td><td>S</td><td>P</td><td>S</td><td>P</td><td>S</td><td>P</td><td>S</td><td>P</td><td>S</td></tr><tr><td>PredictiveEngage</td><td>0.222</td><td>0.310</td><td>-0.003*</td><td>0.033*</td><td>0.154</td><td>0.164</td><td>-0.133</td><td>-0.135</td><td>-0.032*</td><td>-0.078*</td><td>0.043</td><td>0.004*</td></tr><tr><td>+ RUBER</td><td>0.283</td><td>0.327</td><td>0.057*</td><td>0.096*</td><td>0.105</td><td>0.102</td><td>-0.149</td><td>-0.156</td><td>-0.065*</td><td>-0.055*</td><td>0.103</td><td>0.055</td></tr><tr><td>+ PONE</td><td>0.308</td><td>0.350</td><td>0.218</td><td>0.209</td><td>0.333</td><td>0.339</td><td>-0.034*</td><td>-0.035*</td><td>0.077*</td><td>0.069*</td><td>0.156</td><td>0.122</td></tr><tr><td>USR</td><td>0.412</td><td>0.423</td><td>0.440</td><td>0.418</td><td>0.501</td><td>0.500</td><td>0.057*</td><td>0.057*</td><td>0.264</td><td>0.255</td><td>0.184</td><td>0.166</td></tr><tr><td>+ GRADE</td><td>0.424</td><td>0.432</td><td>0.456</td><td>0.430</td><td>0.523</td><td>0.528</td><td>0.081*</td><td>0.078*</td><td>0.281</td><td>0.267</td><td>0.191</td><td>0.173</td></tr><tr><td>+ USL-H</td><td>0.429</td><td>0.440</td><td>0.468</td><td>0.451</td><td>0.517</td><td>0.522</td><td>0.062*</td><td>0.062*</td><td>0.278</td><td>0.265</td><td>0.197</td><td>0.178</td></tr><tr><td>+ DEB</td><td>0.359</td><td>0.362</td><td>0.453</td><td>0.473</td><td>0.552</td><td>0.551</td><td>0.222</td><td>0.190</td><td>0.391</td><td>0.385</td><td>0.248</td><td>0.234</td></tr><tr><td>+ GRADE + PONE</td><td>0.435</td><td>0.447</td><td>0.473</td><td>0.452</td><td>0.535</td><td>0.539</td><td>0.089</td><td>0.086</td><td>0.289</td><td>0.270</td><td>0.206</td><td>0.187</td></tr><tr><td>+ GRADE + PONE + PE</td><td>0.447</td><td>0.468</td><td>0.468</td><td>0.449</td><td>0.534</td><td>0.538</td><td>0.072*</td><td>0.070*</td><td>0.275</td><td>0.261</td><td>0.202</td><td>0.180</td></tr><tr><td>+ GRADE + USL-H + DEB</td><td>0.377</td><td>0.382</td><td>0.476</td><td>0.495</td><td>0.571</td><td>0.579</td><td>0.229</td><td>0.198</td><td>0.399</td><td>0.387</td><td>0.254</td><td>0.238</td></tr><tr><td>GRADE</td><td>0.200</td><td>0.217</td><td>0.358</td><td>0.352</td><td>0.566</td><td>0.571</td><td>0.278</td><td>0.253</td><td>0.330</td><td>0.297</td><td>0.119</td><td>0.122</td></tr><tr><td>+ PONE</td><td>0.282</td><td>0.297</td><td>0.435</td><td>0.436</td><td>0.547</td><td>0.556</td><td>0.275</td><td>0.279</td><td>0.306</td><td>0.279</td><td>0.191</td><td>0.179</td></tr><tr><td>+ PONE + PE</td><td>0.335</td><td>0.348</td><td>0.373</td><td>0.366</td><td>0.515</td><td>0.530</td><td>0.118</td><td>0.117</td><td>0.219</td><td>0.186</td><td>0.181</td><td>0.155</td></tr><tr><td>+ USL-H</td><td>0.302</td><td>0.304</td><td>0.493</td><td>0.480</td><td>0.551</td><td>0.556</td><td>0.229</td><td>0.237</td><td>0.342</td><td>0.307</td><td>0.185</td><td>0.169</td></tr><tr><td>+ DEB</td><td>0.201</td><td>0.217</td><td>0.319</td><td>0.403</td><td>0.463</td><td>0.549</td><td>0.345</td><td>0.325</td><td>0.368</td><td>0.391</td><td>0.214</td><td>0.200</td></tr><tr><td>+ DEB + 
USL-H</td><td>0.219</td><td>0.270</td><td>0.347</td><td>0.465</td><td>0.484</td><td>0.547</td><td>0.339</td><td>0.311</td><td>0.377</td><td>0.395</td><td>0.225</td><td>0.215</td></tr><tr><td>USL-H</td><td>0.322</td><td>0.340</td><td>0.495</td><td>0.523</td><td>0.443</td><td>0.457</td><td>0.108*</td><td>0.093*</td><td>0.293</td><td>0.235</td><td>0.217</td><td>0.179</td></tr><tr><td>+ DEB</td><td>0.201</td><td>0.269</td><td>0.322</td><td>0.482</td><td>0.452</td><td>0.501</td><td>0.333</td><td>0.258</td><td>0.366</td><td>0.390</td><td>0.224</td><td>0.223</td></tr><tr><td>All-Metrics</td><td>0.459</td><td>0.473</td><td>0.522</td><td>0.534</td><td>0.566</td><td>0.561</td><td>0.163</td><td>0.149</td><td>0.366</td><td>0.349</td><td>0.288</td><td>0.268</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF12": { |
| "text": "Results of different combinations of metrics. All values are statistically significant to p < 0.05, unless marked by *. PE is the abbreviation of PredictiveEngage. The last row (All-Metrics), is the average of all the metrics.", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>33</td></tr></table>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |