| { |
| "paper_id": "2019", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:37:27.498843Z" |
| }, |
| "title": "Emotional Neural Language Generation Grounded in Situational Contexts", |
| "authors": [ |
| { |
| "first": "Sashank", |
| "middle": [], |
| "last": "Santhanam", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of North Carolina at Charlotte Charlotte", |
| "location": { |
| "region": "NC", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Samira", |
| "middle": [], |
| "last": "Shaikh", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of North Carolina at Charlotte Charlotte", |
| "location": { |
| "region": "NC", |
| "country": "USA" |
| } |
| }, |
| "email": "samirashaikh@uncc.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Emotional language generation is one of the keys to human-like artificial intelligence. Humans use different type of emotions depending on the situation of the conversation. Emotions also play an important role in mediating the engagement level with conversational partners. However, current conversational agents do not effectively account for emotional content in the language generation process. To address this problem, we develop a language modeling approach that generates affective content when the dialogue is situated in a given context. We use the recently released Empathetic-Dialogues corpus to build our models. Through detailed experiments, we find that our approach outperforms the stateof-the-art method on the perplexity metric by about 5 points and achieves a higher BLEU metric score.", |
| "pdf_parse": { |
| "paper_id": "2019", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Emotional language generation is one of the keys to human-like artificial intelligence. Humans use different type of emotions depending on the situation of the conversation. Emotions also play an important role in mediating the engagement level with conversational partners. However, current conversational agents do not effectively account for emotional content in the language generation process. To address this problem, we develop a language modeling approach that generates affective content when the dialogue is situated in a given context. We use the recently released Empathetic-Dialogues corpus to build our models. Through detailed experiments, we find that our approach outperforms the stateof-the-art method on the perplexity metric by about 5 points and achieves a higher BLEU metric score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Rapid advancement in the field of generative modeling through the use of neural networks has helped advance the creation of more intelligent conversational agents. Traditionally these conversational agents are built using seq2seq framework that is widely used in the field of machine translation (Vinyals and Le, 2015) . However, prior research has shown that engaging with these agents produces dull and generic responses whilst also being inconsistent with the emotional tone of conversation (Vinyals and Le, 2015; Li et al., 2016c) . These issues also affect engagement with the conversational agent, that leads to short conversations (Venkatesh et al., 2018) . Apart from producing engaging responses, understanding the situation and producing the right emotional response to a that situation is another desirable trait (Rashkin et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 296, |
| "end": 318, |
| "text": "(Vinyals and Le, 2015)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 494, |
| "end": 516, |
| "text": "(Vinyals and Le, 2015;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 517, |
| "end": 534, |
| "text": "Li et al., 2016c)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 638, |
| "end": 662, |
| "text": "(Venkatesh et al., 2018)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 824, |
| "end": 846, |
| "text": "(Rashkin et al., 2019)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Emotions are intrinsic to humans and help in creation of a more engaging conversation (Poria et al., 2019) . Recent work has focused on approaches towards incorporating emotion in conversational agents (Asghar et al., 2018; Zhou et al., 2018; Ghosh et al., 2017) , however these approaches are focused towards seq2seq task. We approach this problem of emotional generation as a form of transfer learning, using large pretrained language models. These language models, including BERT, GPT-2 and XL-Net, have helped achieve state of the art across several natural language understanding tasks (Devlin et al., 2019; Radford et al., 2019; Yang et al., 2019 ). However, their success in language modeling tasks have been inconsistent (Ziegler et al., 2019) . In our approach, we use these pretrained language models as the base model and perform transfer learning to fine-tune and condition these models on a given emotion. This helps towards producing more emotionally relevant responses for a given situation. In contrast, the work done by Rashkin et al. (2019) also uses large pretrained models but their approach is from the perspective of seq2seq task.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 106, |
| "text": "(Poria et al., 2019)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 202, |
| "end": 223, |
| "text": "(Asghar et al., 2018;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 224, |
| "end": 242, |
| "text": "Zhou et al., 2018;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 243, |
| "end": 262, |
| "text": "Ghosh et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 591, |
| "end": 612, |
| "text": "(Devlin et al., 2019;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 613, |
| "end": 634, |
| "text": "Radford et al., 2019;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 635, |
| "end": 652, |
| "text": "Yang et al., 2019", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 729, |
| "end": 751, |
| "text": "(Ziegler et al., 2019)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our work advances the field of conversational agents by applying the transfer learning approach towards generating emotionally relevant responses that is grounded on emotion and situational context. We find that our fine-tuning based approach outperforms the current state of the art approach on the automated metrics of the BLEU and perplexity. We also show that transfer learning approach helps produce well crafted responses on smaller dialogue corpus.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Consider the example show in Table 1 that shows a snippet of the conversation between a speaker and a listener that is grounded in a situation representing a type of emotion. Our goal is to pro-duce responses to conversation that are emotionally appropriate to the situation and emotion portrayed. We approach this problem through a lan-Emotion: Confident Situation: I just knew I was going to do well at work this morning.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 29, |
| "end": 36, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Speaker: I just knew I was going to do well at work this morning. I was prepared Listener: That is the way to go! Keep it up! Table 1 : Example of conversations between a speaker and a listener guage modeling approach. We use large pretrained language model as the base model for our response generation. This model is based on the transformer architecture and makes uses of the multi-headed self-attention mechanism to condition itself of the previously seen tokens to its left and produces a distribution over the target tokens. Our goal is to make the language model p(y) = p(y 1 , y 2 , ...., y t ; \u03b8) learn on new data and estimate the conditional probability p(y|x). Radford et al. (2019) demonstrated the effectiveness of language models to learn from a zero-shot approach in a multi-task setting. We take inspiration from this approach to condition our model on the task-specific variable p(y t |x, y <t ), where x is the task-specific variable, in this case the emotion label. We prepend the conditional variable (emotion, situational context) to the dialogue similar to the approach from Wolf et al (2019) . We ensure that that the sequences are separated by special tokens.", |
| "cite_spans": [ |
| { |
| "start": 1098, |
| "end": 1115, |
| "text": "Wolf et al (2019)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 126, |
| "end": 133, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In our experiments we use the Empathetic Dialogues dataset made available by Rashkin et al. (2019). Empathetic dialogues is crowdsourced dataset that contains dialogue grounded in a emotional situation. The dataset comprises of 32 emotion labels including surprised, excited, angry, proud, grateful. The speaker initiates the conversation using the grounded emotional situation and the listener responds in an appropriate manner 1 . ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In all our experiments, we use the GPT-2 pretrained language model. We use the publicly available model containing 117M parameters with 12 layers; each layer has 12 heads. We implemented our models using PyTorch Transformers. 2 The input sentences are tokenized using byte-pair encoding(BPE) (Sennrich et al., 2016 ) (vocabulary size of 50263). While decoding, we use the nucleus sampling (p = 0.9) approach instead of beam-search to overcome the drawbacks of beam search (Holtzman et al., 2019; Ippolito et al., 2019) . All our models are trained on a single TitanV GPU and takes around 2 hours to fine-tune the model. The fine-tuned models along with the configuration files and the code will be made available at: https://github.com/ sashank06/CCNLG-emotion.", |
| "cite_spans": [ |
| { |
| "start": 226, |
| "end": 227, |
| "text": "2", |
| "ref_id": null |
| }, |
| { |
| "start": 292, |
| "end": 314, |
| "text": "(Sennrich et al., 2016", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 472, |
| "end": 495, |
| "text": "(Holtzman et al., 2019;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 496, |
| "end": 518, |
| "text": "Ippolito et al., 2019)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Evaluating the quality of responses in open domain situations where the goal is not defined is an important area of research. Researchers have used methods such as BLEU , METEOR (Banerjee and Lavie, 2005), ROUGE (Lin, 2004) from machine translation and text summarization (Liu et al., 2016) tasks. BLEU and METEOR are based on word overlap between the proposed and ground truth responses; they do not adequately account for the diversity of responses that are possible for a given input utterance and show little to no correlation with human judgments (Liu et al., 2016) . We report on the BLEU (Papineni et al., 2002) and Perplexity (PPL) metric to provide a comparison with the current state-of-the-art methods. We also report our performance using other metrics such as length of responses produced by the model. Following, Mei et al (2017), we also report the diversity metric that helps us measure the ability of the model to promote diversity in responses (Li et al., 2016a) . Diversity is calculated as the as the number of distinct unigrams in the generation scaled by the total number of generated tokens (Mei et al., 2017; Li et al., 2016c) . We report on two additional automated metrics of readability and coherence. Readability quantifies the linguistic quality of text and the difficulty of the reader in understanding the text (Novikova et al., 2017) . We measure readability through the Flesch Reading Ease (FRE) (Kincaid et al., 1975) which computes the number of words, syllables and sentences in the text. Higher readability scores indicate that utterance is easier to read and comprehend. Similarly, coherence measures the ability of the dialogue system to produce responses consistent with the topic of conversation. To calculate coherence, we use the method proposed by .", |
| "cite_spans": [ |
| { |
| "start": 212, |
| "end": 223, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 272, |
| "end": 290, |
| "text": "(Liu et al., 2016)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 552, |
| "end": 570, |
| "text": "(Liu et al., 2016)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 595, |
| "end": 618, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 962, |
| "end": 980, |
| "text": "(Li et al., 2016a)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1114, |
| "end": 1132, |
| "text": "(Mei et al., 2017;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1133, |
| "end": 1150, |
| "text": "Li et al., 2016c)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1342, |
| "end": 1365, |
| "text": "(Novikova et al., 2017)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 1429, |
| "end": 1451, |
| "text": "(Kincaid et al., 1975)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Metrics", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We first compare the performance of our approach with the baseline results obtained from Rashkin et al. (2019) that uses a full transformer architecture (Vaswani et al., 2017) , consisting of an encoder and decoder. Table 3 provides a comparison of our approach with to the baseline approach.", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 175, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 216, |
| "end": 223, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automated Metrics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In Table 3 , we refer our \"Our Model Fine-Tuned\" as the baseline fine-tuned GPT-2 model trained on the dialogue and \"Our-model Emo-prepend\" as the GPT-2 model that is fine-tuned on the dialogues but also conditioned on the emotion displayed in the conversation. We find that fine-tuning the GPT-2 language model using a transfer learning approach helps us achieve a lower perplexity and a higher BLEU scores. The results from our approach are consistent with the empirical study conducted by Edunov et al (2019) that demonstrate the effectiveness of the using pre-trained model diminishes when added to the decoder network in an seq2seq approach. We also perform a comparison between our two models on the metrics of length, diversity, readability and coherence. We find that our baseline model produces less diverse responses compared to when the model is conditioned on emotion. We find that the our emoprepend model also higher a slightly higher readability score that our baseline model.", |
| "cite_spans": [ |
| { |
| "start": 492, |
| "end": 511, |
| "text": "Edunov et al (2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Automated Metrics", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "To assess the quality of generations, we conducted a MTurk human evaluation. We recruited a total of 15 participants and each participant was asked to evaluate 25 randomly sampled outputs from the test set on three metrics: 1. Readability -Is the response easy to understand, fluent and grammatical and does not have any consecutive repeating words. 2. Coherence -Is the response relevant to the context of the conversation. 3. Emotional Appropriateness-Does the response convey emotion suitable to the context of the conversation? Table 5 shows the results obtained from the human evaluation comparing the performance of our fine-tuned, emotion pre-pend model to the groundtruth response. We find that our fine-tuned model outperforms the emo-prepend on all three metrics from the ratings provided by the human ratings.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 532, |
| "end": 539, |
| "text": "Table 5", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Qualitative Evaluation", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The area of dialogue systems has been studied extensively in both open-domain (Niu and Bansal, 2018) and goal-oriented (Lipton et al., 2018) situations. Extant approaches towards building dialogue systems has been done predominantly through the seq2seq framework (Vinyals and Le, 2015) . However, prior research has shown that these systems are prone to producing dull and generic responses that causes engagement with the human to be affected (Vinyals and Le, 2015; Venkatesh et al., 2018) . Researchers have tackled this problem of dull and generic responses through different optimization function such as MMI (Li et al., 2016b) and through reinforcement learning approaches (Li et al., 2016d) . Alternative approaches towards generating more engaging responses is by grounding them in personality of the speakers that enables in creating more personalized and consistent responses (Li et al., 2016c; Wolf et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 78, |
| "end": 100, |
| "text": "(Niu and Bansal, 2018)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 119, |
| "end": 140, |
| "text": "(Lipton et al., 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 263, |
| "end": 285, |
| "text": "(Vinyals and Le, 2015)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 444, |
| "end": 466, |
| "text": "(Vinyals and Le, 2015;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 467, |
| "end": 490, |
| "text": "Venkatesh et al., 2018)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 613, |
| "end": 631, |
| "text": "(Li et al., 2016b)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 678, |
| "end": 696, |
| "text": "(Li et al., 2016d)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 885, |
| "end": 903, |
| "text": "(Li et al., 2016c;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 904, |
| "end": 922, |
| "text": "Wolf et al., 2019)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Several other works have focused on creating more engaging responses by producing affective responses. One of the earlier works to incorporate affect through language modeling is the work done by Ghosh et al. (Ghosh et al., 2017) . This work leverages the LIWC (Pennebaker et al., 2001 ) text analysis platform for affective features. Alternative approaches of inducing emotion in generated responses from a seq2seq framework include the work done by Zhou et al(2018) that uses internal and external memory, Asghar et al. (2018) (Vaswani et al., 2017) . These trans-formers models have also helped created large pretrained language models such as BERT (Devlin et al., 2019) , XL-NET (Yang et al., 2019) , GPT-2 (Radford et al., 2019) . However, these pre-trained models show inconsistent behavior towards language generation (Ziegler et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 196, |
| "end": 229, |
| "text": "Ghosh et al. (Ghosh et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 261, |
| "end": 285, |
| "text": "(Pennebaker et al., 2001", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 451, |
| "end": 467, |
| "text": "Zhou et al(2018)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 508, |
| "end": 528, |
| "text": "Asghar et al. (2018)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 529, |
| "end": 551, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 652, |
| "end": 673, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 683, |
| "end": 702, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 705, |
| "end": 733, |
| "text": "GPT-2 (Radford et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 825, |
| "end": 847, |
| "text": "(Ziegler et al., 2019)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this work, we study how pre-trained language models can be adopted for conditional language generation on smaller datasets. Specifically, we look at conditioning the pre-trained model on the emotion of the situation produce more affective responses that are appropriate for a particular situation. We notice that our fine-tuned and emoprepend models outperform the current state of the art approach relative to the automated metrics such as BLEU and perplexity on the validation set. We also notice that the emo-prepend approach does not out perform a simple fine tuning approach on the dataset. We plan to investigate the cause of this in future work from the perspective of better experiment design for evaluation (Santhanam and Shaikh, 2019) and analyzing the models focus when emotion is prepended to the sequence (Clark et al., 2019) . Along with this, we also notice other drawbacks in our work such as not having an emotional classifier to predict the outcome of the generated sentence, which we plan to address in future work.", |
| "cite_spans": [ |
| { |
| "start": 719, |
| "end": 747, |
| "text": "(Santhanam and Shaikh, 2019)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 821, |
| "end": 841, |
| "text": "(Clark et al., 2019)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "More information about the dataset made available on the(Rashkin et al., 2019)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/huggingface/ pytorch-transformers", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by the Defense Advanced Research Projects Agency (DARPA) under Contract No FA8650-18-C-7881. All statements of fact, opinion or conclusions contained herein are those of the authors and should not be construed as representing the official views or policies of AFRL, DARPA, or the U.S. Government. We thank the anonymous reviewers for the helpful feedback.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Affective neural response generation", |
| "authors": [ |
| { |
| "first": "Nabiha", |
| "middle": [], |
| "last": "Asghar", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Poupart", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Hoey", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lili", |
| "middle": [], |
| "last": "Mou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "European Conference on Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "154--166", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nabiha Asghar, Pascal Poupart, Jesse Hoey, Xin Jiang, and Lili Mou. 2018. Affective neural response gen- eration. In European Conference on Information Retrieval, pages 154-166. Springer.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Meteor: An automatic metric for mt evaluation with improved correlation with human judgments", |
| "authors": [ |
| { |
| "first": "Satanjeev", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the acl workshop on intrinsic and extrinsic evaluation measures for machine translation and/or summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "65--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. In Proceedings of the acl workshop on intrinsic and extrinsic evalu- ation measures for machine translation and/or sum- marization, pages 65-72.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "What does bert look at? an analysis of bert's attention", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Urvashi", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.04341" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D Manning. 2019. What does bert look at? an analysis of bert's attention. arXiv preprint arXiv:1906.04341.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Augmenting neural response generation with context-aware topical attention", |
| "authors": [ |
| { |
| "first": "Nouha", |
| "middle": [], |
| "last": "Dziri", |
| "suffix": "" |
| }, |
| { |
| "first": "Ehsan", |
| "middle": [], |
| "last": "Kamalloo", |
| "suffix": "" |
| }, |
| { |
| "first": "Kory", |
| "middle": [ |
| "W" |
| ], |
| "last": "Mathewson", |
| "suffix": "" |
| }, |
| { |
| "first": "Osmar", |
| "middle": [], |
| "last": "Zaiane", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1811.01063" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nouha Dziri, Ehsan Kamalloo, Kory W Mathewson, and Osmar Zaiane. 2018. Augmenting neural re- sponse generation with context-aware topical atten- tion. arXiv preprint arXiv:1811.01063.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Pre-trained language model representations for language generation", |
| "authors": [ |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Edunov", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [], |
| "last": "Baevski", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4052--4059", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1409" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sergey Edunov, Alexei Baevski, and Michael Auli. 2019. Pre-trained language model representations for language generation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 1 (Long and Short Papers), pages 4052-4059, Minneapolis, Min- nesota. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Affect-LM: A neural language model for customizable affective text generation", |
| "authors": [ |
| { |
| "first": "Sayan", |
| "middle": [], |
| "last": "Ghosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Mathieu", |
| "middle": [], |
| "last": "Chollet", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Laksana", |
| "suffix": "" |
| }, |
| { |
| "first": "Louis-Philippe", |
| "middle": [], |
| "last": "Morency", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Scherer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "634--642", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1059" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sayan Ghosh, Mathieu Chollet, Eugene Laksana, Louis-Philippe Morency, and Stefan Scherer. 2017. Affect-LM: A neural language model for customiz- able affective text generation. In Proceedings of the 55th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 634-642, Vancouver, Canada. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "The curious case of neural text degeneration", |
| "authors": [ |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Holtzman", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Buys", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxwell", |
| "middle": [], |
| "last": "Forbes", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1904.09751" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ari Holtzman, Jan Buys, Maxwell Forbes, and Yejin Choi. 2019. The curious case of neural text degen- eration. arXiv preprint arXiv:1904.09751.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Automatic dialogue generation with expressed emotions", |
| "authors": [ |
| { |
| "first": "Chenyang", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Osmar", |
| "middle": [], |
| "last": "Zaiane", |
| "suffix": "" |
| }, |
| { |
| "first": "Amine", |
| "middle": [], |
| "last": "Trabelsi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nouha", |
| "middle": [], |
| "last": "Dziri", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "49--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chenyang Huang, Osmar Zaiane, Amine Trabelsi, and Nouha Dziri. 2018. Automatic dialogue genera- tion with expressed emotions. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, Volume 2 (Short Pa- pers), pages 49-54.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Comparison of diverse decoding methods from conditional language models", |
| "authors": [ |
| { |
| "first": "Daphne", |
| "middle": [], |
| "last": "Ippolito", |
| "suffix": "" |
| }, |
| { |
| "first": "Reno", |
| "middle": [], |
| "last": "Kriz", |
| "suffix": "" |
| }, |
| { |
| "first": "Joao", |
| "middle": [], |
| "last": "Sedoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Kustikova", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3752--3762", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daphne Ippolito, Reno Kriz, Joao Sedoc, Maria Kustikova, and Chris Callison-Burch. 2019. Com- parison of diverse decoding methods from condi- tional language models. In Proceedings of the 57th Annual Meeting of the Association for Computa- tional Linguistics, pages 3752-3762, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Derivation of new readability formulas (automated readability index, fog count and flesch reading ease formula) for navy enlisted personnel", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [ |
| "Peter" |
| ], |
| "last": "Kincaid", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "P" |
| ], |
| "last": "Fishburne", |
| "suffix": "Jr" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "L" |
| ], |
| "last": "Rogers", |
| "suffix": "" |
| }, |
| { |
| "first": "Brad", |
| "middle": [ |
| "S" |
| ], |
| "last": "Chissom", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J Peter Kincaid, Robert P Fishburne Jr, Richard L Rogers, and Brad S Chissom. 1975. Derivation of new readability formulas (automated readability in- dex, fog count and flesch reading ease formula) for navy enlisted personnel.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A diversity-promoting objective function for neural conversation models", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "110--119", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-1014" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016a. A diversity-promoting ob- jective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 110-119. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A diversity-promoting objective function for neural conversation models", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "110--119", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N16-1014" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016b. A diversity-promoting ob- jective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 110-119, San Diego, California. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "A persona-based neural conversation model", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgios", |
| "middle": [], |
| "last": "Spithourakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "994--1003", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1094" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Georgios Sp- ithourakis, Jianfeng Gao, and Bill Dolan. 2016c. A persona-based neural conversation model. In Pro- ceedings of the 54th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 994-1003, Berlin, Germany. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Deep reinforcement learning for dialogue generation", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1192--1202", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1127" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Will Monroe, Alan Ritter, Dan Jurafsky, Michel Galley, and Jianfeng Gao. 2016d. Deep rein- forcement learning for dialogue generation. In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing, pages 1192- 1202. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Rouge: A package for automatic evaluation of summaries. Text Summarization Branches Out", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. Rouge: A package for auto- matic evaluation of summaries. Text Summarization Branches Out.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Bbq-networks: Efficient exploration in deep reinforcement learning for task-oriented dialogue systems", |
| "authors": [ |
| { |
| "first": "Zachary", |
| "middle": [], |
| "last": "Lipton", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiujun", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Lihong", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Faisal", |
| "middle": [], |
| "last": "Ahmed", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zachary Lipton, Xiujun Li, Jianfeng Gao, Lihong Li, Faisal Ahmed, and Li Deng. 2018. Bbq-networks: Efficient exploration in deep reinforcement learn- ing for task-oriented dialogue systems. In Thirty- Second AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "How not to evaluate your dialogue system: An empirical study of unsupervised evaluation metrics for dialogue response generation", |
| "authors": [ |
| { |
| "first": "Chia-Wei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Noseworthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Charlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2122--2132", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1230" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chia-Wei Liu, Ryan Lowe, Iulian Serban, Mike Nose- worthy, Laurent Charlin, and Joelle Pineau. 2016. How not to evaluate your dialogue system: An em- pirical study of unsupervised evaluation metrics for dialogue response generation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2122-2132. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Coherent dialogue with attention-based language models", |
| "authors": [ |
| { |
| "first": "Hongyuan", |
| "middle": [], |
| "last": "Mei", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [ |
| "R" |
| ], |
| "last": "Walter", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the National Conference on Artificial Intelligence (AAAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hongyuan Mei, Mohit Bansal, and Matthew R. Walter. 2017. Coherent dialogue with attention-based lan- guage models. In Proceedings of the National Con- ference on Artificial Intelligence (AAAI), San Fran- cisco, CA.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Polite dialogue generation without parallel data", |
| "authors": [ |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association of Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "373--389", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tong Niu and Mohit Bansal. 2018. Polite dialogue gen- eration without parallel data. Transactions of the As- sociation of Computational Linguistics, 6:373-389.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Why we need new evaluation metrics for NLG", |
| "authors": [ |
| { |
| "first": "Jekaterina", |
| "middle": [], |
| "last": "Novikova", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Du\u0161ek", |
| "suffix": "" |
| }, |
| { |
| "first": "Amanda", |
| "middle": [ |
| "Cercas" |
| ], |
| "last": "Curry", |
| "suffix": "" |
| }, |
| { |
| "first": "Verena", |
| "middle": [], |
| "last": "Rieser", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2241--2252", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1238" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, Amanda Cer- cas Curry, and Verena Rieser. 2017. Why we need new evaluation metrics for NLG. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2241-2252, Copenhagen, Denmark. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th annual meeting on association for computational linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting on association for compu- tational linguistics, pages 311-318. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Linguistic inquiry and word count: Liwc", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [ |
| "W" |
| ], |
| "last": "Pennebaker", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [ |
| "E" |
| ], |
| "last": "Francis", |
| "suffix": "" |
| }, |
| { |
| "first": "Roger", |
| "middle": [ |
| "J" |
| ], |
| "last": "Booth", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Mahway: Lawrence Erlbaum Associates", |
| "volume": "71", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James W Pennebaker, Martha E Francis, and Roger J Booth. 2001. Linguistic inquiry and word count: Liwc 2001. Mahway: Lawrence Erlbaum Asso- ciates, 71(2001):2001.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Emotion recognition in conversation: Research challenges, datasets, and recent advances", |
| "authors": [ |
| { |
| "first": "Soujanya", |
| "middle": [], |
| "last": "Poria", |
| "suffix": "" |
| }, |
| { |
| "first": "Navonil", |
| "middle": [], |
| "last": "Majumder", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1905.02947" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soujanya Poria, Navonil Majumder, Rada Mihalcea, and Eduard Hovy. 2019. Emotion recognition in conversation: Research challenges, datasets, and re- cent advances. arXiv preprint arXiv:1905.02947.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI Blog", |
| "volume": "", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, 1(8).", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Towards empathetic opendomain conversation models: a new benchmark and dataset", |
| "authors": [ |
| { |
| "first": "Hannah", |
| "middle": [], |
| "last": "Rashkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [ |
| "Michael" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Y-Lan", |
| "middle": [], |
| "last": "Boureau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hannah Rashkin, Eric Michael Smith, Margaret Li, and Y-Lan Boureau. 2019. Towards empathetic open- domain conversation models: a new benchmark and dataset. In ACL.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Towards best experiment design for evaluating dialogue system output", |
| "authors": [ |
| { |
| "first": "Sashank", |
| "middle": [], |
| "last": "Santhanam", |
| "suffix": "" |
| }, |
| { |
| "first": "Samira", |
| "middle": [], |
| "last": "Shaikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.10122" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sashank Santhanam and Samira Shaikh. 2019. Towards best experiment design for evaluat- ing dialogue system output. arXiv preprint arXiv:1909.10122.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Neural machine translation of rare words with subword units", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1715--1725", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P16-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715- 1725, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "On evaluating and comparing conversational agents", |
| "authors": [ |
| { |
| "first": "Anu", |
| "middle": [], |
| "last": "Venkatesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandra", |
| "middle": [], |
| "last": "Khatri", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashwin", |
| "middle": [], |
| "last": "Ram", |
| "suffix": "" |
| }, |
| { |
| "first": "Fenfei", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| }, |
| { |
| "first": "Raefer", |
| "middle": [], |
| "last": "Gabriel", |
| "suffix": "" |
| }, |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Nagar", |
| "suffix": "" |
| }, |
| { |
| "first": "Rohit", |
| "middle": [], |
| "last": "Prasad", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Behnam", |
| "middle": [], |
| "last": "Hedayatnia", |
| "suffix": "" |
| }, |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Metallinou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1801.03625" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anu Venkatesh, Chandra Khatri, Ashwin Ram, Fen- fei Guo, Raefer Gabriel, Ashish Nagar, Rohit Prasad, Ming Cheng, Behnam Hedayatnia, Ange- liki Metallinou, et al. 2018. On evaluating and comparing conversational agents. arXiv preprint arXiv:1801.03625.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "A neural conversational model", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.05869" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals and Quoc Le. 2015. A neural conversa- tional model. arXiv preprint arXiv:1506.05869.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Transfertransfo: A transfer learning approach for neural network based conversational agents", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1901.08149" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Victor Sanh, Julien Chaumond, and Clement Delangue. 2019. Transfertransfo: A transfer learning approach for neural network based conversational agents. arXiv preprint arXiv:1901.08149.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1906.08237" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Ruslan Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretrain- ing for language understanding. arXiv preprint arXiv:1906.08237.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Personalizing dialogue agents: I have a dog, do you have pets too?", |
| "authors": [ |
| { |
| "first": "Saizheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Dinan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jack", |
| "middle": [], |
| "last": "Urbanek", |
| "suffix": "" |
| }, |
| { |
| "first": "Arthur", |
| "middle": [], |
| "last": "Szlam", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2204--2213", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1205" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur Szlam, Douwe Kiela, and Jason Weston. 2018. Per- sonalizing dialogue agents: I have a dog, do you have pets too? In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2204- 2213, Melbourne, Australia. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Emotional chatting machine: Emotional conversation generation with internal and external memory", |
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Minlie", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tianyang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Zhou, Minlie Huang, Tianyang Zhang, Xiaoyan Zhu, and Bing Liu. 2018. Emotional chatting ma- chine: Emotional conversation generation with in- ternal and external memory. In Thirty-Second AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Encoderagnostic adaptation for conditional language generation", |
| "authors": [ |
| { |
| "first": "Zachary", |
| "middle": [ |
| "M" |
| ], |
| "last": "Ziegler", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Melas-Kyriazi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Gehrmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "M" |
| ], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1908.06938" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zachary M Ziegler, Luke Melas-Kyriazi, Sebastian Gehrmann, and Alexander M Rush. 2019. Encoder- agnostic adaptation for conditional language gener- ation. arXiv preprint arXiv:1908.06938.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "html": null, |
| "text": "provides the basic statistics of the corpus.", |
| "content": "<table><tr><td/><td colspan=\"2\">Train Valid.</td><td>Test</td></tr><tr><td colspan=\"2\">Num. Conversations 19433</td><td>2770</td><td>2547</td></tr><tr><td>Utterances</td><td colspan=\"3\">84324 12078 10973</td></tr><tr><td>Avg Length Conversations</td><td>4.31</td><td>4.36</td><td>4.31</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF1": { |
| "html": null, |
| "text": "Statistics of Empathetic Dialogue dataset used in our experiments", |
| "content": "<table/>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF2": { |
| "html": null, |
| "text": "that models emotion through af-", |
| "content": "<table><tr><td>Experiment</td><td>Valid PPL</td><td colspan=\"5\">BLEU Readability Coherence Length Diversity</td></tr><tr><td>Baseline</td><td/><td/><td/><td/><td/><td/></tr><tr><td>Fine-Tuned</td><td>21.24</td><td>6.27</td><td>x</td><td>x</td><td>x</td><td>x</td></tr><tr><td>(Rashkin et al., 2019)</td><td/><td/><td/><td/><td/><td/></tr><tr><td>Baseline</td><td/><td/><td/><td/><td/><td/></tr><tr><td>Emo-prepend</td><td>24.30</td><td>4.36</td><td>x</td><td>x</td><td>x</td><td>x</td></tr><tr><td>(Rashkin et al., 2019)</td><td/><td/><td/><td/><td/><td/></tr><tr><td>Our Model Fine-Tuned</td><td>18.32</td><td>7.71</td><td>0.78</td><td>0.93</td><td>9.77</td><td>0.0031</td></tr><tr><td>Our Model Emo-prepend</td><td>19.49</td><td>7.78</td><td>0.79</td><td>0.93</td><td>9.71</td><td>0.0033</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF3": { |
| "html": null, |
| "text": "Comparison of the performance of our model to the baseline model proposed by Rashkin et al. (2019) across a variety of automated metrics to provide a thorough comparison. x indicates that these metrics were not provided in the Rashkin et al. (2019) work.", |
| "content": "<table><tr><td>Input Conversation</td><td>Model</td><td>Outputs</td></tr><tr><td>Person A: I think I aced my supervisor</td><td>Ground Truth</td><td>Woo hoo! That is awesome! Congrats! When will you find out</td></tr><tr><td>exam!</td><td>Fine Tuned</td><td>What was it?</td></tr><tr><td/><td>Emo Prepend</td><td>Wow, that's impressive!</td></tr><tr><td>Person A: I had pizza at a restaurant a couple days ago and found a hair on my</td><td>Ground Truth</td><td>It is disgusting. What did you do?</td></tr><tr><td>pizza!</td><td>Fine Tuned</td><td>I was so angry</td></tr><tr><td/><td>Emo Prepend</td><td>That's terrible, did you clean it up?</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF4": { |
| "html": null, |
| "text": "Example generations from our two models along with the ground truth responses.", |
| "content": "<table><tr><td/><td/><td/><td>Emotional</td></tr><tr><td/><td colspan=\"2\">Readability Coherence</td><td>Appropriate-</td></tr><tr><td/><td/><td/><td>ness</td></tr><tr><td>Our Model Fine-Tuned</td><td>4.14</td><td>3.50</td><td>3.70</td></tr><tr><td>Our Model Emo-prepend</td><td>3.54</td><td>3.4</td><td>3.19</td></tr><tr><td>Ground Truth</td><td>3.92</td><td>3.86</td><td>4</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| }, |
| "TABREF5": { |
| "html": null, |
| "text": "", |
| "content": "<table><tr><td>: Human ratings demonstrating a comparison</td></tr><tr><td>between our models to the ground truth responses on</td></tr><tr><td>the metrics of readability, coherence and emotional ap-</td></tr><tr><td>propriateness</td></tr><tr><td>fective embeddings and Huang et al (2018) that</td></tr><tr><td>induce emotion through concatenation with input</td></tr><tr><td>sequence. More recently, introduction of trans-</td></tr><tr><td>former based approaches have helped advance the</td></tr><tr><td>state of art across several natural language under-</td></tr><tr><td>standing tasks</td></tr></table>", |
| "type_str": "table", |
| "num": null |
| } |
| } |
| } |
| } |