| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:31:33.089876Z" |
| }, |
| "title": "Towards Low-Resource Real-Time Assessment of Empathy in Counselling", |
| "authors": [ |
| { |
| "first": "Zixiu", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cagliari", |
| "location": {} |
| }, |
| "email": "zixiu.wu@philips.com" |
| }, |
| { |
| "first": "Diego", |
| "middle": [], |
| "last": "Reforgiato", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cagliari", |
| "location": {} |
| }, |
| "email": "diego.reforgiato@unica.it" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Riboni", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Cagliari", |
| "location": {} |
| }, |
| "email": "riboni@unica.it" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Gauging therapist empathy in counselling is an important component of understanding counselling quality. While session-level empathy assessment based on machine learning has been investigated extensively, it relies on relatively large amounts of well-annotated dialogue data, and real-time evaluation has been overlooked in the past. In this paper, we focus on the task of low-resource utterance-level binary empathy assessment. We train deep learning models on heuristically constructed empathy vs. non-empathy contrast in general conversations, and apply the models directly to therapeutic dialogues, assuming correlation between empathy manifested in those two domains. We show that such training yields poor performance in general, probe its causes, and examine the actual effect of learning from empathy contrast in general conversation. r/offmychest ... Speaker: being married to a depressed person is so lonely. that is all. thanks for listening. Listener: sorry to say this but it 's not worth being in a relationship if both of you are n't happy. r/CasualConversation ... Speaker: can you recommend me some good music ? i 've decided to expand my taste in music and i need some advice. what are your favourite songs , redditors ? Listener: something corporate , jimmy eat world , and fall out boy are my top three recommendations. RolePlayMI ... Client: well I guess if I don't want to take more pills I have to give up some of my sweets my cookies and my potato chips Therapist: so those types of sweets and crunchy stuff and salty stuff is is pretty important to you Ground Truth: Empathetic RolePlayMI ... Client: I do all of the above yeah Therapist: So you know the smoking is going be related to not just to some of the other negative consequences you know physically it it effects dental carries, dental cavities", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Gauging therapist empathy in counselling is an important component of understanding counselling quality. While session-level empathy assessment based on machine learning has been investigated extensively, it relies on relatively large amounts of well-annotated dialogue data, and real-time evaluation has been overlooked in the past. In this paper, we focus on the task of low-resource utterance-level binary empathy assessment. We train deep learning models on heuristically constructed empathy vs. non-empathy contrast in general conversations, and apply the models directly to therapeutic dialogues, assuming correlation between empathy manifested in those two domains. We show that such training yields poor performance in general, probe its causes, and examine the actual effect of learning from empathy contrast in general conversation. r/offmychest ... Speaker: being married to a depressed person is so lonely. that is all. thanks for listening. Listener: sorry to say this but it 's not worth being in a relationship if both of you are n't happy. r/CasualConversation ... Speaker: can you recommend me some good music ? i 've decided to expand my taste in music and i need some advice. what are your favourite songs , redditors ? Listener: something corporate , jimmy eat world , and fall out boy are my top three recommendations. RolePlayMI ... Client: well I guess if I don't want to take more pills I have to give up some of my sweets my cookies and my potato chips Therapist: so those types of sweets and crunchy stuff and salty stuff is is pretty important to you Ground Truth: Empathetic RolePlayMI ... Client: I do all of the above yeah Therapist: So you know the smoking is going be related to not just to some of the other negative consequences you know physically it it effects dental carries, dental cavities", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "As a pillar of psychotherapy, empathy is crucial to effective counselling, owing to its importance in building counsellor 1 -client rapport (Elliott et al., 2011) that can enable more effective interventions and better outcomes (McCambridge et al., 2011; Gaume et al., 2009) . In particular, \"listening with empathy\" is considered a guiding principle (Rollnick et al., 2008) for motivational interviewing (Miller and Rollnick, 2012) (MI), a psychotherapeutic approach widely adopted to elicit positive behaviour change by evoking motivation from clients. Gauging counsellor-side empathy is, therefore, essential to assessing MI integrity (Moyers et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 162, |
| "text": "(Elliott et al., 2011)", |
| "ref_id": null |
| }, |
| { |
| "start": 228, |
| "end": 254, |
| "text": "(McCambridge et al., 2011;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 255, |
| "end": 274, |
| "text": "Gaume et al., 2009)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 351, |
| "end": 374, |
| "text": "(Rollnick et al., 2008)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 638, |
| "end": 659, |
| "text": "(Moyers et al., 2016)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Empathy assessment for MI has conventionally been conducted manually by trained annotators, which requires extensive annotator training and transcript review. Since such a time-consuming and costly setup is difficult to scale up, recent years have seen attempts of automating the process with machine learning, including transcriptbased (Xiao et al., 2012; Gibson et al., 2015 , speech-based (Xiao et al., 2014 (Xiao et al., , 2015 , and multimodal (Xiao et al., 2016b) methods. Those works are, however, limited in that 1) therapist empathy is only assessed at session-level rather than utterance-level; 2) classical machine learning with heuristic feature engineering is used, while recent deep-learning frameworks have not been utilised for this purpose; 3) the machine-learning-based approaches all assume access to privately-owned sizeable corpora of therapeutic dialogues with empathy annotation at session level, but in reality such well-annotated data are often very limited, even more so at utterance level; and 4) the link between empathy manifested in general conversation and in MI counselling remains unexplored.", |
| "cite_spans": [ |
| { |
| "start": 337, |
| "end": 356, |
| "text": "(Xiao et al., 2012;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 357, |
| "end": 376, |
| "text": "Gibson et al., 2015", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 392, |
| "end": 410, |
| "text": "(Xiao et al., 2014", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 411, |
| "end": 431, |
| "text": "(Xiao et al., , 2015", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 449, |
| "end": 469, |
| "text": "(Xiao et al., 2016b)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we make the first attempt (to the best of our knowledge) at addressing those limitations while probing the correlation between empathy manifestations in different domains. Specifically, we employ pre-trained language models such as BERT (Devlin et al., 2019) for text-based binary classification of utterance-level therapist empathy, optionally taking the conversation context as input. We consider any counsellor utterance to be empathetic if it shows empathy, and non-empathetic if it does not (ranging from neutral to apathetic). Our models have no access to counselling conversations during their training and validation, as we experiment with learning from contrast of empathy vs. non-empathy in out-of-domain (OOD) training data. To that end, we leverage publicly available datasets of general conversations with heuristic empathy labels (Rashkin et al., 2019; Zhong et al., 2020) for OOD training, investigating the connections between general-conversational empathy and therapeutic empathy, as illustrated in Figure 1 .", |
| "cite_spans": [ |
| { |
| "start": 251, |
| "end": 272, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 858, |
| "end": 880, |
| "text": "(Rashkin et al., 2019;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 881, |
| "end": 900, |
| "text": "Zhong et al., 2020)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1031, |
| "end": 1039, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To benchmark the models, we manually anno- Figure 1 : Training a binary empathy classifier on heuristically constructed empathetic vs. non-empathetic utterances in general conversations (i.e. out-of-domain w.r.t. MI), and then testing it on MI conversations. In this case, the empathy contrast for training is r/OffMyChest vs. r/CasualConversation. The classifier can take only the listener/therapist utterance (bold) as input or additionally use the preceding speaker/client utterance (italic).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 43, |
| "end": 51, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "tated utterance-level empathy for a subset of transcribed high-vs. low-quality counselling demonstrations (P\u00e9rez-Rosas et al., 2019) that are publicly available. We also build unsupervised baselines for the task by a) formulating binary empathy classification as natural language inference (NLI), as proposed by Yin et al. (2019) , and b) tackling the surrogate task of client-counsellor agreement via NLI, under the assumption that an empathetic reply from the counsellor tends to show accordance with the client utterance in the preceding turn. Our experiments show that models trained on OOD empathy contrast are not sufficiently accurate predictors of MI empathy/non-empathy, even though the benefit of such training can be observed when compared to training on OOD data without empathy contrast. Upon probing, we argue that more fine-grained (e.g. sentence-level) empathy annotation and prediction could yield better results.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 132, |
| "text": "(P\u00e9rez-Rosas et al., 2019)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 312, |
| "end": 329, |
| "text": "Yin et al. (2019)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Prior work has approached assessment of empathy in MI delivery via speech and linguistic features. Among text-based methods, Xiao et al. (2012) proposed one of the earliest approaches for utterance-level empathy classification using an n-gram language model. Psycholinguistic norm features are used in addition to other linguistic features in the work of (Gibson et al., 2015) . More recently, utilised long shortterm memory networks (LSTMs) (Hochreiter and Schmidhuber, 1997) to generate turn-level behavioural acts that are further processed by a deep neural network to predict session-level empathy.", |
| "cite_spans": [ |
| { |
| "start": 125, |
| "end": 143, |
| "text": "Xiao et al. (2012)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 355, |
| "end": 376, |
| "text": "(Gibson et al., 2015)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 442, |
| "end": 476, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine-Learning-Based Approaches to Empathy Analysis for MI", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Speech features have also been examined. Xiao et al. (2014) investigated features such as jitter and shimmer from speech signals, Xiao et al. (2015) studied speech rate entrainment, while P\u00e9rez-Rosas et al. (2017) used an array of acoustic and linguistic features to train their multimodal models.", |
| "cite_spans": [ |
| { |
| "start": 41, |
| "end": 59, |
| "text": "Xiao et al. (2014)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 130, |
| "end": 148, |
| "text": "Xiao et al. (2015)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 188, |
| "end": 213, |
| "text": "P\u00e9rez-Rosas et al. (2017)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine-Learning-Based Approaches to Empathy Analysis for MI", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "There are also a number of recent studies on datadriven MI behaviour coding based on text (Cao et al., 2019; Tanana et al., 2016; Xiao et al., 2016a; Gibson et al., 2018) , speech (Singla et al., 2020) , and both (Chen et al., 2019; Flemotomos et al., 2021) , but they are less relevant to this work due to their lack of explicit empathy modelling.", |
| "cite_spans": [ |
| { |
| "start": 90, |
| "end": 108, |
| "text": "(Cao et al., 2019;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 109, |
| "end": 129, |
| "text": "Tanana et al., 2016;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 130, |
| "end": 149, |
| "text": "Xiao et al., 2016a;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 150, |
| "end": 170, |
| "text": "Gibson et al., 2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 180, |
| "end": 201, |
| "text": "(Singla et al., 2020)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 213, |
| "end": 232, |
| "text": "(Chen et al., 2019;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 233, |
| "end": 257, |
| "text": "Flemotomos et al., 2021)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine-Learning-Based Approaches to Empathy Analysis for MI", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Different from the research listed above, this work addresses utterance-level empathy classification instead of session-level assessment, similar to Wu et al. (2020) which proposes utterance-level prediction of whether the therapist needs to show empathy given the context.", |
| "cite_spans": [ |
| { |
| "start": 149, |
| "end": 165, |
| "text": "Wu et al. (2020)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Machine-Learning-Based Approaches to Empathy Analysis for MI", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Recent years have witnessed a boom of research on data-driven analysis and application of empathy in general conversations. In terms of empathy analysis for open-domain conversations, Zhou et al. (2021) addressed scoring empathy grounded in specific situations, Welivita and Pu (2020) created a taxonomy of empathetic response intents in social dialogues, while Guda et al. (2021) proposed to take user demographic information into account for empathy prediction.", |
| "cite_spans": [ |
| { |
| "start": 184, |
| "end": 202, |
| "text": "Zhou et al. (2021)", |
| "ref_id": "BIBREF43" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data-Driven Text-Based Research on Empathy in General Conversation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "As therapeutic conversation data is scarce, recent works on empathy analysis have also turned to peersupport dialogues from online communities. Zhou and Jurgens (2020) analysed Reddit 2 conversations for the relationships between condolence, distress and empathy, Hosseini and Caragea (2021) studied empathy seeking and providing with dialogues from a cancer survivor network, and Sharma et al. (2020) proposed an empathy framework of reactioninterpretation-exploration for conversations from mental-health-related online forums.", |
| "cite_spans": [ |
| { |
| "start": 381, |
| "end": 401, |
| "text": "Sharma et al. (2020)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data-Driven Text-Based Research on Empathy in General Conversation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "While early general empathetic chatbots (Zhou and Wang, 2018; Lubis et al., 2018) were mostly based on recurrent neural networks and produced emotion-conditioned output, their more recent counterparts are predominantly based on pretrained language models and leverage emotions in various ways, including emotion detection as an auxiliary objective (Lin et al., 2020) , emotionbased mixture-of-experts decoding (Lin et al., 2019) , and rewarding response candidates likely to induce positive user emotion .", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 61, |
| "text": "(Zhou and Wang, 2018;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 62, |
| "end": 81, |
| "text": "Lubis et al., 2018)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 348, |
| "end": 366, |
| "text": "(Lin et al., 2020)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 410, |
| "end": 428, |
| "text": "(Lin et al., 2019)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data-Driven Text-Based Research on Empathy in General Conversation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We leverage 3 two types of data: general conversations and transcripts of MI demonstration videos.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We define an utterance as everything said by an interlocutor in their turn in a 2-person conversation, which is the most widely used definition of utterance in the literature of deep-learning-based conversational intelligence. This differs from some utterance definitions in psychotherapy. For example, an \"utterance\" in this work is identical to a \"volley\" as defined in the motivational interviewing skill code (MISC) (Miller et al., 2003) , while an \"utterance\" in MISC is \"a complete thought\" that \"ends either when one thought is completed or a new thought begins with the same speaker, or by an utterance from the other speaker\".", |
| "cite_spans": [ |
| { |
| "start": 420, |
| "end": 441, |
| "text": "(Miller et al., 2003)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our general conversation data is from two datasets: Persona-based Empathetic Conversation (PEC) (Zhong et al., 2020) and EmpatheticDialogues (ED) (Rashkin et al., 2019) . Their statistics are listed in Table 1 . For each 2-interlocutor dialogue, we consider the initiator of the conversation as the speaker and the other as the listener.", |
| "cite_spans": [ |
| { |
| "start": 96, |
| "end": 116, |
| "text": "(Zhong et al., 2020)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 146, |
| "end": 168, |
| "text": "(Rashkin et al., 2019)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 202, |
| "end": 209, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "General Conversations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "PEC consists of general conversations crawled from 3 subreddits: r/Happy 4 (r/H), r/OffMyChest 5 (r/OMC), and r/CasualConversation 6 (r/CC). Reddit users exchange happy experiences and thoughts in r/H, share emotional stories that cannot be told easily in r/OMC, and simply talk casually in r/CC. Since the original PEC dataset includes conversations between more than two participants and some conversations are actually subsets of other conversations (e.g. a 2-turn conversation that in effect constitutes the first 2 turns of a 4-turn conversation), we retain only the non-subset conversations that are between 2 interlocutors, in order to align with the counsellor-client nature of therapeutic conversations, and the filtered PEC contains around 56% of the conversations in the original one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "General Conversations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EmpatheticDialogues (abbreviated as ED) is comprised of 23.1K general conversations from MTurker pairs. The speaker of each dialogue was first given an emotion label (e.g. \"Afraid\"), then described a situation where they had felt the emotion before (e.g. \"I've been hearing noises around the house at night\"), and finally initiated the conversation about this situation with a listener.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "General Conversations", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We divide the general conversation data into 2 parts: empathetic-listener conversations and nonempathetic-listener ones. Specifically, we assign \"empathetic\" labels to all the listener utterances of the dialogues in r/H, r/OMC and ED, and \"nonempathetic\" to the counterparts in r/CC.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Empathy vs. Non-Empathy", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "For PEC, the heuristic empathy labelling is based on the annotator ratings from the original paper that suggest comments ( Table 1 : Statistics of PEC (r/Happy, r/OffMyChest, and r/CasualConversation) & EmpatheticDialogues. For PEC, we utilise 2-interlocutor conversations only. #Conv: number of conversations in the data split. We consider r/Happy, r/OffMyChest and EmpatheticDialogues to consist of mostly empathetic ( \u2021) listener utterances and r/CasualConversation to be comprised of predominantly non-empathetic ( \u00b6) ones. Note that the statistics of PEC are about the filtered dataset as described in Section 3.1. See Table 4 for more details.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 123, |
| "end": 130, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 624, |
| "end": 631, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Empathy vs. Non-Empathy", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "utterances) in r/H and r/OMC are significantly more empathetic than those in r/CC, and the interannotator agreement on this as measured by Fleiss' kappa (Fleiss, 1971) was \"substantial\". For ED, the empathy labelling is intuitive as the authors explicitly instructed the \"listeners\" to respond empathetically during the data collection. We note that our heuristic labelling for PEC and ED is based on the corpus-level labels given by the creators of the datasets, thus it may not be completely accurate at utterance or sentence level. We nevertheless utilise the heuristic labels for our experiments and leave more fine-grained annotation for future work.", |
| "cite_spans": [ |
| { |
| "start": 153, |
| "end": 167, |
| "text": "(Fleiss, 1971)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Empathy vs. Non-Empathy", |
| "sec_num": "3.1.1" |
| }, |
| { |
| "text": "Our counselling conversations are from P\u00e9rez-Rosas et al. (2019), who collected the first and only (to the best of our knowledge) publicly available dataset of MI conversations. The dialogues are the transcripts of 152 demonstrations of high-quality (MI adherent) and another 101 of low-quality (MI non-adherent) counselling from video-sharing platforms such as YouTube and Vimeo. The original transcripts were obtained with the automatic captioning tool of YouTube, so the conversations have minor transcription errors and are mostly without punctuation. We refer to this dataset as ROLE-PLAYMI, and list its statistics in Table 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 624, |
| "end": 631, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Motivational Interviewing", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We select a subset of ROLEPLAYMI to manually annotate utterance-level empathy to build a benchmark dataset for our models. The annotation guideline follows the definition of high empathy in MISC: Counsellors high on the empathy scale show an active interest in making sure they understand what the client is saying, including the client's perceptions, situation, meaning, and feelings. We ask the annotators to consider an utterance that shows MISC-defined high empathy as empathetic, otherwise as non-empathetic. Thus, non-empathy in this context can range from neutrality to apathy. T-u) n/a n/a 38.7% 2.3% %(\u00acQ. T-u) n/a n/a 71.9% 73.8% p(emp | \u00acQ, T-u) n/a n/a 0.50 0.03", |
| "cite_spans": [ |
| { |
| "start": 585, |
| "end": 589, |
| "text": "T-u)", |
| "ref_id": null |
| }, |
| { |
| "start": 615, |
| "end": 619, |
| "text": "T-u)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Empathy Annotation", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "p(emp | Q, T-u)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Empathy Annotation", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "n/a n/a 0.10 0.00 Table 2 : Statistics of ROLEPLAYMI and ANNO. #Conv: number of conversations in the subset. \"T-u\" is short for \"Therapist Utterance(s)\". #T-u: number of therapist utterances in the subset. %(emp.T-u): percentage of empathetic therapist utterances. %(\u00acQ.Tu): percentage of non-question therapist utterances. p(emp | \u00acQ, T-u): probability of a non-question therapist utterance being empathetic. p(emp | Q, T-u): probability of a question therapist utterance being empathetic. See Table 5 for more details.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 18, |
| "end": 25, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 495, |
| "end": 502, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Manual Empathy Annotation", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "We choose 7 transcripts (217 counsellor utterances in total) from the high-quality subset with negligible transcription errors, and 14 transcripts (214 counsellor utterances in total) from the lowquality one. The 431 selected utterances are presented to 2 human annotators for binary utterancelevel empathy annotation. One annotator is a senior researcher that has received formal MI training in the past, and the other is a PhD student that has read in depth about MI (incl. Rollnick et al. (2008) ). Their annotations show an inter-annotator agreement of 0.71 measured by Cohen's kappa (Cohen, 1968) , indicating \"substantial agreement\". Finally, the annotators discussed their results and resolved the differences. The annotated MI conversations are denoted as ANNO in the rest of the paper.", |
| "cite_spans": [ |
| { |
| "start": 476, |
| "end": 498, |
| "text": "Rollnick et al. (2008)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 588, |
| "end": 601, |
| "text": "(Cohen, 1968)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Empathy Annotation", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "As Table 2 shows, 38.7% of the therapist utterances in the high-quality subset are empathetic (i.e. 61.3% non-empathetic), while the number for the low-quality subset is 2.3% for empathetic (i.e. 97.7% non-empathetic), suggesting a marked difference between the empathy levels in high-and low-quality counselling.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Manual Empathy Annotation", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "We note that our empathy annotation is at utterance-level on the punctuation-free MI transcripts, which means an utterance is marked as empathetic as long as a part of the utterance is so, even though the remainder might not be. More fine-grained annotation would be possible with punctuated utterances, which we leave for future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Manual Empathy Annotation", |
| "sec_num": "3.2.1" |
| }, |
| { |
| "text": "Empirically, we observe that questions in MI do not show empathy in general, which is intuitive since the purpose of questions is to gather more information. Indeed, we notice that the vast majority of the examples of open and closed questions provided by MISC are not empathetic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question & Empathy", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "Therefore, we additionally conduct binary annotation for each therapist utterance in ANNO as to whether the utterance is (predominantly) a question, by marking an utterance as a question utterance if more than half of the tokens in an utterance constitute at least one open or closed question as defined by MISC. For instance, \"it's good to see you up and about how are you feeling after your last little hospitalization\" is considered a question utterance, since \"how are you feeling after your last little hospitalization\" is an open question and makes up more than half of the utterance. We denote the non-question subset of ANNO as \u00acQ.ANNO.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question & Empathy", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "The relationship between empathy and question found in ANNO confirms our observation: a nonquestion therapist utterance from high-quality counselling is substantially more likely (0.50) to be empathetic than one from low-quality counselling (0.03), while the same does not hold for question therapist utterances: 0.10 for high-quality and 0.00 for low-quality, which indicates that therapist questions are overall very unlikely to be empathetic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question & Empathy", |
| "sec_num": "3.2.2" |
| }, |
| { |
| "text": "Comparing ROLEPLAYMI with PEC & ED, we noticed a pronounced difference between empathy in general conversation and therapy: an MI-adherent therapist tends to express empathy through nonquestions (as shown in Table 2 ), e.g. \"The blood sugars have increased some, so you're concerned that things are not as good as they were last time that we talked\". Conversely, participants in general conversations often show empathy via questions, e.g. \"Oh no! That's scary! What do you think it is?\". Thus, analysing sentence-level empathy (instead of utterance-level) could better separate the empathetic and non-empathetic parts, and more overlap between general-conversation empathy and therapeutic empathy may be found in the non-question sentences. This was not possible in our experiments as ROLEPLAYMI is not punctuated, thus we leave it for future work. We note that another domain difference is that ROLEPLAYMI consists of transcripts of spoken dialogues whereas PEC and ED contain \"written\" chat conversations. The difference is smoothed by the high-quality transcription of the ROLEPLAYMI videos and we therefore do not use specific techniques to address the difference, but we plan to investigate this factor further in future work.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 208, |
| "end": 215, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "General-Conversation Empathy vs. Therapeutic Empathy", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this section, we first define the task of binary empathy classification, then lay out the out-of-domain empathy contrast strategy behind our supervised models for the task, and finally describe our unsupervised baselines driven by NLI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Binary Empathy Classification", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We denote", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Definition", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "D M I = {(u C i , u T i , e i )}, i = 1, \u2022 \u2022 \u2022 , N", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Definition", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "as a collection of {(client utterance, therapist utterance, empathy label)} tuples, where u T i is the therapist reply to the client utterance u C i , e i \u2208 {emp, \u00acemp} denotes if u T i shows empathy, and N is the number of such tuples in the dataset. Our task can be formulated as follows: given u T i and optionally u C i for more context, predict the correct empathy label e i of u T i . We use ANNO as D M I .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task Definition", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Since our manually annotated subset of ROLE-PLAYMI is too small to be a proper training set, we resort to learning from out-of-domain (i.e. non-MI) (OOD) empathy contrast. Specifically, as described in Section 3.1.1 and Figure 1 , we utilise all listener utterances in r/H, r/OMC and ED as positive (empathetic) examples and their counterparts in r/CC as negative (non-empathetic) examples, as we aim to leverage parallels between general-conversation empathy and psychotherapeutic empathy. We build 3 empathy vs. non-empathy contrast 7 pairs from general conversations: (r/H vs. r/CC); (r/OMC vs. r/CC); (ED vs. r/CC). For each pair, we sample an equal number of examples from the empathetic (positive) and non-empathetic (negative) subsets to construct a contrast dataset P a Client: Everyone's getting on me about my drinking. | Therapist: Kind of like a bunch of crows pecking at you.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 220, |
| "end": 228, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Supervised Learning: Using Out-of-Domain Empathy Contrast", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The therapist is empathetic towards the patient Entailment", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "H b", |
| "sec_num": null |
| }, |
| { |
| "text": "The client wants to smoke more. Neutral The therapist is not listening to the client. Contradiction a P, Premise. b H, Hypothesis. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "H b", |
| "sec_num": null |
| }, |
| { |
| "text": "D Gen = {(u S j , u L j , e j )}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "H b", |
| "sec_num": null |
| }, |
| { |
| "text": ", where in each sample the empathy label e j \u2208 {emp, \u00acemp} denotes whether the listener response u L j is empathetic towards its preceding speaker utterance u S j . Our sampling ensures that the 2 classes (i.e. emp & \u00acemp) in each pair during training are balanced.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "H b", |
| "sec_num": null |
| }, |
| { |
| "text": "For each contrast pair, we train a 1-utterance general-conversation empathy classifier cls (1) to predict e j given u L j , as well as a 2-utterance counterpart cls (2) to predict e j given (u S j , u L j ). Finally, we apply the trained cls (1) and cls (2) directly on", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "H b", |
| "sec_num": null |
| }, |
| { |
| "text": "D M I , using u C i as u S j and u T i as u L j .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "H b", |
| "sec_num": null |
| }, |
| { |
| "text": "Natural language inference (NLI) is the task of determining if a hypothesis is true (entailment), false (contradiction), or undetermined (neutral) given a premise 8 (Table 3) . Following Yin et al. (2019) where NLI models prove effective as ready-made zero-shot sequence classifiers, we formulate our empathy classification task as an NLI problem. Assuming only u T i is available, we use it as the premise, and define the 1-utterance empathy hypothesis h (1) as \"This text is empathetic.\". We then utilise an off-the-shelf NLI model M as an unsupervised 1-utterance empathy classifier nli E", |
| "cite_spans": [ |
| { |
| "start": 187, |
| "end": 204, |
| "text": "Yin et al. (2019)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 165, |
| "end": 174, |
| "text": "(Table 3)", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Unsupervised Baseline: Text Classification as Natural Language Inference", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "(1) to directly predict a label from {entailment, contradiction, neutral} given (u T i , h (1) ). We consider u T i to be classified as an empathetic utterance only if the predicted label is entailment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unsupervised Baseline: Text Classification as Natural Language Inference", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We also investigate a client-therapist exchange scenario where both u C i and u T i are provided. The premise p i is then formatted as \"Client: u C i | Therapist: u T i \", and we define the 2-utterance hypothesis as h (2) = \"The Therapist is empathetic towards the Client.\". We use the same M as an unsupervised 2-utterance empathy classifier nli E", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unsupervised Baseline: Text Classification as Natural Language Inference", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "(2) given the input (p i , h (2) ). Again, only entailment is deemed equivalent to categorising u T i as empathetic.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unsupervised Baseline: Text Classification as Natural Language Inference", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "It is our observation from MISC as well as ROLE-PLAYMI that an empathetic therapist tends to acknowledge the difficulties and feelings of clients, and hence we experiment with NLI-style modelling for client-therapist agreement. Specifically, we use M as an unsupervised 2-utterance agreement classifier nli A C\u2192T to measure the agreement between u C i and u T i , using the former as the premise and the latter as the hypothesis. We only interpret an entailment prediction from M as the therapist agreeing with the client and hence the therapist empathising with the client.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unsupervised Baseline: Client-Therapist Agreement as Natural Language Inference", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "For OOD empathy contrast (Section 4.2), we keep the original train/dev/test splits of PEC and ED. Since the two datasets in each contrast pair can be vastly different in their sizes (e.g. ED has only 17.8K training examples whereas r/CC has 530.2K), we always sample the positive and negative subsets so that their sizes are identical to that of ED, the smallest dataset, which ensures a) the two classes are balanced in each pair, and b) different cls models are trained with equal amounts of data and their performances are hence comparable.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "To minimise the bias in training data caused by such sampling, we train the classifier of each contrast pair 5 times, each time with its own randomly sampled data. Note that this leads to 5 different groups of class-balanced {train, dev, set} datasets for each pair.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We leverage pre-trained language models for all our experiments. BERT (Devlin et al., 2019) is the backbone of our OOD empathy contrast models and its BERT-BASE-UNCASED variant is chosen. We add a fully connected layer atop the classification token ([CLS]) position of the language model to implement a binary classifier, and train the entire model end-to-end on the empathy contrast pairs. For the backbone M of the unsupervised zeroshot baselines, we use the BART-LARGE variant of BART (Lewis et al., 2020 ) that has been finetuned on MultiNLI (Williams et al., 2018) . For more details, see Section B.", |
| "cite_spans": [ |
| { |
| "start": 70, |
| "end": 91, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 488, |
| "end": 507, |
| "text": "(Lewis et al., 2020", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 546, |
| "end": 569, |
| "text": "(Williams et al., 2018)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "To measure model performance on ANNO, we choose Matthews correlation coefficient (MCC) since it is robust to class imbalance, taking into account that only 38.7% of the ANNO examples from the high-quality subset are marked as empathetic and the number is only 2.3% for low-quality. We also use MCC to measure test set performance to increase comparability.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Implementation", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We examine the performances achieved on ANNO by the models introduced in Section 4 , namely the blue bars in the \"OOD (1) w/ Contrast\" (1-utterance models trained on OOD empathy contrast, i.e. cls (1) ), \"OOD (2) w/ Contrast\" (2-utterance models trained on OOD empathy contrast, i.e. cls (2) ), and \"Baselines\" subplots of Figure 2 . The value of each blue bar indicates the mean MCC of the 5 models from the corresponding pair, and we use the error bar to simply represent +/-one standard deviation from the mean, in order to illustrate the variation among the scores of the 5 models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 323, |
| "end": 331, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Also, we show in Figure 3 the performances of the OOD models on their respective test sets. In the test set of each of the 5 models from a (D + , D \u2212 ) OOD pair, we have N T random samples from D + and another N T from D \u2212 , where N T is the size of the original test set of ED, in line with our sampling method for the OOD training sets. The mean (bar value) -standard deviation (error bar) representation follows that of Figure 2 . By comparing the scores of the 5 models from an OOD setup on their own test sets and on ANNO, it becomes clear how the domain shift from general conversation to MI affects the performance of those models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 17, |
| "end": 25, |
| "text": "Figure 3", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 423, |
| "end": 431, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We first observe that while each test set in the OOD setups is different as we address class imbalance with random sampling, it is still obvious that the OOD models achieve considerably better scores on their test sets but experience significant drops on ANNO. In particular, ED vs. r/CC (2) reaches over 0.9 MCC on average on its test sets but only around 0.10 on ANNO. This stops any of the OOD empathy contrast models from being a reliable indicator of therapeutic empathy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "There is also considerable variation in the scores on ANNO (but not on the test sets) of the OOD models from the same empathy contrast pair. For instance, while r/OMC vs. r/CC (2) reaches 0.17 MCC on average, the standard deviation is 0.03. Further, we find that among the 5 models of the r/OMC vs. r/CC (2) pair, the MCC can be as high as 0.21 and as low as 0.11 despite that a) the 5 models only differ in the randomness of their training data sampling, b) the models have negligible variation in their test set performances (Figure 3) . This pattern is present in all the OOD models, revealing their brittleness w.r.t. MI empathy classification.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 527, |
| "end": 537, |
| "text": "(Figure 3)", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "As for the choice between 1-utterance and 2-utterance, the effects are mixed. Specifically, r/H vs. r/CC and ED vs. r/CC both have decreased performances on ANNO going from 1-utterance to 2-utterance, while r/OMC vs. r/CC benefits from this transition. In fact, in terms of the average score, r/OMC vs. r/CC (2) is the best setup. This could be because a client talks more about negative experiences in a therapy session, not unlike how the typical speaker shares emotional stories in r/OMC. In contrast, the speakers in r/H are more likely to tell positive experiences, which could explain the performance drop resulting from including the speaker utterance in r/H vs. r/CC (2).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The unsupervised zero-shot baselines do not fare better in general. nli E", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "(1) and nli E (2) score around 0.05 and 0.02, respectively, both below most of the mean scores achieved by the OOD empathy contrast models. This can be attributed to the fact that knowledge gained from NLI tasks is not sufficient for reasoning about complex concepts such as empathy. nli A C\u2192T , on the other hand, shows better results and outperforms half of the OOD empathy contrast models, which suggests correlation between client-therapist agreement and therapist empathy. As a probing step, we swap the client and therapist utterances to reverse the premise-hypothesis formulation and observe that it (nli A T \u2192C ) leads to a substantial drop to -0.04 MCC, further illustrating the aforementioned correlation.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "To shed light on the impact of the OOD design choices we made in Section 4, we add a control group of OOD models that are trained without empathy contrast for comparison, as shown by the blue bars in the \"OOD (1) w/o Contrast\", \"OOD (2) w/o Contrast\" subplots. More specifically, we build 3 pairs: (r/OMC vs. r/H), (ED vs. r/H), and (ED vs. r/OMC), as we consider them (empathy vs. empathy) pairs from which an OOD model is not Figure 2 : Results of all models on ANNO and \u00acQ.ANNO, measured with Matthews correlation coefficient (Matthews, 1975) . The names of the baseline models (shown in the rightmost subplot) are re-written in the figure for better visibility, e.g. \"NLI\\nE\\n(1)\" instead of nli E",
| "cite_spans": [ |
| { |
| "start": 529, |
| "end": 545, |
| "text": "(Matthews, 1975)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 428, |
| "end": 436, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "(1) ). The first 4 subplots on the left show the performances of OOD-trained models. The first two show the performances of the 1-(e.g. r/H vs. r/CC (1)) and 2-utterance OOD models (e.g. r/H vs. r/CC (2)) trained on data with empathy contrast (e.g. r/H vs. r/CC, which is empathy vs. non-empathy), while the third and fourth show the performances of the 1-and 2-utterance OOD models trained on data without empathy contrast (e.g. ED vs. r/H, which is empathy vs. empathy). As explained in Section 5.1, for each OOD pair (e.g. r/H vs. r/CC), we randomly sample from the class-unbalanced OOD data 5 times to obtain 5 groups of class-balanced {train, dev, set} data, in order to address class imbalance and data selection bias. For each OOD pair, therefore, we train 5 models independently with the training data from their respective groups. Thus, the value of each rectangular bar indicates the mean of the scores of the 5 models from the 5 data groups of the corresponding OOD pair, and the error bar shows +/-one standard deviation from the mean. able to learn empathy vs. non-empathy contrast. Additionally, we inspect the performances (orange bars) of all the models on \u00acQ.ANNO to understand model behaviour in a less noisy context (i.e. question utterances removed).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Interestingly, the control group models score around 0.11 MCC and are not far behind empathy contrast models such as r/OMC vs. r/CC and ED vs. r/CC in the 1-utterance scenario, albeit with similarly large variation in their results. When it comes to 2-utterance, however, the lead of the empathy contrast models (except r/H vs. r/CC) becomes more obvious, with r/OMC vs. r/CC scoring over 0.15 MCC in contrast to ED vs. r/OMC recording less than 0.05. This shows that the benefit of learning from OOD empathy contrast, though small, does exist, and is more pronounced when a) compared against learning from no-empathy-contrast OOD data and b) more conversation context is taken into account by the models.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Finally, for the OOD contrast models, we notice mixed effects of removing questions from the benchmark dataset. It enables performance gains for r/H vs. r/CC (1) and ED vs. r/CC (2) but performance drops for the other OOD empathy contrast models. This shows that despite the annotations indicating that question therapist utterances are predominantly non-empathetic, whether a therapist utterance is a question generally does not substantially impact the empathy prediction of an OOD contrast model. One possible explanation, among others, is that the models simply did not learn to associate question with non-empathy during the OOD contrast training and instead learned to base their classification on semantic cues unrelated to question/non-question. Echoing Section 3.3, we argue that analysing non-questions at sentence level would be less noisy and better predictions would thus be possible, which we leave for future work.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "The motivation for this work was to minimise the annotation effort needed for training an utterance-level classifier of therapeutic empathy/non-empathy, based on the assumption that 1) pre-trained language models can be finetuned to distinguish between empathy and nonempathy in general conversations, and 2) the finetuned model can be leveraged to directly predict therapeutic empathy/non-empathy. The first subplot on the left shows the test set performances of the 1-and 2-utterance OOD models trained on data with empathy contrast, and the second shows the test set performances of the 1-and 2utterance models trained on data without empathy contrast. As explained in Figure 2 , each OOD pair (e.g. r/H vs. r/CC (1) / (2)) corresponds to 5 groups of randomly sampled {train, dev, test} data and hence 5 trained models. Thus, the model trained on the training data of a group has a test set score associated with the test data of the group. Therefore, the value of each rectangular bar indicates the mean of the test set scores of the 5 models from the same OOD pair, and the error bar shows +/-one standard deviation from the mean.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 672, |
| "end": 680, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Clinical Application & Impact", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Our results, for the most part, show that this simple OOD training approach did not sufficiently perform accurate classification, which limits its application in clinical settings. Compared to supervised learning of session-level empathy on sizeable corpora of well-annotated therapeutic conversations , the task of utterance-level empathy classification with no in-domain training is more challenging and the models unsurprisingly fared worse. As discussed, the coarse, heuristic empathy labelling for the utterances in the training data and the domain gap between general conversation and therapeutic dialogue may have contributed considerably to the sub-optimal performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clinical Application & Impact", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Nevertheless, we believe that this work is a meaningful step towards low-resource real-time assessment of empathy in counselling, and that the idea of utilising pre-trained language models for low-resource scenarios related to clinical psychology is still relevant. With smoothed domain gaps and more fine-grained annotation, future work can still use pre-trained language models to leverage parallels between empathy manifestations in general conversation and therapeutic dialogue. For instance, knowledge of empathy vs. non-empathy learned from well-annotated general conversations can serve as a bootstrapping step for empathy vs. non-empathy training on a minimal amount of well-annotated therapeutic conversations, since there can be a small to modest amount of therapeutic dialogue data available for a specialised domain instead of no data at all, which can take advantage of OOD empathy knowledge as a starting point for in-domain fine-tuning and thus maximise the benefit of OOD empathy training.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Clinical Application & Impact", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We find that our models trained to learn from empathy vs. non-empathy contrast in general conversation (i.e. out-of-domain w.r.t. counselling) are generally not reliable predictors of empathy/nonempathy in motivational interviewing. Upon probing, we observe that OOD empathy contrast learning is still marginally better than OOD learning without empathy contrast, particularly when more conversation context is available.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In future work, we plan to investigate more fine-grained empathy annotation and prediction, such as at sentence level, where we expect less noise and more accurate predictions. In addition, we will explore few-shot methods for the empathy classification task with out-of-domain empathy contrast training as a bootstrapping step.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Empathy often involves deeply personal circumstances (e.g. distress & struggle) and computational studies on it therefore warrant ethical consideration. The greatest ethical risk of this work has been privacy implications, as the conversational data we used could contain large amounts of sensitive identifiable information. To mitigate this risk, we experimented with only de-identified data where mentions of information like name, date, and location are replaced with placeholders. As a counterbalance, this study has considerable benefit as the first investigation of using knowledge of general-conversation empathy to support lowresource computational analysis of MI empathy, and the findings can inspire future efforts in making research on therapeutic empathy more accessible.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ethics & Privacy", |
| "sec_num": null |
| }, |
| { |
| "text": "We use \"counsellor\" and \"therapist\" interchangeably in this work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Reddit (https://www.reddit.com/) is an online platform comprised of subforums (known as subreddits), each with a specific topic for Reddit users to discuss.3 Identifiable information (e.g. names, dates) was replaced with placeholders prior to the experiments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://www.reddit.com/r/happy/ 5 https://www.reddit.com/r/offmychest/ 6 https://www.reddit.com/r/ CasualConversation", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We use \"empathy vs. non-empathy contrast\" and \"empathy contrast\" interchangeably.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Definition of NLI: https://paperswithcode. com/task/natural-language-inference", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/huggingface/ transformers 10 https://pytorch.org/ 11 https://scikit-learn.org/stable/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work has been funded by the EC in the H2020 Marie Sk\u0142odowska-Curie PhilHumans project, contract no. 812882. The authors would also like to thank Dr. Mark Aloia for his guidance and support.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "We list the complete statistics of the general conversation datasets in Table 4 and those of ROLE-PLAYMI in Table 5 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 72, |
| "end": 79, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 108, |
| "end": 115, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Data", |
| "sec_num": null |
| }, |
| { |
| "text": "All our pre-trained language models are implemented by the HuggingFace framework 9 (Wolf et al., 2019) . All our models are implemented in PyTorch 10 , while their evaluation is implemented with scikit-learn 11 . For cls (1) , the input format to BERT isDuring OOD training, we use a learning rate of 1e-5 and a batch size of 32, and evaluate every 500 steps on the development set. We choose the Matthews correlation coefficient (Matthews, 1975) (MCC) as the metric for validation. We stop the training if the performance has not improved in the most recent 10 validations, and select the best checkpoint w.r.t. the development set.We formulate the input to nli -uLen.) 28.5 20.6 24.4 21.6 %(emp. T-u) n/a n/a 38.7% 2.3% %(\u00acQ. T-u) n/a n/a 71.9% 73.8% p(emp | \u00acQ, T-u) n/a n/a 0.50 0.03 p(emp | Q, T-u) n/a n/a 0.10 0.00 Table 5 : Statistics of ROLEPLAYMI and ANNO. The abbreviation convention is similar to that in Table 4 , while \"T-u\" is short for \"Therapist Utterance(s)\" and \"C-u\" for \"Client Utterance(s)\". #Conv: number of conversations in the subset. #T-u: number of therapist utterances in the subset. %(emp.T-u): percentage of empathetic therapist utterances. %(\u00acQ.T-u): percentage of non-question therapist utterances. p(emp | \u00acQ, T-u): probability of a non-question therapist utterance being empathetic. p(emp | Q, T-u): probability of a question therapist utterance being empathetic.", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 102, |
| "text": "(Wolf et al., 2019)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 663, |
| "end": 670, |
| "text": "-uLen.)", |
| "ref_id": null |
| }, |
| { |
| "start": 698, |
| "end": 702, |
| "text": "T-u)", |
| "ref_id": null |
| }, |
| { |
| "start": 728, |
| "end": 732, |
| "text": "T-u)", |
| "ref_id": null |
| }, |
| { |
| "start": 765, |
| "end": 769, |
| "text": "T-u)", |
| "ref_id": null |
| }, |
| { |
| "start": 799, |
| "end": 803, |
| "text": "T-u)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 822, |
| "end": 829, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 917, |
| "end": 924, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "B Implementation Details", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Observing dialogue in therapy: Categorizing and forecasting behavioral codes", |
| "authors": [ |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Tanana", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Poitras", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Vivek", |
| "middle": [], |
| "last": "Srikumar", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019", |
| "volume": "1", |
| "issue": "", |
| "pages": "5599--5611", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/p19-1563" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jie Cao, Michael Tanana, Zac E. Imel, Eric Poitras, David C. Atkins, and Vivek Srikumar. 2019. Ob- serving dialogue in therapy: Categorizing and fore- casting behavioral codes. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-Au- gust 2, 2019, Volume 1: Long Papers, pages 5599- 5611. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Improving the prediction of therapist behaviors in addiction counseling by exploiting class confusions", |
| "authors": [ |
| { |
| "first": "Zhuohao", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Karan", |
| "middle": [], |
| "last": "Singla", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| }, |
| { |
| "first": "Dogan", |
| "middle": [], |
| "last": "Can", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Panayiotis", |
| "middle": [ |
| "G" |
| ], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "6605--6609", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.2019.8682885" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuohao Chen, Karan Singla, James Gibson, Do- gan Can, Zac E. Imel, David C. Atkins, Panayi- otis G. Georgiou, and Shrikanth S. Narayanan. 2019. Improving the prediction of therapist be- haviors in addiction counseling by exploiting class confusions. In IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP 2019, Brighton, United Kingdom, May 12-17, 2019, pages 6605-6609. IEEE.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Weighted kappa: nominal scale agreement provision for scaled disagreement or partial credit", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| } |
| ], |
| "year": 1968, |
| "venue": "Psychological bulletin", |
| "volume": "70", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Cohen. 1968. Weighted kappa: nominal scale agreement provision for scaled disagreement or par- tial credit. Psychological bulletin, 70(4):213.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "BERT: pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/n19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Pa- pers), pages 4171-4186. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Measuring nominal scale agreement among many raters", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Joseph", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fleiss", |
| "suffix": "" |
| } |
| ], |
| "year": 1971, |
| "venue": "Psychological bulletin", |
| "volume": "76", |
| "issue": "5", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph L Fleiss. 1971. Measuring nominal scale agree- ment among many raters. Psychological bulletin, 76(5):378.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "am I A good therapist?\" automated evaluation of psychotherapy skills using speech and language technologies", |
| "authors": [ |
| { |
| "first": "Shrikanth", |
| "middle": [], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Atkins, and Shrikanth Narayanan. 2021. \"am I A good therapist?\" automated evaluation of psy- chotherapy skills using speech and language tech- nologies. CoRR, abs/2102.11265.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Counselor skill influences outcomes of brief motivational interventions", |
| "authors": [ |
| { |
| "first": "Jacques", |
| "middle": [], |
| "last": "Gaume", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerhard", |
| "middle": [], |
| "last": "Gmel", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohamed", |
| "middle": [], |
| "last": "Faouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean-Bernard", |
| "middle": [], |
| "last": "Daeppen", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Journal of substance abuse treatment", |
| "volume": "37", |
| "issue": "2", |
| "pages": "151--159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacques Gaume, Gerhard Gmel, Mohamed Faouzi, and Jean-Bernard Daeppen. 2009. Counselor skill influ- ences outcomes of brief motivational interventions. Journal of substance abuse treatment, 37(2):151- 159.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Multi-label multi-task deep learning for behavioral coding", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Torrey", |
| "middle": [], |
| "last": "Creed", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "Panayiotis", |
| "middle": [ |
| "G" |
| ], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Gibson, David C. Atkins, Torrey Creed, Zac E. Imel, Panayiotis G. Georgiou, and Shrikanth S. Narayanan. 2018. Multi-label multi-task deep learn- ing for behavioral coding. CoRR, abs/1810.12349.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A deep learning approach to modeling empathy in addiction counseling", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| }, |
| { |
| "first": "Dogan", |
| "middle": [], |
| "last": "Can", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Panayiotis", |
| "middle": [ |
| "G" |
| ], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Interspeech 2016, 17th Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "1447--1451", |
| "other_ids": { |
| "DOI": [ |
| "10.21437/Interspeech.2016-554" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Gibson, Dogan Can, Bo Xiao, Zac E. Imel, David C. Atkins, Panayiotis G. Georgiou, and Shrikanth S. Narayanan. 2016. A deep learning ap- proach to modeling empathy in addiction counsel- ing. In Interspeech 2016, 17th Annual Conference of the International Speech Communication Asso- ciation, San Francisco, CA, USA, September 8-12, 2016, pages 1447-1451. ISCA.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Predicting therapist empathy in motivational interviews using language features inspired by psycholinguistic norms", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikolaos", |
| "middle": [], |
| "last": "Malandrakis", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Romero", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "INTER-SPEECH 2015, 16th Annual Conference of the International Speech Communication Association, Dresden", |
| "volume": "", |
| "issue": "", |
| "pages": "1947--1951", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Gibson, Nikolaos Malandrakis, Francisco Romero, David C. Atkins, and Shrikanth S. Narayanan. 2015. Predicting therapist empathy in motivational interviews using language features inspired by psycholinguistic norms. In INTER- SPEECH 2015, 16th Annual Conference of the Inter- national Speech Communication Association, Dres- den, Germany, September 6-10, 2015, pages 1947- 1951. ISCA.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Empathbert: A bert-based framework for demographic-aware empathy prediction", |
| "authors": [ |
| { |
| "first": "Aparna", |
| "middle": [], |
| "last": "Bhanu Prakash Reddy Guda", |
| "suffix": "" |
| }, |
| { |
| "first": "Niyati", |
| "middle": [], |
| "last": "Garimella", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chhaya", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bhanu Prakash Reddy Guda, Aparna Garimella, and Niyati Chhaya. 2021. Empathbert: A bert-based framework for demographic-aware empathy predic- tion. CoRR, abs/2102.00272.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "It takes two to empathize: One to seek and one to provide", |
| "authors": [ |
| { |
| "first": "Mahshid", |
| "middle": [], |
| "last": "Hosseini", |
| "suffix": "" |
| }, |
| { |
| "first": "Cornelia", |
| "middle": [], |
| "last": "Caragea", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mahshid Hosseini and Cornelia Caragea. 2021. It takes two to empathize: One to seek and one to provide. Proceedings of the AAAI Conference on Artificial In- telligence. To appear.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "BART: denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal ; Abdelrahman Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2020", |
| "issue": "", |
| "pages": "7871--7880", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.703" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: denoising sequence-to-sequence pre- training for natural language generation, translation, and comprehension. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, ACL 2020, Online, July 5-10, 2020, pages 7871-7880. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Moel: Mixture of empathetic listeners", |
| "authors": [ |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamin", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "121--132", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhaojiang Lin, Andrea Madotto, Jamin Shin, Peng Xu, and Pascale Fung. 2019. Moel: Mixture of empa- thetic listeners. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing, EMNLP- IJCNLP 2019, Hong Kong, China, November 3- 7, 2019, pages 121-132. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Caire: An end-to-end empathetic chatbot", |
| "authors": [ |
| { |
| "first": "Zhaojiang", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Genta", |
| "middle": [], |
| "last": "Indra Winata", |
| "suffix": "" |
| }, |
| { |
| "first": "Farhad", |
| "middle": [], |
| "last": "Bin Siddique", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamin", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "The Thirty-Second Innovative Applications of Artificial Intelligence Conference", |
| "volume": "2020", |
| "issue": "", |
| "pages": "13622--13623", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhaojiang Lin, Peng Xu, Genta Indra Winata, Farhad Bin Siddique, Zihan Liu, Jamin Shin, and Pascale Fung. 2020. Caire: An end-to-end em- pathetic chatbot. In The Thirty-Fourth AAAI Con- ference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial In- telligence, EAAI 2020, New York, NY, USA, Febru- ary 7-12, 2020, pages 13622-13623. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Eliciting positive emotion through affect-sensitive dialogue response generation: A neural network approach", |
| "authors": [ |
| { |
| "first": "Nurul", |
| "middle": [], |
| "last": "Lubis", |
| "suffix": "" |
| }, |
| { |
| "first": "Sakriani", |
| "middle": [], |
| "last": "Sakti", |
| "suffix": "" |
| }, |
| { |
| "first": "Koichiro", |
| "middle": [], |
| "last": "Yoshino", |
| "suffix": "" |
| }, |
| { |
| "first": "Satoshi", |
| "middle": [], |
| "last": "Nakamura", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18)", |
| "volume": "", |
| "issue": "", |
| "pages": "5293--5300", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nurul Lubis, Sakriani Sakti, Koichiro Yoshino, and Satoshi Nakamura. 2018. Eliciting positive emo- tion through affect-sensitive dialogue response gen- eration: A neural network approach. In Proceed- ings of the Thirty-Second AAAI Conference on Ar- tificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, pages 5293- 5300. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Comparison of the predicted and observed secondary structure of t4 phage lysozyme", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Brian", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Matthews", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "Biochimica et Biophysica Acta (BBA)-Protein Structure", |
| "volume": "405", |
| "issue": "2", |
| "pages": "442--451", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian W Matthews. 1975. Comparison of the pre- dicted and observed secondary structure of t4 phage lysozyme. Biochimica et Biophysica Acta (BBA)- Protein Structure, 405(2):442-451.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Fidelity to motivational interviewing and subsequent cannabis cessation among adolescents", |
| "authors": [ |
| { |
| "first": "Jim", |
| "middle": [], |
| "last": "Mccambridge", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Day", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Bonnita", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Strang", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Addictive behaviors", |
| "volume": "36", |
| "issue": "7", |
| "pages": "749--754", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jim McCambridge, Maria Day, Bonnita A Thomas, and John Strang. 2011. Fidelity to motivational inter- viewing and subsequent cannabis cessation among adolescents. Addictive behaviors, 36(7):749-754.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Manual for the motivational interviewing skill code (misc)", |
| "authors": [ |
| { |
| "first": "Theresa", |
| "middle": [ |
| "B" |
| ], |
| "last": "William R Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Denise", |
| "middle": [], |
| "last": "Moyers", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Ernst", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Amrhein", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Albuquerque: Center on Alcoholism", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William R Miller, Theresa B Moyers, Denise Ernst, and Paul Amrhein. 2003. Manual for the motiva- tional interviewing skill code (misc). Unpublished manuscript. Albuquerque: Center on Alcoholism, Substance Abuse and Addictions, University of New Mexico.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Motivational interviewing: Helping people change", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Rollnick", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William R Miller and Stephen Rollnick. 2012. Motiva- tional interviewing: Helping people change. Guil- ford press.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The motivational interviewing treatment integrity code (miti 4): rationale, preliminary reliability and validity", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Theresa", |
| "suffix": "" |
| }, |
| { |
| "first": "Lauren", |
| "middle": [ |
| "N" |
| ], |
| "last": "Moyers", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [ |
| "K" |
| ], |
| "last": "Rowell", |
| "suffix": "" |
| }, |
| { |
| "first": "Denise", |
| "middle": [], |
| "last": "Manuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [ |
| "M" |
| ], |
| "last": "Ernst", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Houck", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Journal of substance abuse treatment", |
| "volume": "65", |
| "issue": "", |
| "pages": "36--42", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theresa B Moyers, Lauren N Rowell, Jennifer K Manuel, Denise Ernst, and Jon M Houck. 2016. The motivational interviewing treatment integrity code (miti 4): rationale, preliminary reliability and valid- ity. Journal of substance abuse treatment, 65:36-42.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Understanding and predicting empathic behavior in counseling therapy", |
| "authors": [ |
| { |
| "first": "Ver\u00f3nica", |
| "middle": [], |
| "last": "P\u00e9rez-Rosas", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Resnicow", |
| "suffix": "" |
| }, |
| { |
| "first": "Satinder", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "An", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1426--1435", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P17-1131" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ver\u00f3nica P\u00e9rez-Rosas, Rada Mihalcea, Kenneth Resni- cow, Satinder Singh, and Lawrence An. 2017. Un- derstanding and predicting empathic behavior in counseling therapy. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1426-1435. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "What makes a good counselor? learning to distinguish between high-quality and low-quality counseling conversations", |
| "authors": [ |
| { |
| "first": "Ver\u00f3nica", |
| "middle": [], |
| "last": "P\u00e9rez-Rosas", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinyi", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Resnicow", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "926--935", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1088" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ver\u00f3nica P\u00e9rez-Rosas, Xinyi Wu, Kenneth Resnicow, and Rada Mihalcea. 2019. What makes a good coun- selor? learning to distinguish between high-quality and low-quality counseling conversations. In Pro- ceedings of the 57th Annual Meeting of the Associa- tion for Computational Linguistics, pages 926-935, Florence, Italy. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Towards empathetic opendomain conversation models: A new benchmark and dataset", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [ |
| "Michael" |
| ], |
| "last": "Hannah Rashkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Y-Lan", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Boureau", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019", |
| "volume": "1", |
| "issue": "", |
| "pages": "5370--5381", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/p19-1534" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hannah Rashkin, Eric Michael Smith, Margaret Li, and Y-Lan Boureau. 2019. Towards empathetic open- domain conversation models: A new benchmark and dataset. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28-August 2, 2019, Vol- ume 1: Long Papers, pages 5370-5381. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Motivational interviewing in health care: helping patients change behavior", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Rollnick", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Butler", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Rollnick, William R Miller, and Christopher Butler. 2008. Motivational interviewing in health care: helping patients change behavior. Guilford Press.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A computational approach to understanding empathy expressed in text-based mental health support", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Sharma", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [ |
| "S" |
| ], |
| "last": "Miner", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Althoff", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "2020", |
| "issue": "", |
| "pages": "5263--5276", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.425" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Sharma, Adam S. Miner, David C. Atkins, and Tim Althoff. 2020. A computational approach to understanding empathy expressed in text-based men- tal health support. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing, EMNLP 2020, Online, November 16-20, 2020, pages 5263-5276. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Generating empathetic responses by looking ahead the user's sentiment", |
| "authors": [ |
| { |
| "first": "Jamin", |
| "middle": [], |
| "last": "Shin", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Madotto", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascale", |
| "middle": [], |
| "last": "Fung", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "2020 IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "2020", |
| "issue": "", |
| "pages": "7989--7993", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP40776.2020.9054379" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jamin Shin, Peng Xu, Andrea Madotto, and Pascale Fung. 2020. Generating empathetic responses by looking ahead the user's sentiment. In 2020 IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP 2020, Barcelona, Spain, May 4-8, 2020, pages 7989-7993. IEEE.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Towards end-2-end learning for predicting behavior codes from spoken utterances in psychotherapy conversations", |
| "authors": [ |
| { |
| "first": "Karan", |
| "middle": [], |
| "last": "Singla", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhuohao", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL 2020, Online", |
| "volume": "", |
| "issue": "", |
| "pages": "3797--3803", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karan Singla, Zhuohao Chen, David C. Atkins, and Shrikanth Narayanan. 2020. Towards end-2-end learning for predicting behavior codes from spoken utterances in psychotherapy conversations. In Pro- ceedings of the 58th Annual Meeting of the Associ- ation for Computational Linguistics, ACL 2020, On- line, July 5-10, 2020, pages 3797-3803. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "A comparison of natural language processing methods for automated coding of motivational interviewing", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Tanana", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [ |
| "A" |
| ], |
| "last": "Hallgren", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Zac", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Vivek", |
| "middle": [], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Srikumar", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Journal of substance abuse treatment", |
| "volume": "65", |
| "issue": "", |
| "pages": "43--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Tanana, Kevin A Hallgren, Zac E Imel, David C Atkins, and Vivek Srikumar. 2016. A comparison of natural language processing methods for automated coding of motivational interviewing. Journal of substance abuse treatment, 65:43-50.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "A taxonomy of empathetic response intents in human social conversations", |
| "authors": [ |
| { |
| "first": "Anuradha", |
| "middle": [], |
| "last": "Welivita", |
| "suffix": "" |
| }, |
| { |
| "first": "Pearl", |
| "middle": [], |
| "last": "Pu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "2020", |
| "issue": "", |
| "pages": "4886--4899", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.coling-main.429" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anuradha Welivita and Pearl Pu. 2020. A taxonomy of empathetic response intents in human social con- versations. In Proceedings of the 28th International Conference on Computational Linguistics, COLING 2020, Barcelona, Spain (Online), December 8-13, 2020, pages 4886-4899. International Committee on Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "A broad-coverage challenge corpus for sentence understanding through inference", |
| "authors": [ |
| { |
| "first": "Adina", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikita", |
| "middle": [], |
| "last": "Nangia", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1112--1122", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sen- tence understanding through inference. In Proceed- ings of the 2018 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Huggingface's transformers: State-of-the-art natural language processing", |
| "authors": [ |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Wolf", |
| "suffix": "" |
| }, |
| { |
| "first": "Lysandre", |
| "middle": [], |
| "last": "Debut", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Sanh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julien", |
| "middle": [], |
| "last": "Chaumond", |
| "suffix": "" |
| }, |
| { |
| "first": "Clement", |
| "middle": [], |
| "last": "Delangue", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Moi", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierric", |
| "middle": [], |
| "last": "Cistac", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Rault", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9mi", |
| "middle": [], |
| "last": "Louf", |
| "suffix": "" |
| }, |
| { |
| "first": "Morgan", |
| "middle": [], |
| "last": "Funtowicz", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1910.03771" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Fun- towicz, et al. 2019. Huggingface's transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Towards detecting need for empathetic response in motivational interviewing", |
| "authors": [ |
| { |
| "first": "Zixiu", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rim", |
| "middle": [], |
| "last": "Helaoui", |
| "suffix": "" |
| }, |
| { |
| "first": "Vivek", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Diego", |
| "middle": [ |
| "Reforgiato" |
| ], |
| "last": "Recupero", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Riboni", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Companion Publication of the 2020 International Conference on Multimodal Interaction, ICMI Companion 2020, Virtual Event", |
| "volume": "", |
| "issue": "", |
| "pages": "497--502", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3395035.3425228" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zixiu Wu, Rim Helaoui, Vivek Kumar, Diego Refor- giato Recupero, and Daniele Riboni. 2020. Towards detecting need for empathetic response in motiva- tional interviewing. In Companion Publication of the 2020 International Conference on Multimodal Interaction, ICMI Companion 2020, Virtual Event, The Netherlands, October, 2020, pages 497-502. ACM.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Modeling therapist empathy through prosody in drug addiction counseling", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Bone", |
| "suffix": "" |
| }, |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Van Segbroeck", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Panayiotis", |
| "middle": [ |
| "G" |
| ], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "INTERSPEECH 2014, 15th Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "213--217", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Xiao, Daniel Bone, Maarten Van Segbroeck, Zac E. Imel, David C. Atkins, Panayiotis G. Georgiou, and Shrikanth S. Narayanan. 2014. Modeling therapist empathy through prosody in drug addiction counsel- ing. In INTERSPEECH 2014, 15th Annual Confer- ence of the International Speech Communication As- sociation, Singapore, September 14-18, 2014, pages 213-217. ISCA.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Analyzing the language of therapist empathy in motivational interview based psychotherapy", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Dogan", |
| "middle": [], |
| "last": "Can", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Panayiotis", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Asia-Pacific Signal and Information Processing Association Annual Summit and Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "1--4", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Xiao, Dogan Can, Panayiotis G. Georgiou, David C. Atkins, and Shrikanth S. Narayanan. 2012. Ana- lyzing the language of therapist empathy in moti- vational interview based psychotherapy. In Asia- Pacific Signal and Information Processing Associa- tion Annual Summit and Conference, APSIPA 2012, Hollywood, CA, USA, December 3-6, 2012, pages 1-4. IEEE.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Behavioral coding of therapist language in addiction counseling using recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Dogan", |
| "middle": [], |
| "last": "Can", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Gibson", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Panayiotis", |
| "middle": [ |
| "G" |
| ], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Interspeech 2016, 17th Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "908--912", |
| "other_ids": { |
| "DOI": [ |
| "10.21437/Interspeech.2016-1560" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Xiao, Dogan Can, James Gibson, Zac E. Imel, David C. Atkins, Panayiotis G. Georgiou, and Shrikanth S. Narayanan. 2016a. Behavioral cod- ing of therapist language in addiction counseling using recurrent neural networks. In Interspeech 2016, 17th Annual Conference of the International Speech Communication Association, San Francisco, CA, USA, September 8-12, 2016, pages 908-912. ISCA.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "A technology prototype system for rating therapist empathy from audio recordings in addiction counseling", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Che-Wei", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Panayiotis", |
| "middle": [ |
| "G" |
| ], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "PeerJ Comput. Sci", |
| "volume": "2", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.7717/peerj-cs.59" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Xiao, Che-Wei Huang, Zac E. Imel, David C. Atkins, Panayiotis G. Georgiou, and Shrikanth S. Narayanan. 2016b. A technology prototype system for rating therapist empathy from audio recordings in addiction counseling. PeerJ Comput. Sci., 2:e59.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Analyzing speech rate entrainment and its relation to therapist empathy in drug addiction counseling", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Panayiotis", |
| "middle": [ |
| "G" |
| ], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "INTERSPEECH 2015, 16th Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "2489--2493", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Xiao, Zac E. Imel, David C. Atkins, Panayio- tis G. Georgiou, and Shrikanth S. Narayanan. 2015. Analyzing speech rate entrainment and its relation to therapist empathy in drug addiction counseling. In INTERSPEECH 2015, 16th Annual Conference of the International Speech Communication Asso- ciation, Dresden, Germany, September 6-10, 2015, pages 2489-2493. ISCA.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Benchmarking zero-shot text classification: Datasets, evaluation and entailment approach", |
| "authors": [ |
| { |
| "first": "Wenpeng", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamaal", |
| "middle": [], |
| "last": "Hay", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3912--3921", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1404" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenpeng Yin, Jamaal Hay, and Dan Roth. 2019. Benchmarking zero-shot text classification: Datasets, evaluation and entailment approach. In Proceedings of the 2019 Conference on Em- pirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing, EMNLP-IJCNLP 2019, Hong Kong, China, November 3-7, 2019, pages 3912-3921. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Towards persona-based empathetic conversational models", |
| "authors": [ |
| { |
| "first": "Peixiang", |
| "middle": [], |
| "last": "Zhong", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chunyan", |
| "middle": [], |
| "last": "Miao", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "2020", |
| "issue": "", |
| "pages": "6556--6566", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.531" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peixiang Zhong, Chen Zhang, Hao Wang, Yong Liu, and Chunyan Miao. 2020. Towards persona-based empathetic conversational models. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, EMNLP 2020, On- line, November 16-20, 2020, pages 6556-6566. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "The language of situational empathy", |
| "authors": [ |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Luca", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanja", |
| "middle": [], |
| "last": "Aiello", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Scepanovic", |
| "suffix": "" |
| }, |
| { |
| "first": "Sara", |
| "middle": [], |
| "last": "Quercia", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Konrath", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Proc. of the ACM on Human-Computer Interaction", |
| "volume": "1", |
| "issue": "", |
| "pages": "1--19", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ke Zhou, Luca Maria Aiello, Sanja Scepanovic, Daniele Quercia, and Sara Konrath. 2021. The lan- guage of situational empathy. Proc. of the ACM on Human-Computer Interaction, 1:1-19.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Condolence and empathy in online communities", |
| "authors": [ |
| { |
| "first": "Naitian", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Jurgens", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "2020", |
| "issue": "", |
| "pages": "609--626", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.45" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naitian Zhou and David Jurgens. 2020. Condolence and empathy in online communities. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing, EMNLP 2020, On- line, November 16-20, 2020, pages 609-626. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Mojitalk: Generating emotional responses at scale", |
| "authors": [ |
| { |
| "first": "Xianda", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "Yang" |
| ], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, ACL 2018", |
| "volume": "1", |
| "issue": "", |
| "pages": "1128--1137", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1104" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xianda Zhou and William Yang Wang. 2018. Mojitalk: Generating emotional responses at scale. In Pro- ceedings of the 56th Annual Meeting of the Associa- tion for Computational Linguistics, ACL 2018, Mel- bourne, Australia, July 15-20, 2018, Volume 1: Long Papers, pages 1128-1137. Association for Computa- tional Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "text": "Test set performances (in MCC) of all OOD models.", |
| "uris": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "content": "<table><tr><td/><td/><td>r/Happy \u2021</td><td colspan=\"2\">r/OffMyChest \u2021</td><td colspan=\"2\">r/CasualConversation \u00b6</td><td colspan=\"2\">EmpatheticDialogues \u2021</td></tr><tr><td>Split</td><td>train</td><td>valid test</td><td>train</td><td>valid test</td><td>train</td><td>valid test</td><td>train</td><td>valid test</td></tr><tr><td>#Conv</td><td>113.</td><td/><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td/><td/><td/><td/><td/><td/><td>i.e. listener</td></tr></table>", |
| "num": null, |
| "text": "9K 13.9K 16.0K 94.0K 12.1K 11.7K 530.2K 67.5K 66.9K 17.8K 2.8K 2.5K", |
| "html": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "content": "<table/>", |
| "num": null, |
| "text": "Natural Language Inference, example utterances fromMiller et al. (2003)", |
| "html": null |
| } |
| } |
| } |
| } |