| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:31:30.837634Z" |
| }, |
| "title": "Evaluating Automatic Speech Recognition Quality and Its Impact on Counselor Utterance Coding", |
| "authors": [ |
| { |
| "first": "June", |
| "middle": [], |
| "last": "Do", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Michigan", |
| "location": { |
| "settlement": "Ann Arbor", |
| "region": "MI", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Ver\u00f3nica", |
| "middle": [], |
| "last": "Min", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Michigan", |
| "location": { |
| "settlement": "Ann Arbor", |
| "region": "MI", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "P\u00e9rez-Rosas", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Michigan", |
| "location": { |
| "settlement": "Ann Arbor", |
| "region": "MI", |
| "country": "USA" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Michigan", |
| "location": { |
| "settlement": "Ann Arbor", |
| "region": "MI", |
| "country": "USA" |
| } |
| }, |
| "email": "mihalcea@umich.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Automatic speech recognition (ASR) is a crucial step in many natural language processing (NLP) applications, as often available data consists mainly of raw speech. Since the result of the ASR step is considered as a meaningful, informative input to later steps in the NLP pipeline, it is important to understand the behavior and failure mode of this step. In this work, we analyze the quality of ASR in the psychotherapy domain, using motivational interviewing conversations between therapists and clients. We conduct domain agnostic and domain-relevant evaluations using evaluation metrics and also identify domain-relevant keywords in the ASR output. Moreover, we empirically study the effect of mixing ASR and manual data during the training of a downstream NLP model, and also demonstrate how additional local context can help alleviate the error introduced by noisy ASR transcripts.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Automatic speech recognition (ASR) is a crucial step in many natural language processing (NLP) applications, as often available data consists mainly of raw speech. Since the result of the ASR step is considered as a meaningful, informative input to later steps in the NLP pipeline, it is important to understand the behavior and failure mode of this step. In this work, we analyze the quality of ASR in the psychotherapy domain, using motivational interviewing conversations between therapists and clients. We conduct domain agnostic and domain-relevant evaluations using evaluation metrics and also identify domain-relevant keywords in the ASR output. Moreover, we empirically study the effect of mixing ASR and manual data during the training of a downstream NLP model, and also demonstrate how additional local context can help alleviate the error introduced by noisy ASR transcripts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Evaluating the quality of psychotherapy is an essential step in assessing the fidelity of treatment and providing feedback to practitioners. In psychotherapy practice, this is usually done through a process called behavioral coding that consists of manually analyzing recordings of therapy conversations and then labeling specific behaviors from participants.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Recent efforts have addressed the automatic analysis and evaluation of psychotherapy quality, including the study of conversational dynamics between therapists and clients, the analysis of empathy and emotional responses, and the automatic assessment of therapist's skills (Althoff et al., 2016; Zhang and Danescu-Niculescu-Mizil, 2020a; P\u00e9rez-Rosas et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 273, |
| "end": 295, |
| "text": "(Althoff et al., 2016;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 296, |
| "end": 337, |
| "text": "Zhang and Danescu-Niculescu-Mizil, 2020a;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 338, |
| "end": 363, |
| "text": "P\u00e9rez-Rosas et al., 2017)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most of these research studies have been conducted using small collections of manually transcribed counseling conversations due to the need of an accurate representation of what is being said during the conversation. However, the use of manual transcription restricts the inclusion of a larger number of conversations into the analysis as it is a costly and slow process, making it challenging to apply data hungry machine learning approaches. As an alternative, some studies have explored the use of automatic speech recognition (ASR) systems that are able to quickly transcribe a large number of conversations (Flemotomos et al., 2021) . However, there are several open questions regarding the feasibility of using automatic transcriptions in the evaluation of psychotherapy (Miner et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 612, |
| "end": 637, |
| "text": "(Flemotomos et al., 2021)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 777, |
| "end": 797, |
| "text": "(Miner et al., 2020)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we study the quality of ASR in counseling conversations and its impact on the task of behavioral coding. We use an existing dataset of behavioral counseling conversations consisting of audio recordings and manual transcriptions as well as annotations of ten behaviors related to therapists' counseling skills. We start by generating automatic transcriptions using a commercially available ASR system (Google, 2020) . Using the resulting parallel corpus of manual and ASR transcriptions, we conduct an assessment of the ASR quality using three main approaches. First, we use automatic evaluation metrics such as word error rate (WER) and semantic distance to conduct domain agnostic evaluations of the ASR performance across conversation participants. Second, we conduct a domainspecific examination of the ASR output by identifying domain-relevant keywords using behavioral codes and keywords identified using the Linguistic Inquiry and Word Count (LIWC) (Pennebaker et al., 2001 ). Finally, we study the effect of the noisy ASR on the downstream behavioral coding task and empirically show that additional local context in the form of neighboring utterances can help alleviate the impact of ASR errors.", |
| "cite_spans": [ |
| { |
| "start": 414, |
| "end": 428, |
| "text": "(Google, 2020)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 969, |
| "end": 993, |
| "text": "(Pennebaker et al., 2001", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We believe that studying the role of ASR systems in the NLP pipeline is an important step to develop and evaluate robust systems for better understanding of counseling dialogues.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As the overall accuracy of ASR systems keeps improving, the ability of producing accurate transcriptions of conversational data has enabled the development of NLP applications in health. Particularly, in the psychotherapy domain, where a large fraction of therapy sessions are conducted in spoken language, ASR can help reduce the burden of manual transcription, potentially allowing for largescale analysis of interactions between counselors and patients.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "There have been several efforts on applying NLP on conversation analysis and utterance coding tasks in the psychotherapy domain. NLP was used to evaluate counselor behaviors and strategies (Zhang and Danescu-Niculescu-Mizil, 2020b; P\u00e9rez-Rosas et al., 2019; Xiao et al., 2015) , or to provide feedback by generating appropriate responses to client utterances (Shen et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 189, |
| "end": 231, |
| "text": "(Zhang and Danescu-Niculescu-Mizil, 2020b;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 232, |
| "end": 257, |
| "text": "P\u00e9rez-Rosas et al., 2019;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 258, |
| "end": 276, |
| "text": "Xiao et al., 2015)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 359, |
| "end": 378, |
| "text": "(Shen et al., 2020)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "While most of previous work was conducted on manual transcriptions, there are only a few cases where automatically generated transcripts have been used, limiting the use of computational methods in psychiatry . The main reason behind this is the need for reliable ASR systems that are able to produce accurate transcriptions as the error introduced by transcribing words incorrectly can have a great impact on the performance of the overall application.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "It has been pointed out by previous research that automatic evaluation metrics such as word error rate alone are not a good indicator of accuracy in speech understanding (Park et al., 2008) . Our work is similar to Miner et al. (2020) recent work in that we use both agnostic and domain-relevant approaches to assess ASR systems in the mental health domain. However, we additionally investigate how the ASR error, both domain-agnostic and domain-relevant, propagates through the common NLP pipeline, in training and inference times, and provide an advice for researchers.", |
| "cite_spans": [ |
| { |
| "start": 170, |
| "end": 189, |
| "text": "(Park et al., 2008)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 215, |
| "end": 234, |
| "text": "Miner et al. (2020)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Finally, Mani et al. (2020) recently framed postprocessing ASR error correction as a machine translation task from noisy transcription to ground truth transcription, and trains a sequence to sequence error correction model. Although this approach can provide a modular solution to mitigate ASR errors in many speech understanding systems, we note that building such a parallel corpus can be prohibitive for many researchers. Table 1 . The average conversation in the dataset has a duration of 21 minutes and a length of 3320 words.", |
| "cite_spans": [ |
| { |
| "start": 9, |
| "end": 27, |
| "text": "Mani et al. (2020)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 425, |
| "end": 432, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The dataset also includes utterance-level annotations for ten behavioral codes from the Motivational Interviewing Treatment Integrity (MITI) coding scheme, the current gold standard for evaluating MI fidelity. MITI is focused on therapist language only and measures how well the therapist adhered to MI strategies by counting behaviors such as asking questions, using reflective language, seeking collaboration and emphasizing autonomy, among others. The dataset annotations were conducted by annotators with previous MI experience and trained on the use of MITI system. In addition to the MITI coding, our study uses two additional categories for utterances that are not labeled in the original dataset. The first includes therapist's speech that is not labeled under any MITI code (NAT) and the second includes client's utterances (NAC). ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Alignment. Since the manual transcriptions provided in the dataset consist of transcribed speech without corresponding timestamps, we used forced alignment to automatically align speakers' speech with its corresponding transcription. We used Gentle (Ochshorn and Hawkins), a forced speech aligner implemented using the Kaldi toolkit for speech recognition (Povey et al., 2011) . Note that this is a necessary step to enable comparisons between manual and automatic transcriptions for the same audio segments. Automatic Transcription. To automatically transcribe each counseling session, we first spliced its audio into smaller segments using the obtained timestamps. Next, we individually transcribed each segment using the Google's Speech-to-Text recognition system (Google, 2020). 1 Again, our choice of transcribing segments rather than full conversations is motivated by the need of comparable units so we can avoid potential misalignment generated by ASR segmentation.", |
| "cite_spans": [ |
| { |
| "start": 356, |
| "end": 376, |
| "text": "(Povey et al., 2011)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Preprocessing", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We start by conducting a domain-agnostic evaluation of the automatic transcription process that considers that the accuracy of the ASR system is equally important for all speech in the conversation. To this end, we focus on two automatic evaluation metrics: word error rate and semantic distance. The first one evaluates transcription error at the wordlevel; the second one aims to evaluate transcription error considering the semantic distance between the ASR output and the ground truth i.e., human transcription.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Domain-agnostic Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We calculate WER using the equation below, where S, D, I each denote the number of substitutions, deletions, and insertions respectively required to make the reference sequence identical to the ASR sequence. C refers to the number of correct words, whereas N is the number of words in the reference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Error Rate (WER)", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "W ER = S + D + I S + D + C = S + D + I N", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Word Error Rate (WER)", |
| "sec_num": null |
| }, |
| { |
| "text": "We use the Python Jiwer package 2 to automatically calculate WER for all conversations in the dataset. Our calculations are done by aggregating transcriptions by the corresponding speaker and averaging across sessions. Semantic Distance. Although recent works show that averaging WERs over large benchmark sets can provide good estimation of model performance (Likhomanenko et al., 2020) , there have been criticisms against relying solely on WERs, on the grounds that some important aspects of transcription quality are ignored when focusing on word overlaps (Kong et al., 2016; Szyma\u0144ski et al., 2020) . For instance, \"This is a cap\" and \"This is a cat\" will have a low score of WER because of the low edit distance between the sentences, while their semantic contents are about two distant concepts (Kim et al., 2021) . We use semantic distance to complement WER as semantics play an important role in understanding psychotherapy language and the meaning of a particular utterance could be greatly affected by substitutions done during the ASR process.", |
| "cite_spans": [ |
| { |
| "start": 360, |
| "end": 387, |
| "text": "(Likhomanenko et al., 2020)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 560, |
| "end": 579, |
| "text": "(Kong et al., 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 580, |
| "end": 603, |
| "text": "Szyma\u0144ski et al., 2020)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 802, |
| "end": 820, |
| "text": "(Kim et al., 2021)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Error Rate (WER)", |
| "sec_num": null |
| }, |
| { |
| "text": "More specifically, we measure the difference in semantic content between the ground truth and ASR transcriptions. Our calculations are conducted at the utterance level and aggregated overall all conversations. We define the semantic distance between a manually transcribed utterance U tt M AN and an automatically transcribed utterance U tt ASR as the cosine distance between the sentence embeddings of each utterance:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Error Rate (WER)", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Semantic Distance(U tt M AN , U tt ASR ) = 1 \u2212 emb(U tt M AN ) \u2022 emb(U tt ASR ) emb(U tt M AN ) emb(U tt ASR )", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Word Error Rate (WER)", |
| "sec_num": null |
| }, |
| { |
| "text": "Thus, lower semantic distance between a manual transcription and an ASR transcription would indicate lower degree of transcription error. For the emb(\u2022) function we use sentence transformer embeddings (Reimers and Gurevych, 2019) . We chose the sentence transformer over alternative methods of sentence embeddings such as BERT or word2vec, since recent research has shown that offthe-shelf transformer models without fine-tuning often lead to representations that perform poorly on semantic similarity tasks . Table 3 summarizes the results obtained by speaker's role (i.e., therapist, client) and gender (i.e., male, female). Overall, transcription of therapist's speech shows significantly lower error than client speech in terms of WER, but not on semantic distance (two tailed Mann-Whitney U-test, p < .05). We also observe significant differences in female and male speech recognaition for both WER and semantic distance (p < .05, two tailed Mann-Whitney U-test). The difference between genders is also confirmed when the speaker roles are considered. This result is aligned with previous findings that ASR systems tend to perform better on female speakers due to being more consistent to standard pronunciations than male speakers (Adda-Decker and Lamel, 2005; Goldwater et al., 2008) . However, it is important to mention that other work on ASR evaluation have encountered the opposite trend, where transcription of female speakers speech obtained higher WER than of males (Tatman, 2017) . A factor that potentially affected our analysis is that due to the unavailability of identity data for speakers in the dataset, we treated each session as featuring a unique set of speakers. This might have been caused by the over-representation of speakers who appear multiple times in the dataset.", |
| "cite_spans": [ |
| { |
| "start": 201, |
| "end": 229, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 1237, |
| "end": 1266, |
| "text": "(Adda-Decker and Lamel, 2005;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1267, |
| "end": 1290, |
| "text": "Goldwater et al., 2008)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1480, |
| "end": 1494, |
| "text": "(Tatman, 2017)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 510, |
| "end": 517, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Word Error Rate (WER)", |
| "sec_num": null |
| }, |
| { |
| "text": "Although the domain-agnostic evaluation can provide insights into the aggregate performance of an ASR system, a domain informed evaluation can help to better understand the quality of derived transcriptions and its potential impact on downstream tasks. In the counseling domain, incorrect transcription of words or phrases related to emotion, mental state, addiction, or medication can cause more harm than the incorrect transcription of other types of words. Seeking to evaluate the role of domain on ASR quality in our automatically transcribed conversations, we focus on speech that is relevant to counseling quality. To identify such speech, we use the behavioral coding provided in the dataset and also word categories from the Linguistic Inquiry and Word Count (LIWC) lexicon (Pennebaker et al., 2001 ). Behavioral codes. We measure WER and semantic distance on utterances coded with the ten counselor behaviors included in the dataset and also examined transcription error in uncoded utterances from both, therapists and clients. For WER, we first concatenated all the utterances labeled with a given code in each single conversation, and then averaged the obtained WER across all conversations. Semantic distances for each utterance are averaged over all utterances in the dataset. LIWC Categories. LIWC is a psycholinguistic lexicon that maps words and its stems to a set of categories related to psychological processes. There are 69 predefined categories that cover four highlevel topics: psychological processes, personal concerns, linguistic dimensions, and linguistic fillers.", |
| "cite_spans": [ |
| { |
| "start": 782, |
| "end": 806, |
| "text": "(Pennebaker et al., 2001", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Domain-relevant Evaluation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For our analysis, we identify and select a subset of categories from psychological processes and personal concerns as they have been found relevant to psychotherapy conversations. For words in the different categories appearing in the ground truth utterances, we evaluated whether the ASR system was able to correctly transcribed them. We calculate the true positive, false negative, and false positive rates as well the standard metrics of recall and precision. Table 4 shows the average WER and semantic distance of transcription for behavior codes and also for non-coded (\"Non-coded Client\", \"Non-coded Therpapist\") language in the conversations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 463, |
| "end": 470, |
| "text": "Table 4", |
| "ref_id": "TABREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Domain-relevant Evaluation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In general, we find that non-coded language tends to have higher transcription error than codedlanguage (two-tailed Mann-Whitney U-test, p < 0.05 for both WER and semantic distance). Within non-applicable codes, we note that NAT shows higher WER and semantic distance. Since in Table 3 we saw that client language tends to have higher error overall than therapist language, this may indicate that transcription error is correlated to speech content or topic, because NAC covers all client utterances, while NAT is only applied for non-MITI labeled utterances. When the ASR system is evaluated in terms of transcribing keywords that are relevant to psychotherapy and counseling, results from Table 5 indicate that correctly retrieving keywords is harder for ASR systems than avoiding incorrect insertion of keywords in the transcription, as precision values are concentrated near 1.0, while recall values are more diverse. Table 6 gives an example of how omission errors can change the semantic content of the utterance for LIWC categories such as \"DEATH, BODY\". In the context of mental health and psychotherapy, these results suggest that aggregate metrics that compare whole ground truth utterances and ASR transcriptions to compute error rate are not granular enough to capture such cases of ASR failure where mistrancriptions of keywords might result in clinicians or counselors missing signs of patient distress or danger.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 691, |
| "end": 698, |
| "text": "Table 5", |
| "ref_id": "TABREF8" |
| }, |
| { |
| "start": 922, |
| "end": 929, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Beyond studying the domain-agnostic and domainrelevant error patterns of the automatic transcrip-tion, we also study the relationship between the speech transcription step and the later behavior code classification, where ASR transcriptions are fed as input.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Role of ASR on the Automatic Evaluation of Psychotherapy", |
| "sec_num": "6" |
| }, |
| { |
| "text": "To explore whether the use of noisy ASR transcriptions affects the automatic evaluation of psychotherapy, we focus on a behavioral coding task where we seek to label participants' utterances into a set of predefined codes relevant to counseling quality using transcripts that are either manual or automatically generated. We use the utterance-level annotations provided with the dataset described in Section 3, which consist of ten codes for therapist language plus two additional codes for annotated language from therapists and clients. We thus conduct a multi-label classification task to assign each utterance in the conversation to any of these 12 labels.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Performance", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Our experiments are performed using a BERT model as our baseline classifier (Devlin et al., 2019) and our evaluated are conducted using 5-fold crossvalidation. BERT is a transformer-based model that has been widely used in NLP. We chose this model since pretrained parameters fine-tuned on large natural language corpora are readily available, and also because due to its design the additional context input could easily supplied through the use of separate token type ids. We used the version implemented in (Wolf et al., 2020) with a learning rate of 2e-5. The input to the model is a sequence of token-level embeddings of each utterance in the conversation and the predicted label is assigned using a multilayer perceptron. The experiments are run on a GeForce RTX 2080 Ti.", |
| "cite_spans": [ |
| { |
| "start": 76, |
| "end": 97, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 509, |
| "end": 528, |
| "text": "(Wolf et al., 2020)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Performance", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "We first conduct a set of experiments where we train and test multi-class utterance classifiers using either manual or automatic transcripts. In our first experiment, we aim to measure the model accuracy when using high quality training data i.e., manual transcripts for both, testing and training sets. Second, we substitute the train set for its automatically transcribed version and test on a manually transcribed set to evaluate the potential performance loss when training with noisy transcripts. Third, we again train on manual transcripts but this time test on automatic transcripts to evaluate whether a model built with accurate transcripts (i.e., produced by humans) would be effective while testing on transcriptions that are automatically obtained. Fi- And I really felt like I was there. Category: MONEY / Error Type: Insertion Manual: Oh money to buy the cigarettes, and not to buy medicine Exactly Because it's expensive. ASR: Money to buy cigarettes, but no money for the medicine exactly six months ago Table 6 : Sample ASR errors for LIWC-identified keywords nally, we evaluate a fully automatic pipeline, where both, train and test sets are obtained using ASR models. Results for these experiments are shown in Table 9 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 1020, |
| "end": 1027, |
| "text": "Table 6", |
| "ref_id": null |
| }, |
| { |
| "start": 1230, |
| "end": 1237, |
| "text": "Table 9", |
| "ref_id": "TABREF12" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model Performance", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "As results in Table 7 indicate, the choice of transcription method for both training and testing sets has a significant impact on the classification performance. Here, we see that even the model trained on the same manually transcribed training data can have drastically different reported performance, depending on the transcription method of the testing set. On the other hand, we also note that using ASR transcription as training set leads to a large decrease in performance when tested using manual testing data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 14, |
| "end": 21, |
| "text": "Table 7", |
| "ref_id": "TABREF10" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance Trade-off", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Since manual transcription is the most accurate representation of speech data, working with manual transcriptions would be the optimal choice. However, manual transcription can be expensive, especially for situations where a large amount of data has been collected. Thus, in many cases ASR technologies provide a faster and much more affordable transcription method. However, supervised learning with noisy ASR transcripts may result in the model learning spurious correlations, rather than the desired relationship between certain linguistic patterns and the predicted variables. This in turn leads to lower performances as shown in our experiments, where we observe performance losses up to 15%. Furthermore, consider a real case reported by Miner et al. (2020) , where the word \"depressed\" was incorrectly transcribed into \"the preston\" in a self-harm counseling session. If an emotion detector were to be trained on the automatically transcribed data, the obvious correlation between \"depressed\" and \"sad, blue\" emotions will be lost, and replaced with a spurious one.", |
| "cite_spans": [ |
| { |
| "start": 744, |
| "end": 763, |
| "text": "Miner et al. (2020)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Trade-off", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "These considerations raise the question of what would be the best trade-off between the use of manual and automatic transcription methods in the psychotherapy domain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Trade-off", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "To answer this question, we conduct a set of ex- Figure 1 : Classification accuracy as the fraction of manually transcribe data increases in the training set periments where we gradually mix manually and automatically transcribed data during the training phase of the classification model. To ensure that the model is learning fairly, we ensured that each utterance only appears once in the entire dataset, without appearing both in the manual or ASR sets. By progressively adding more manual data in the training set, we emulate practical settings where only a fraction of data can be manually transcribed due to cost or time constrains. More specifically, we start with a full training set using ASR transcription, and increase the percentage of manual data at 20% increments. Note that reported accuracy is measured in a manually transcribed testing set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 49, |
| "end": 57, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance Trade-off", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "As shown in Figure 1 , the performance of the trained system does increase as the fraction of manual data increases. However, this is not shown as a linear relationship, as most of the performance gain occurs in the first few additions of the manual data. Although further study is warranted to explain how the small fraction of manual transcription leads to a noticeable increase in performance, this result indicates that even a small amount of manual transcription effort can improve the system performance in a meaningful way, and thus manual transcription is more cost-effective in its early stages than its later stages. For example, in the context of this experiment, practitioners can expect approximately 85% of the performance improvement of full manual transcription at the price of manually transcribing only 40% of the dataset.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Performance Trade-off", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "ASR error correction is an ongoing research topic in signal processing and natural language processing communities, and several techniques, including post-editing and domain adaptation, have been proposed (Mani et al., 2020) . However, in this paper, we explore a simpler strategy based on context augmentation considering the distributional hypothesis in semantic theory, which states that words appearing in the same contexts tend to have similar meaning (Harris, 1954) . We thus hypothesize that augmenting the target utterance with local context consisting of neighboring utterances can alleviate the effect of noisy transcription.", |
| "cite_spans": [ |
| { |
| "start": 205, |
| "end": 224, |
| "text": "(Mani et al., 2020)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 457, |
| "end": 471, |
| "text": "(Harris, 1954)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Can (noisy) Local Context Help?", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "To this end, we compare BERT-based classifiers with different amounts of local context in addition to the target utterance (Devlin et al., 2019) . The results shown in Table 9 : Classification results for behavioral coding when using local context are the result of five-fold cross validation. The \"No Context\" model is given a single utterance as input, and the final label is obtained by computing softmax after the final linear layer. For the \"Context = n\" models, n previous and following utterances surrounding the target utterance are also provided to the BERT model, as a concatenation. Note that through the use of separate token type ids, BERT allows practitioners to separately designate a sequence of context tokens, distinct from the target tokens. Overall, models that integrate context information outperform the base model in terms of average accuracy and Macro F1 with small but consistent performance gains, thus suggesting that the system's performance can be improved using this simple strategy as opposed to conducting expensive manual transcription.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 144, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 168, |
| "end": 175, |
| "text": "Table 9", |
| "ref_id": "TABREF12" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Can (noisy) Local Context Help?", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "Our work has several limitations that should be addressed through future work. First, our study only considers Google's ASR and although this is a popular choice there are several other commercial and open source alternatives. Initially, we also explored the use of Amazon Transcribe Medical 3 ; however, initial experiments did not show much variation with respect to the use of Google ASR. Nonetheless, further analysis is needed to evaluate how well the findings of this work will generalize to other ASR systems. Second, the computed WER and semantic distance are noisy, since the timestamps we used to align manual and automatic transcriptions were obtained through forced alignment. Furthermore, we did not evaluate the speaker diarization performance of the ASR system in identifying speaker's role. Current ASR systems, including Google's speech-to-text, offer the functionality to automatically assign speaker identities to transcribed utterances, and this feature might be useful for automatically assigning speaker roles to each utterance. Finally, we limited our focus to the behavioral coding task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Limitations", |
| "sec_num": "7" |
| }, |
| { |
| "text": "In this work, we conducted an evaluation of automatic speech recognition in the counseling domain using conversations between counselors and clients. To measure the degree of transcription error introduced by the use of an ASR system, we conducted domain-agnostic and domain-relevant evaluations using WER and semantic distance. Our analysis showed that while WER and semantic distance are in the 35 to 40% range when conducting a domain-agnostic evaluation, the transcription error is slightly lower when considering transcription segments that are relevant to the domain, i.e., utterances identified as important in evaluating the quality of counseling.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Lessons Learned", |
| "sec_num": "8" |
| }, |
| { |
| "text": "Moreover, we examined how the ASR step fits in and impacts the larger pipeline of an NLP system for behavioral coding in psychotherapy by comparing how the use of ASR data in place of manually transcribed data affects the performance of the downstream NLP system. Finally, we empirically showed that augmenting the system input with local context may alleviate the impact of noisy transcription. Given the results and analyses of this work, we conclude with the following lessons we learned in this study, on using ASR for NLP applications in psychotherapy and counseling: (1) Aggregate error measures are not sufficient by themselves, and must be complemented with domain-specific evaluations. (2) ASR error rates and performances differ across speaker roles and demographics as well as utterance content/topics. (3) Even a relatively small amount of manual transcription effort can help counteract noisy ASR and improve performance during the training of NLP models for psychotherapy applications.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Lessons Learned", |
| "sec_num": "8" |
| }, |
| { |
| "text": "We use the Google Cloud speech-to-text enhanced model", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://pypi.org/project/jiwer/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://aws.amazon.com/transcribe/ medical/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This material is based in part upon work supported by the Precision Health initiative at the University of Michigan, by the National Science Foundation (grant #1815291), and by the John Templeton Foundation (grant #61156). Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author and do not necessarily reflect the views of the Precision Health initiative, the National Science Foundation, or John Templeton Foundation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgment", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Do speech recognizers prefer female speakers?", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Adda-Decker", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Lamel", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Adda-Decker and L. Lamel. 2005. Do speech rec- ognizers prefer female speakers? In Interspeech.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Large-scale analysis of counseling conversations: An application of natural language processing to mental health", |
| "authors": [ |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Althoff", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Jure", |
| "middle": [], |
| "last": "Leskovec", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "4", |
| "issue": "", |
| "pages": "463--476", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00111" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tim Althoff, Kevin Clark, and Jure Leskovec. 2016. Large-scale analysis of counseling conversations: An application of natural language processing to mental health. Transactions of the Association for Computational Linguistics, 4:463-476.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understand- ing.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Automated evaluation of psychotherapy skills using speech and language technologies", |
| "authors": [ |
| { |
| "first": "Nikolaos", |
| "middle": [], |
| "last": "Flemotomos", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Victor", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhuohao", |
| "middle": [], |
| "last": "Martinez", |
| "suffix": "" |
| }, |
| { |
| "first": "Karan", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Singla", |
| "suffix": "" |
| }, |
| { |
| "first": "Raghuveer", |
| "middle": [], |
| "last": "Ardulov", |
| "suffix": "" |
| }, |
| { |
| "first": "Derek", |
| "middle": [ |
| "D" |
| ], |
| "last": "Peri", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Caperton", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [ |
| "J" |
| ], |
| "last": "Gibson", |
| "suffix": "" |
| }, |
| { |
| "first": "Panayiotis", |
| "middle": [], |
| "last": "Tanana", |
| "suffix": "" |
| }, |
| { |
| "first": "Jake", |
| "middle": [], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarah", |
| "middle": [ |
| "P" |
| ], |
| "last": "Van Epps", |
| "suffix": "" |
| }, |
| { |
| "first": "Tad", |
| "middle": [], |
| "last": "Lord", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hirsch", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikolaos Flemotomos, Victor R. Martinez, Zhuo- hao Chen, Karan Singla, Victor Ardulov, Raghu- veer Peri, Derek D. Caperton, James Gibson, Michael J. Tanana, Panayiotis Georgiou, Jake Van Epps, Sarah P. Lord, Tad Hirsch, Zac E. Imel, David C. Atkins, and Shrikanth Narayanan. 2021. Automated evaluation of psychotherapy skills using speech and language technologies.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Which words are hard to recognize? prosodic, lexical, and disfluency factors that increase ASR error rates", |
| "authors": [ |
| { |
| "first": "Sharon", |
| "middle": [], |
| "last": "Goldwater", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of ACL-08: HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "380--388", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sharon Goldwater, Dan Jurafsky, and Christopher D. Manning. 2008. Which words are hard to recog- nize? prosodic, lexical, and disfluency factors that increase ASR error rates. In Proceedings of ACL-08: HLT, pages 380-388, Columbus, Ohio. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Google. cloud speech-to-text", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Google", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Google. 2020. Google. cloud speech-to-text", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Distributional structure. Word", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Zellig", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Harris", |
| "suffix": "" |
| } |
| ], |
| "year": 1954, |
| "venue": "", |
| "volume": "10", |
| "issue": "", |
| "pages": "146--162", |
| "other_ids": { |
| "DOI": [ |
| "10.1080/00437956.1954.11659520" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zellig S. Harris. 1954. Distributional structure. Word, 10(2-3):146-162.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Computational psychotherapy research: scaling up the evaluation of patient-provider interactions", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Zac", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Steyvers", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Atkins", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Psychotherapy", |
| "volume": "52", |
| "issue": "", |
| "pages": "19--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zac E. Imel, M. Steyvers, and David C. Atkins. 2015. Computational psychotherapy research: scaling up the evaluation of patient-provider interactions. Psy- chotherapy, 52 1:19-30.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Semantic distance: A new metric for asr performance analysis towards spoken language understanding", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Seltzer", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Seltzer. 2021. Semantic distance: A new metric for asr performance analysis towards spoken language understanding.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Evaluating automatic speech recognition systems in comparison with human perception results using distinctive feature measures", |
| "authors": [ |
| { |
| "first": "Xiang", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeung-Yoon", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefanie", |
| "middle": [], |
| "last": "Shattuck-Hufnagel", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiang Kong, Jeung-Yoon Choi, and Stefanie Shattuck- Hufnagel. 2016. Evaluating automatic speech recog- nition systems in comparison with human perception results using distinctive feature measures.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "On the sentence embeddings from pre-trained language models", |
| "authors": [ |
| { |
| "first": "Bohan", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Junxian", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Mingxuan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bohan Li, Hao Zhou, Junxian He, Mingxuan Wang, Yiming Yang, and Lei Li. 2020. On the sentence embeddings from pre-trained language models.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Rethinking evaluation in asr: Are our models robust enough?", |
| "authors": [ |
| { |
| "first": "Tatiana", |
| "middle": [], |
| "last": "Likhomanenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiantong", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Vineel", |
| "middle": [], |
| "last": "Pratap", |
| "suffix": "" |
| }, |
| { |
| "first": "Paden", |
| "middle": [], |
| "last": "Tomasello", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Kahn", |
| "suffix": "" |
| }, |
| { |
| "first": "Gilad", |
| "middle": [], |
| "last": "Avidov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabriel", |
| "middle": [], |
| "last": "Synnaeve", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tatiana Likhomanenko, Qiantong Xu, Vineel Pratap, Paden Tomasello, Jacob Kahn, Gilad Avidov, Ronan Collobert, and Gabriel Synnaeve. 2020. Rethinking evaluation in asr: Are our models robust enough?", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Asr error correction and domain adaptation using machine translation", |
| "authors": [ |
| { |
| "first": "Anirudh", |
| "middle": [], |
| "last": "Mani", |
| "suffix": "" |
| }, |
| { |
| "first": "Shruti", |
| "middle": [], |
| "last": "Palaskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Nimshi", |
| "middle": [], |
| "last": "Venkat Meripo", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandeep", |
| "middle": [], |
| "last": "Konam", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Metze", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anirudh Mani, Shruti Palaskar, Nimshi Venkat Meripo, Sandeep Konam, and Florian Metze. 2020. Asr er- ror correction and domain adaptation using machine translation.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Assessing the accuracy of automatic speech recognition for psychotherapy", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [ |
| "S" |
| ], |
| "last": "Miner", |
| "suffix": "" |
| }, |
| { |
| "first": "Albert", |
| "middle": [], |
| "last": "Haque", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [ |
| "Alan" |
| ], |
| "last": "Fries", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "L" |
| ], |
| "last": "Fleming", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Wilfley", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Milstein", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Arnow", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [ |
| "Stewart" |
| ], |
| "last": "Agras", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Shah", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "NPJ Digital Medicine", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam S. Miner, Albert Haque, Jason Alan Fries, S. L. Fleming, D. Wilfley, G. Terence Wilson, A. Milstein, D. Jurafsky, B. Arnow, W. Stewart Agras, Li Fei-Fei, and N. Shah. 2020. Assessing the accuracy of auto- matic speech recognition for psychotherapy. NPJ Digital Medicine, 3.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "An empirical analysis of word error rate and keyword error rate", |
| "authors": [ |
| { |
| "first": "Youngja", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Siddharth Patwardhan", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "C" |
| ], |
| "last": "Visweswariah", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gates", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "INTER-SPEECH", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Youngja Park, Siddharth Patwardhan, K. Visweswariah, and S. C. Gates. 2008. An empirical analysis of word error rate and keyword error rate. In INTER- SPEECH.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Linguistic Inquiry and Word Count", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [ |
| "W" |
| ], |
| "last": "Pennebaker", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [ |
| "E" |
| ], |
| "last": "Francis", |
| "suffix": "" |
| }, |
| { |
| "first": "Roger", |
| "middle": [ |
| "J" |
| ], |
| "last": "Booth", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Lawerence Erlbaum Associates", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James W. Pennebaker, Martha E. Francis, and Roger J. Booth. 2001. Linguistic Inquiry and Word Count. Lawerence Erlbaum Associates, Mahwah, NJ.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Building a motivational interviewing dataset", |
| "authors": [ |
| { |
| "first": "Ver\u00f3nica", |
| "middle": [], |
| "last": "P\u00e9rez-Rosas", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Resnicow", |
| "suffix": "" |
| }, |
| { |
| "first": "Satinder", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "An", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Third Workshop on Computational Linguistics and Clinical Psychology", |
| "volume": "", |
| "issue": "", |
| "pages": "42--51", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W16-0305" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ver\u00f3nica P\u00e9rez-Rosas, Rada Mihalcea, Kenneth Resni- cow, Satinder Singh, and Lawrence An. 2016. Build- ing a motivational interviewing dataset. In Proceed- ings of the Third Workshop on Computational Lin- guistics and Clinical Psychology, pages 42-51, San Diego, CA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Predicting counselor behaviors in motivational interviewing encounters", |
| "authors": [ |
| { |
| "first": "Ver\u00f3nica", |
| "middle": [], |
| "last": "P\u00e9rez-Rosas", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Resnicow", |
| "suffix": "" |
| }, |
| { |
| "first": "Satinder", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "An", |
| "suffix": "" |
| }, |
| { |
| "first": "Kathy", |
| "middle": [ |
| "J" |
| ], |
| "last": "Goggin", |
| "suffix": "" |
| }, |
| { |
| "first": "Delwyn", |
| "middle": [], |
| "last": "Catley", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1128--1137", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ver\u00f3nica P\u00e9rez-Rosas, Rada Mihalcea, Kenneth Resni- cow, Satinder Singh, Lawrence An, Kathy J. Goggin, and Delwyn Catley. 2017. Predicting counselor be- haviors in motivational interviewing encounters. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Lin- guistics: Volume 1, Long Papers, pages 1128-1137, Valencia, Spain. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "What makes a good counselor? learning to distinguish between high-quality and low-quality counseling conversations", |
| "authors": [ |
| { |
| "first": "Ver\u00f3nica", |
| "middle": [], |
| "last": "P\u00e9rez-Rosas", |
| "suffix": "" |
| }, |
| { |
| "first": "Xinyi", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenneth", |
| "middle": [], |
| "last": "Resnicow", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "926--935", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1088" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ver\u00f3nica P\u00e9rez-Rosas, Xinyi Wu, Kenneth Resnicow, and Rada Mihalcea. 2019. What makes a good coun- selor? learning to distinguish between high-quality and low-quality counseling conversations. In Pro- ceedings of the 57th Annual Meeting of the Associa- tion for Computational Linguistics, pages 926-935, Florence, Italy. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "The kaldi speech recognition toolkit", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Povey", |
| "suffix": "" |
| }, |
| { |
| "first": "Arnab", |
| "middle": [], |
| "last": "Ghoshal", |
| "suffix": "" |
| }, |
| { |
| "first": "Gilles", |
| "middle": [], |
| "last": "Boulianne", |
| "suffix": "" |
| }, |
| { |
| "first": "Nagendra", |
| "middle": [], |
| "last": "Goel", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirko", |
| "middle": [], |
| "last": "Hannemann", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanmin", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Petr", |
| "middle": [], |
| "last": "Schwarz", |
| "suffix": "" |
| }, |
| { |
| "first": "Georg", |
| "middle": [], |
| "last": "Stemmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "IEEE 2011 workshop", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Povey, Arnab Ghoshal, Gilles Boulianne, Na- gendra Goel, Mirko Hannemann, Yanmin Qian, Petr Schwarz, and Georg Stemmer. 2011. The kaldi speech recognition toolkit. In In IEEE 2011 work- shop.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
| "authors": [ |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reimers", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Counseling-style reflection generation using generative pretrained transformers with augmented context", |
| "authors": [ |
| { |
| "first": "Siqi", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Welch", |
| "suffix": "" |
| }, |
| { |
| "first": "Rada", |
| "middle": [], |
| "last": "Mihalcea", |
| "suffix": "" |
| }, |
| { |
| "first": "Ver\u00f3nica", |
| "middle": [], |
| "last": "P\u00e9rez-Rosas", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "10--20", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siqi Shen, Charles Welch, Rada Mihalcea, and Ver\u00f3nica P\u00e9rez-Rosas. 2020. Counseling-style re- flection generation using generative pretrained trans- formers with augmented context. In Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 10-20, 1st virtual meeting. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "WER we are and WER we think we are", |
"authors": [
{
"first": "Piotr",
"middle": [],
"last": "Szyma\u0144ski",
"suffix": ""
},
{
"first": "Piotr",
"middle": [],
"last": "\u017belasko",
"suffix": ""
},
{
"first": "Mikolaj",
"middle": [],
"last": "Morzy",
"suffix": ""
},
{
"first": "Adrian",
"middle": [],
"last": "Szymczak",
"suffix": ""
},
{
"first": "Marzena",
"middle": [],
"last": "\u017by\u0142a-Hoppe",
"suffix": ""
},
{
"first": "Joanna",
"middle": [],
"last": "Banaszczak",
"suffix": ""
},
{
"first": "Lukasz",
"middle": [],
"last": "Augustyniak",
"suffix": ""
},
{
"first": "Jan",
"middle": [],
"last": "Mizgajski",
"suffix": ""
},
{
"first": "Yishay",
"middle": [],
"last": "Carmiel",
"suffix": ""
}
],
| "year": 2020, |
| "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "3290--3295", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.findings-emnlp.295" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
"raw_text": "Piotr Szyma\u0144ski, Piotr \u017belasko, Mikolaj Morzy, Adrian Szymczak, Marzena \u017by\u0142a-Hoppe, Joanna Banaszczak, Lukasz Augustyniak, Jan Mizgajski, and Yishay Carmiel. 2020. WER we are and WER we think we are. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 3290-3295, Online. Association for Computational Linguistics.",
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Gender and dialect bias in YouTube's automatic captions", |
| "authors": [ |
| { |
| "first": "Rachael", |
| "middle": [], |
| "last": "Tatman", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the First ACL Workshop on Ethics in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "53--59", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W17-1606" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rachael Tatman. 2017. Gender and dialect bias in YouTube's automatic captions. In Proceedings of the First ACL Workshop on Ethics in Natural Lan- guage Processing, pages 53-59, Valencia, Spain. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Transformers: State-of-the-art natural language processing", |
"authors": [
{
"first": "Thomas",
"middle": [],
"last": "Wolf",
"suffix": ""
},
{
"first": "Lysandre",
"middle": [],
"last": "Debut",
"suffix": ""
},
{
"first": "Victor",
"middle": [],
"last": "Sanh",
"suffix": ""
},
{
"first": "Julien",
"middle": [],
"last": "Chaumond",
"suffix": ""
},
{
"first": "Clement",
"middle": [],
"last": "Delangue",
"suffix": ""
},
{
"first": "Anthony",
"middle": [],
"last": "Moi",
"suffix": ""
},
{
"first": "Pierric",
"middle": [],
"last": "Cistac",
"suffix": ""
},
{
"first": "Tim",
"middle": [],
"last": "Rault",
"suffix": ""
},
{
"first": "Remi",
"middle": [],
"last": "Louf",
"suffix": ""
},
{
"first": "Morgan",
"middle": [],
"last": "Funtowicz",
"suffix": ""
},
{
"first": "Joe",
"middle": [],
"last": "Davison",
"suffix": ""
},
{
"first": "Sam",
"middle": [],
"last": "Shleifer",
"suffix": ""
},
{
"first": "Patrick",
"middle": [
"Von"
],
"last": "Platen",
"suffix": ""
},
{
"first": "Clara",
"middle": [],
"last": "Ma",
"suffix": ""
},
{
"first": "Yacine",
"middle": [],
"last": "Jernite",
"suffix": ""
},
{
"first": "Julien",
"middle": [],
"last": "Plu",
"suffix": ""
},
{
"first": "Canwen",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Teven",
"middle": [
"Le"
],
"last": "Scao",
"suffix": ""
},
{
"first": "Sylvain",
"middle": [],
"last": "Gugger",
"suffix": ""
},
{
"first": "Mariama",
"middle": [],
"last": "Drame",
"suffix": ""
},
{
"first": "Quentin",
"middle": [],
"last": "Lhoest",
"suffix": ""
},
{
"first": "Alexander",
"middle": [],
"last": "Rush",
"suffix": ""
}
],
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "38--45", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-demos.6" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, Remi Louf, Morgan Funtow- icz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Trans- formers: State-of-the-art natural language process- ing. In Proceedings of the 2020 Conference on Em- pirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
"title": "\"rate my therapist\": Automated detection of empathy in drug and alcohol counseling via speech and language processing",
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Xiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zac", |
| "middle": [ |
| "E" |
| ], |
| "last": "Imel", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Georgiou", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "C" |
| ], |
| "last": "Atkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Shrikanth", |
| "middle": [ |
| "S" |
| ], |
| "last": "Narayanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "PLoS ONE", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "B. Xiao, Zac E. Imel, P. Georgiou, David C. Atkins, and Shrikanth S. Narayanan. 2015. \"rate my thera- pist\": Automated detection of empathy in drug and alcohol counseling via speech and language process- ing. PLoS ONE, 10.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Balancing objectives in counseling conversations: Advancing forwards or looking backwards", |
| "authors": [ |
| { |
| "first": "Justine", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristian", |
| "middle": [], |
| "last": "Danescu-Niculescu-Mizil", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "5276--5289", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-main.470" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Justine Zhang and Cristian Danescu-Niculescu-Mizil. 2020a. Balancing objectives in counseling conversa- tions: Advancing forwards or looking backwards. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5276- 5289, Online. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Balancing objectives in counseling conversations: Advancing forwards or looking backwards", |
| "authors": [ |
| { |
| "first": "Justine", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristian", |
| "middle": [], |
| "last": "Danescu-Niculescu-Mizil", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Justine Zhang and Cristian Danescu-Niculescu-Mizil. 2020b. Balancing objectives in counseling conver- sations: Advancing forwards or looking backwards. In Proceedings of ACL.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF1": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>: Session statistics</td></tr><tr><td>3 Dataset</td></tr><tr><td>3.1 Data Source</td></tr><tr><td>We evaluate utterances and behavioral codes from</td></tr><tr><td>213 counseling sessions compiled by P\u00e9rez-Rosas</td></tr><tr><td>et al. (2016). The sessions were originally drawn</td></tr><tr><td>from various sources, including two studies on</td></tr><tr><td>smoking cessation and medication adherence. The</td></tr><tr><td>full set comprises a total of 97.8 hours of audio</td></tr><tr><td>with average session duration of 20.8 minutes. All</td></tr><tr><td>the sessions were manually anonymized to remove</td></tr><tr><td>identifiable information such as counselor and pa-</td></tr><tr><td>tient names and references to counseling sites' lo-</td></tr><tr><td>cation. The sessions were transcribed using manual</td></tr><tr><td>and crowd-sourced methods. The transcription set</td></tr><tr><td>consist of 707,165 words distributed across 52,658</td></tr><tr><td>utterances and 39,637 talk-turns. More detailed</td></tr><tr><td>statistics on words and utterances per session are</td></tr><tr><td>provided in</td></tr></table>" |
| }, |
| "TABREF2": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>list the different behavioral codes, their count and</td></tr><tr><td>their average word length.</td></tr></table>" |
| }, |
| "TABREF3": { |
| "text": "Statistics for MITI behaviors coded in the dataset", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF5": { |
| "text": "WER and Semantic Distance statistics by speaker role and gender for manual and automatic transcriptions. Plus and minus values denote standard deviation.", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>Code</td><td>WER</td><td>Semantic Distance</td></tr><tr><td>AF</td><td>0.36\u00b10.23</td><td>0.18\u00b10.16</td></tr><tr><td>AUTO</td><td>0.34\u00b10.29</td><td>0.18\u00b10.17</td></tr><tr><td>CON</td><td>0.38\u00b10.40</td><td>0.13\u00b10.12</td></tr><tr><td>CR</td><td>0.32\u00b10.14</td><td>0.18\u00b10.16</td></tr><tr><td>NGI</td><td>0.33\u00b10.27</td><td>0.16\u00b10.15</td></tr><tr><td>NPWP</td><td>0.35\u00b10.57</td><td>0.17\u00b10.16</td></tr><tr><td>PWOP</td><td>0.29\u00b10.14</td><td>0.15\u00b10.14</td></tr><tr><td colspan=\"2\">QUEST 0.31\u00b10.19</td><td>0.18\u00b10.17</td></tr><tr><td>SEEK</td><td>0.32\u00b10.43</td><td>0.17\u00b10.15</td></tr><tr><td>SR</td><td>0.36\u00b10.19</td><td>0.20\u00b10.18</td></tr><tr><td>NAT</td><td>0.48\u00b10.20</td><td>0.37\u00b10.26</td></tr><tr><td>NAC</td><td>0.40\u00b10.16</td><td>0.30\u00b10.10</td></tr></table>" |
| }, |
| "TABREF6": { |
| "text": "WER and Semantic Distance statistics for ten MITI codes and non-annotated utterances in the dataset by therapists (NAT) and clients (NAC). Plus and minus values denote standard deviation.", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF8": { |
| "text": "", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF10": { |
| "text": "Classification results for behavioral coding in MI sessions. AF, CON, NPWP, AUTO are not reported as their F-scores are zero", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>% of Manual Data</td><td>Acc.</td><td>QUEST</td><td>CR</td><td>SR</td><td colspan=\"2\">F-score NAT NAC</td><td>SEEK</td><td>NGI</td><td>PWOP</td></tr><tr><td>0%</td><td>0.5520</td><td>0.4529</td><td colspan=\"7\">0.3642 0.0010 0.2587 0.7587 0.0815 0.3789 0.0127</td></tr><tr><td>20%</td><td>0.6173</td><td>0.5820</td><td colspan=\"6\">0.5053 0.0076 0.4360 0.8132 0.1821 0.4865</td><td>0.011</td></tr><tr><td>40%</td><td>0.6734</td><td>0.5988</td><td colspan=\"7\">0.5225 0.0241 0.6397 0.8601 0.2981 0.4943 0.0276</td></tr><tr><td>60%</td><td>0.6827</td><td>0.5966</td><td colspan=\"7\">0.5298 0.0336 0.6700 0.8678 0.2314 0.4996 0.0021</td></tr><tr><td>80%</td><td>0.6914</td><td>0.6061</td><td colspan=\"7\">0.5340 0.0810 0.6866 0.8726 0.3073 0.5119 0.0534</td></tr><tr><td>100%</td><td>0.6940</td><td>0.6071</td><td colspan=\"7\">0.5334 0.0794 0.6919 0.8758 0.3058 0.5186 0.0048</td></tr><tr><td colspan=\"2\">Majority Class Classifier 0.4321</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.6034</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td></tr></table>" |
| }, |
| "TABREF11": { |
| "text": "Classification results for behavioral coding for incremental fraction of manual transcripts in training set. The majority class classifier outputs the majority label in the training dataset for each instance", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td/><td>0.8</td><td/><td/><td/><td/><td/></tr><tr><td>Accuracy</td><td>0.7</td><td/><td/><td/><td/><td/></tr><tr><td/><td>0.6</td><td/><td/><td/><td/><td/></tr><tr><td/><td>0.5</td><td>0</td><td>20</td><td>40</td><td>60</td><td>80</td><td>100</td></tr><tr><td/><td/><td/><td colspan=\"4\">Percentage of manual data</td></tr></table>" |
| }, |
| "TABREF12": { |
| "text": "are averaged over the re-", |
| "type_str": "table", |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td/><td colspan=\"2\">Accuracy Macro F1</td></tr><tr><td>No Context</td><td>0.5645</td><td>0.2085</td></tr><tr><td>Context = 1</td><td>0.5762</td><td>0.2297</td></tr><tr><td>Context = 2</td><td>0.5772</td><td>0.2290</td></tr></table>" |
| } |
| } |
| } |
| } |