| { |
| "paper_id": "U18-1007", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:12:06.773710Z" |
| }, |
| "title": "Exploring Textual and Speech information in Dialogue Act Classification with Speaker Domain Adaptation", |
| "authors": [ |
| { |
| "first": "Xuanli", |
| "middle": [], |
| "last": "He", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "xuanli.he1@monash.edu" |
| }, |
| { |
| "first": "Hung", |
| "middle": [], |
| "last": "Quan", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "hung.tran@monash.edu" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Havard", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "william.havard@gmail.com" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Besacier", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "laurent.besacier@imag.fr" |
| }, |
| { |
| "first": "Ingrid", |
| "middle": [], |
| "last": "Zukerman", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "ingrid.zukerman@monash.edu" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "gholamreza.haffari@monash.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In spite of the recent success of Dialogue Act (DA) classification, the majority of prior works focus on text-based classification with oracle transcriptions, i.e. human transcriptions, instead of Automatic Speech Recognition (ASR)'s transcriptions. Moreover, the performance of this classification task, because of speaker domain shift, may deteriorate. In this paper, we explore the effectiveness of using both acoustic and textual signals, either oracle or ASR transcriptions, and investigate speaker domain adaptation for DA classification. Our multimodal model proves to be superior to the unimodal models, particularly when the oracle transcriptions are not available. We also propose an effective method for speaker domain adaptation, which achieves competitive results.", |
| "pdf_parse": { |
| "paper_id": "U18-1007", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In spite of the recent success of Dialogue Act (DA) classification, the majority of prior works focus on text-based classification with oracle transcriptions, i.e. human transcriptions, instead of Automatic Speech Recognition (ASR)'s transcriptions. Moreover, the performance of this classification task, because of speaker domain shift, may deteriorate. In this paper, we explore the effectiveness of using both acoustic and textual signals, either oracle or ASR transcriptions, and investigate speaker domain adaptation for DA classification. Our multimodal model proves to be superior to the unimodal models, particularly when the oracle transcriptions are not available. We also propose an effective method for speaker domain adaptation, which achieves competitive results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Dialogue Act (DA) classification is a sequencelabelling task, mapping a sequence of utterances to their corresponding DAs. Since DA classification plays an important role in understanding spontaneous dialogue (Stolcke et al., 2000) , numerous techniques have been proposed to capture the semantic correlation between utterances and DAs.", |
| "cite_spans": [ |
| { |
| "start": 209, |
| "end": 231, |
| "text": "(Stolcke et al., 2000)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Earlier on, statistical techniques such as Hidden Markov Models (HMMs) were widely used to recognise DAs (Stolcke et al., 2000; Julia et al., 2010) . Recently, due to the enormous success of neural networks in sequence labeling/transduction tasks (Sutskever et al., 2014; Bahdanau et al., 2014; Popov, 2016) , several recurrent neural network (RNN) based architectures have been proposed to conduct DA classification, resulting in * Equal contribution promising outcomes (Ji et al., 2016; Shen and Lee, 2016; Tran et al., 2017a) .", |
| "cite_spans": [ |
| { |
| "start": 105, |
| "end": 127, |
| "text": "(Stolcke et al., 2000;", |
| "ref_id": null |
| }, |
| { |
| "start": 128, |
| "end": 147, |
| "text": "Julia et al., 2010)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 247, |
| "end": 271, |
| "text": "(Sutskever et al., 2014;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 272, |
| "end": 294, |
| "text": "Bahdanau et al., 2014;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 295, |
| "end": 307, |
| "text": "Popov, 2016)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 471, |
| "end": 488, |
| "text": "(Ji et al., 2016;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 489, |
| "end": 508, |
| "text": "Shen and Lee, 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 509, |
| "end": 528, |
| "text": "Tran et al., 2017a)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Despite the success of previous work in DA classification, there are still several fundamental issues. Firstly, most of the previous works rely on transcriptions (Ji et al., 2016; Shen and Lee, 2016; Tran et al., 2017a) . Fewer of these focus on combining speech and textual signals (Julia et al., 2010) , and even then, the textual signals in these works utilise the oracle transcriptions. We argue that in the context of a spoken dialog system, oracle transcriptions of utterances are usually not available, i.e. the agent does not have access to the human transcriptions. Speech and textual data complement each other, especially when textual data is from ASR systems rather than oracle transcripts. Furthermore, domain adaptation in text or speech-based DA classification is relatively underinvestigated. As shown in our experiments, DA classification models perform much worse when they are applied to new speakers.", |
| "cite_spans": [ |
| { |
| "start": 162, |
| "end": 179, |
| "text": "(Ji et al., 2016;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 180, |
| "end": 199, |
| "text": "Shen and Lee, 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 200, |
| "end": 219, |
| "text": "Tran et al., 2017a)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 283, |
| "end": 303, |
| "text": "(Julia et al., 2010)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we explore the effectiveness of using both acoustic and textual signals, and investigate speaker domain adaptation for DA classification. We present a multimodal model to combine text and speech signals, which proves to be superior to the unimodal models, particularly when the oracle transcriptions are not available. Moreover, we propose an effective unsupervised method for speaker domain adaptation, which learns a suitable encoder for the new domain giving rise to representations similar to those in the source domain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this section, we describe the basic structure of our model, which combines the textual and speech modalities. We also introduce a representation learning approach using adversarial ideas to tackle the domain adaptation problem. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model Description", |
| "sec_num": "2" |
| }, |
| { |
| "text": "A conversation is comprised of a sequence of utterances u 1 , ..., u T , and each utterance u t is labeled with a DA a t . An utterance could include text, speech or both. We focus on online DA classification, and our classification model attempts to directly model the conditional probability p(a 1:T |u 1:T ) decomposed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "p(a 1:T |u 1:T ) = T t=1 p(a t |a t\u22121 , u t ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "(1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "According to Eqn. 1, during the training time the previous label is from the ground-truth data, while this information comes from the model during the inference stage. This discrepancy, referred as label bias, can result in error accumulation. To incorporate the previous DA information and mitigate the label-bias problem, we adopt the uncertainty propagation architecture (Tran et al., 2017b). The conditional probability term in Eqn. 1 is computed as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "a t |a t\u22121 , u t \u223c q t q t = sof tmax(W \u2022 c(u t ) + b) W = a q t\u22121 (a)W a , b = a q t\u22121 (a)b a", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where W a and b a are DA-specific parameters gated on the DA a, c(u t ) is the encoding of the utterance u t , and q t\u22121 represents the uncertainty distribution over the DAs at the time step t \u2212 1. Text Utterance. An utterance u t includes a list of words w 1 t , ..., w n t . The word w i t is embedded by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "x i t = e(w i t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where e is an embedding table. Speech Utterance. We apply a frequency-based transformation on raw speech signals to acquire Mel-frequency cepstral coefficients (MFCCs), which have been very effective in speech recognition (Mohamed et al., 2012) . To learn the contextspecific features of the speech signal, a convolutional neural network (CNN) is employed over MFCCs:", |
| "cite_spans": [ |
| { |
| "start": 222, |
| "end": 244, |
| "text": "(Mohamed et al., 2012)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "x 1 t , ..., x m t = CN N (s 1 t , ..., s k t )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where s i t is a MFCC feature vector at the position i for the t-th utterance. Encoding of Text+Speech. As illustrated in Figure 1, we employ two RNNs with LSTM units to encode the text and speech sequences of an utterance u t :", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 122, |
| "end": 128, |
| "text": "Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "c(u t ) tx = RN N \u03b8 \u03b8 \u03b8 (x 1 t , ..., x n t ) c(u t ) sp = RN N \u03b8 \u03b8 \u03b8 (x 1 t , ..., x m t ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "where the encoding of the text c(u t ) tx and speech c(u t ) sp are the last hidden states of the corresponding RNNs whose parameters are denoted by \u03b8 \u03b8 \u03b8 and \u03b8 \u03b8 \u03b8 . The distributed representation c(u t ) of the utterance u t is then the concatenation of c(u t ) tx and c(u t ) sp .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Our Multimodal Model", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Different people tend to speak differently. This creates a problem for DA classification systems, as unfamiliar speech signals might not be recognised properly. In our preliminary experiments, the performance of DA classification on speakers that are unseen in the training set suffers from dramatic performance degradation over test set. This motivates us to explore the problem of speaker domain adaptation in DA classification.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speaker Domain Adaptation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We assume we have a large amount of labelled source data pair {X src , Y src }, and a small amount Figure 2 : Overview of discriminative model. Dashed lines indicate frozen parts of unlabelled target data X trg , where an utterance u \u2208 X includes both speech and text parts. Inspired by Tzeng et al. (2017) , our goal is to learn a target domain encoder which can fool a domain classifier C \u03c6 in distinguishing whether the utterance belongs to the source or target domain. Once the target encoder is trained to produce representations which look like those coming from the source domain, the target encoder can be used together with other components of the source DA prediction model to predict DAs for the target domain (see Figure 2) .", |
| "cite_spans": [ |
| { |
| "start": 287, |
| "end": 306, |
| "text": "Tzeng et al. (2017)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 99, |
| "end": 107, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 726, |
| "end": 735, |
| "text": "Figure 2)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Speaker Domain Adaptation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We use a 1-layer feed-forward network as the domain classifier:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speaker Domain Adaptation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "C \u03c6 \u03c6 \u03c6 (r) = \u03c3(W C \u2022 r + b C )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speaker Domain Adaptation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where the classifier produces the probability of the input representation r belonging to the source domain, and \u03c6 \u03c6 \u03c6 denotes the classifier parameters {W C , b C }. Let the target and source domain encoders are denoted by c trg (u trg ) and c src (u trg ), respectively. The training objective of the domain classifier is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speaker Domain Adaptation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "min \u03c6 \u03c6 \u03c6 L 1 (X src , X trg , C \u03c6 \u03c6 \u03c6 ) = \u2212 E u\u223cXsrc [log C \u03c6 \u03c6 \u03c6 (c src (u))] \u2212 E u\u223cXtrg [1 \u2212 log C \u03c6 \u03c6 \u03c6 (c trg (u))].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speaker Domain Adaptation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "As mentioned before, we keep the source encoder fixed and train the parameters of the target domain encoder. The training objective of the target domain encoder is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speaker Domain Adaptation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "min \u03b8 \u03b8 \u03b8 trg L 2 (X trg , C \u03c6 \u03c6 \u03c6 ) = \u2212 E u\u223cXtrg [log C \u03c6 \u03c6 \u03c6 (c trg (u))]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speaker Domain Adaptation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "where the optimisation is performed over the speech RNN parameters \u03b8 \u03b8 \u03b8 trg of the target encoder. We also tried to optimise other parameters (i.e. CNN parameters, word embeddings and text RNN parameters), but the performance is similar to the speech RNN only. This is possibly because the major difference between source and target domain data is due to the speech signals. We alternate between optimising L 1 and L 2 by using Adam (Kingma and Ba, 2014) until a training condition is met.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Speaker Domain Adaptation", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We test our models on two datasets: the MapTask Dialog Act data (Anderson et al., 1991) and the Switchboard Dialogue Act data (Jurafsky et al., 1997) .", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 87, |
| "text": "(Anderson et al., 1991)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 126, |
| "end": 149, |
| "text": "(Jurafsky et al., 1997)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "MapTask dataset This dataset consist of 128 conversations labelled with 13 DAs. We randomly partition this data into 80% training, 10% development and 10% test sets, having 103, 12 and 13 conversations respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Switchboard dataset There are 1155 transcriptions of telephone conversations in this dataset, and each utterance falls into one of 42 DAs. We follow the setup proposed by Stolcke et al. (2000) : 1115 conversations for training, 21 for development and 19 for testing. Since we do not have access to the original recordings of Switchboard dataset, we use synthetic speeches generated by a text-to-speech (TTS) system from the oracle transcriptions.", |
| "cite_spans": [ |
| { |
| "start": 171, |
| "end": 192, |
| "text": "Stolcke et al. (2000)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In-Domain Evaluation. Unlike most prior work (Ji et al., 2016; Shen and Lee, 2016; Tran et al., 2017a) , we use ASR transcripts, produced by the CMUSphinx ASR system, rather than the oracle text. We argue that most dialogues in the real world are in the speech format, thus our setup is closer to the real-life scenario.", |
| "cite_spans": [ |
| { |
| "start": 45, |
| "end": 62, |
| "text": "(Ji et al., 2016;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 63, |
| "end": 82, |
| "text": "Shen and Lee, 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 83, |
| "end": 102, |
| "text": "Tran et al., 2017a)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As shown in Tables 1 and 2, our multimodal model outperforms strong baselines on Switchboard and MapTask datasets, when using the ASR transcriptions. When using the oracle text, the in-formation from the speech signal does not lead to further improvement though, possibly due to the existence of acoustic features (such as tones, question markers etc) in the high quality transcriptions. On MapTask, there is a large gap between oraclebased and ASR-based models. This degradation is mainly caused by the poor quality acoustic signals in MapTask, making ASR ineffective compared to directly predicting DAs from the speech signal.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Oracle text Stolcke et al. (2000) 71.00% Shen and Lee 2016 Out-of-Domain Evaluation. We evaluate our domain adaptation model on the out of domain data on Switchboard. Our training data comprises of five known speakers, whereas development and test sets include data from three new speakers. The speeches for these 8 speakers are generated by a TTS system.", |
| "cite_spans": [ |
| { |
| "start": 12, |
| "end": 33, |
| "text": "Stolcke et al. (2000)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models Accuracy", |
| "sec_num": null |
| }, |
| { |
| "text": "As described in Section 2.2, we pre-train our speech models on the labeled training data from the 5 known speakers, then train speech encoders for the new speakers using speeches from both known and new speakers. During domain adaptation, the five known speakers are marked as the source domain, while the three new speakers are treated as the target domains. For domain adaptation with unlabelled data, the DA tags of both the source and target domains are removed. We test the source-only model and the domain adaptation models merely on the three new speakers in test data. As shown in Table 3 , compared with the source-only model, the domain adaptation strategy improves the performance of speechonly and text+speech models, consistently and substantially.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 589, |
| "end": 596, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Models Accuracy", |
| "sec_num": null |
| }, |
| { |
| "text": "Methods Speech Text+Speech Unadapted 48.73%", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models Accuracy", |
| "sec_num": null |
| }, |
| { |
| "text": "63.57% Domain Adapted 54.37% 67.21% Supervised Learning 56.19 % 68.04% Table 3 : Experimental results of the unadapted (i.e. source-only) and domain adapted models using unlabeled data on Switchboard, as well as the supervised learning upperbound.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 71, |
| "end": 78, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Models Accuracy", |
| "sec_num": null |
| }, |
| { |
| "text": "To assess the effectiveness of our domain adaptation architecture, we compare it with the supervised learning scenario where the model has access to labeled data from all speakers during training. To do this, we randomly add two thirds of labelled development data of new speakers to the training set, and apply the trained model to the test set. The supervised learning scenario is an upperbound to our domain adaptation approach, as it makes use of labeled data; see the results in the last row of Table 3 . However, the gap between supervised learning and domain adaptation is not big compared to that between the adapted and unadapted models, showing that our domain adaption technique has been effective.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 500, |
| "end": 507, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Models Accuracy", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper, we have proposed a multimodal model to combine textual and acoustic signals for DA prediction. We have demonstrated that the our model exceeds unimodal models, especially when oracle transcriptions do not exist. In addition, we have proposed an effective domain adaptation technique in order to adapt our multimodal DA prediction model to new speakers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "4" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "The hcrc map task corpus", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Anne", |
| "suffix": "" |
| }, |
| { |
| "first": "Miles", |
| "middle": [], |
| "last": "Anderson", |
| "suffix": "" |
| }, |
| { |
| "first": "Ellen", |
| "middle": [ |
| "Gurman" |
| ], |
| "last": "Bader", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Bard", |
| "suffix": "" |
| }, |
| { |
| "first": "Gwyneth", |
| "middle": [], |
| "last": "Boyle", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Doherty", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Garrod", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacqueline", |
| "middle": [], |
| "last": "Isard", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Kowtko", |
| "suffix": "" |
| }, |
| { |
| "first": "Jim", |
| "middle": [], |
| "last": "Mcallister", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 1991, |
| "venue": "Language and speech", |
| "volume": "34", |
| "issue": "4", |
| "pages": "351--366", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anne H Anderson, Miles Bader, Ellen Gurman Bard, Elizabeth Boyle, Gwyneth Doherty, Simon Garrod, Stephen Isard, Jacqueline Kowtko, Jan McAllister, Jim Miller, et al. 1991. The hcrc map task corpus. Language and speech 34(4):351-366.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2014. Neural machine translation by jointly learning to align and translate. CoRR abs/1409.0473.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Switchboard: Telephone speech corpus for research and development", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "John", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Godfrey", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Edward", |
| "suffix": "" |
| }, |
| { |
| "first": "Jane", |
| "middle": [], |
| "last": "Holliman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mc-Daniel", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Acoustics, Speech, and Signal Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "517--520", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John J Godfrey, Edward C Holliman, and Jane Mc- Daniel. 1992. Switchboard: Telephone speech cor- pus for research and development. In Acoustics, Speech, and Signal Processing, 1992. ICASSP-92., 1992 IEEE International Conference on. IEEE, vol- ume 1, pages 517-520.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "A latent variable recurrent neural network for discourse relation language models", |
| "authors": [ |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yangfeng Ji, Gholamreza Haffari, and Jacob Eisen- stein. 2016. A latent variable recurrent neural net- work for discourse relation language models. CoRR abs/1603.01913.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Dialog act classification using acoustic and discourse information of maptask data", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Fatema", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Julia", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Khan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Iftekharuddin", |
| "suffix": "" |
| }, |
| { |
| "first": "U", |
| "middle": [], |
| "last": "Atiq", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Islam", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "International Journal of Computational Intelligence and Applications", |
| "volume": "09", |
| "issue": "04", |
| "pages": "289--311", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fatema N. Julia, Khan M. Iftekharuddin, and Atiq U. Islam. 2010. Dialog act classification using acoustic and discourse information of maptask data. Inter- national Journal of Computational Intelligence and Applications 09(04):289-311.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Switchboard SWBD-DAMSL shallow-discourse-function annotation coders manual", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Shriberg", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Biasca", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. Jurafsky, E. Shriberg, and D. Biasca. 1997. Switch- board SWBD-DAMSL shallow-discourse-function annotation coders manual. Technical Report Draft 13, University of Colorado, Institute of Cognitive Science.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. CoRR abs/1412.6980. http://arxiv.org/abs/1412.6980.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Understanding how deep belief networks perform acoustic modelling", |
| "authors": [ |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Abdel-Rahman Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerald", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Penn", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Acoustics, Speech and Signal Processing (ICASSP), 2012 IEEE International Conference on. IEEE", |
| "volume": "", |
| "issue": "", |
| "pages": "4273--4276", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abdel-rahman Mohamed, Geoffrey Hinton, and Ger- ald Penn. 2012. Understanding how deep belief net- works perform acoustic modelling. In Acoustics, Speech and Signal Processing (ICASSP), 2012 IEEE International Conference on. IEEE, pages 4273- 4276.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Sequence-to-sequence rnns for text summarization", |
| "authors": [ |
| { |
| "first": "Ramesh", |
| "middle": [], |
| "last": "Nallapati", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Xiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Bowen", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramesh Nallapati, Bing Xiang, and Bowen Zhou. 2016. Sequence-to-sequence rnns for text summa- rization. CoRR abs/1602.06023.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Deep Learning Architecture for Part-of-Speech Tagging with Word and Suffix Embeddings", |
| "authors": [ |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Popov", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "68--77", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexander Popov. 2016. Deep Learning Architec- ture for Part-of-Speech Tagging with Word and Suf- fix Embeddings, Springer International Publishing, Cham, pages 68-77.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Neural attention models for sequence classification: Analysis and application to key term extraction and dialogue act detection", |
| "authors": [ |
| { |
| "first": "Sheng-Syun", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hung-Yi", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sheng-syun Shen and Hung-yi Lee. 2016. Neural at- tention models for sequence classification: Analysis and application to key term extraction and dialogue act detection. CoRR abs/1604.00077.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Dialogue act modeling for automatic tagging and recognition of conversational speech", |
| "authors": [ |
| { |
| "first": "Marie", |
| "middle": [], |
| "last": "Meteer", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marie Meteer. 2000. Dialogue act modeling for au- tomatic tagging and recognition of conversational speech. CoRR cs.CL/0006023.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. CoRR abs/1409.3215.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A hierarchical neural model for learning sequences of dialogue acts", |
| "authors": [ |
| { |
| "first": "Quan Hung", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingrid", |
| "middle": [], |
| "last": "Zukerman", |
| "suffix": "" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "428--437", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quan Hung Tran, Ingrid Zukerman, and Gholamreza Haffari. 2017a. A hierarchical neural model for learning sequences of dialogue acts. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Vol- ume 1, Long Papers. volume 1, pages 428-437.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Preserving distributional information in dialogue act classification", |
| "authors": [ |
| { |
| "first": "Quan Hung", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingrid", |
| "middle": [], |
| "last": "Zukerman", |
| "suffix": "" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2141--2146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quan Hung Tran, Ingrid Zukerman, and Gholamreza Haffari. 2017b. Preserving distributional informa- tion in dialogue act classification. In Proceedings of the 2017 Conference on Empirical Methods in Nat- ural Language Processing. pages 2141-2146.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Adversarial discriminative domain adaptation", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Tzeng", |
| "suffix": "" |
| }, |
| { |
| "first": "Judy", |
| "middle": [], |
| "last": "Hoffman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kate", |
| "middle": [], |
| "last": "Saenko", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Darrell", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. 2017. Adversarial discriminative domain adaptation. CoRR abs/1702.05464.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "uris": null, |
| "type_str": "figure", |
| "text": "The multimodal model. For the utterance t, the left and right sides are encoded speech and text, respectively." |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "content": "<table><tr><td>Models</td><td>Accuracy</td></tr><tr><td>Oracle text</td><td/></tr><tr><td>Julia et al. (2010)</td><td>55.40%</td></tr><tr><td>Tran et al. (2017a)</td><td>61.60%</td></tr><tr><td>Text only (ours)</td><td>61.73%</td></tr><tr><td>Text+Speech (ours)</td><td>61.67%</td></tr><tr><td>Speech and ASR</td><td/></tr><tr><td>Speech only</td><td>39.32%</td></tr><tr><td>Text only (ASR)</td><td>38.10%</td></tr><tr><td colspan=\"2\">Text+Speech (ASR) 39.39%</td></tr></table>", |
| "num": null, |
| "text": "Results of different models on Switchboard data.", |
| "html": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "content": "<table/>", |
| "num": null, |
| "text": "Results of different models on MapTask data.", |
| "html": null |
| } |
| } |
| } |
| } |