| { |
| "paper_id": "P19-1004", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:30:49.206576Z" |
| }, |
| "title": "Do Neural Dialog Systems Use the Conversation History Effectively? An Empirical Study", |
| "authors": [ |
| { |
| "first": "Chinnadhurai", |
| "middle": [], |
| "last": "Sankar", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e9 de Montr\u00e9al 3 \u00c9cole Polytechnique de Montr\u00e9al 4 Google Research", |
| "location": { |
| "addrLine": "Brain Team 5 Element AI", |
| "settlement": "Montr\u00e9al" |
| } |
| }, |
| "email": "chinnadhurai@gmail.com" |
| }, |
| { |
| "first": "Sandeep", |
| "middle": [], |
| "last": "Subramanian", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e9 de Montr\u00e9al 3 \u00c9cole Polytechnique de Montr\u00e9al 4 Google Research", |
| "location": { |
| "addrLine": "Brain Team 5 Element AI", |
| "settlement": "Montr\u00e9al" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Pal", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e9 de Montr\u00e9al 3 \u00c9cole Polytechnique de Montr\u00e9al 4 Google Research", |
| "location": { |
| "addrLine": "Brain Team 5 Element AI", |
| "settlement": "Montr\u00e9al" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Sarath", |
| "middle": [], |
| "last": "Chandar", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e9 de Montr\u00e9al 3 \u00c9cole Polytechnique de Montr\u00e9al 4 Google Research", |
| "location": { |
| "addrLine": "Brain Team 5 Element AI", |
| "settlement": "Montr\u00e9al" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e9 de Montr\u00e9al 3 \u00c9cole Polytechnique de Montr\u00e9al 4 Google Research", |
| "location": { |
| "addrLine": "Brain Team 5 Element AI", |
| "settlement": "Montr\u00e9al" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Neural generative models have become increasingly popular when building conversational agents. They offer flexibility, can be easily adapted to new domains, and require minimal domain engineering. A common criticism of these systems is that they seldom understand or use the available dialog history effectively. In this paper, we take an empirical approach to understanding how these models use the available dialog history by studying the sensitivity of the models to artificially introduced unnatural changes or perturbations to their context at test time. We experiment with 10 different types of perturbations on 4 multi-turn dialog datasets and find that commonly used neural dialog architectures like recurrent and transformer-based seq2seq models are rarely sensitive to most perturbations such as missing or reordering utterances, shuffling words, etc. Also, by open-sourcing our code, we believe that it will serve as a useful diagnostic tool for evaluating dialog systems in the future 1 .", |
| "pdf_parse": { |
| "paper_id": "P19-1004", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Neural generative models have become increasingly popular when building conversational agents. They offer flexibility, can be easily adapted to new domains, and require minimal domain engineering. A common criticism of these systems is that they seldom understand or use the available dialog history effectively. In this paper, we take an empirical approach to understanding how these models use the available dialog history by studying the sensitivity of the models to artificially introduced unnatural changes or perturbations to their context at test time. We experiment with 10 different types of perturbations on 4 multi-turn dialog datasets and find that commonly used neural dialog architectures like recurrent and transformer-based seq2seq models are rarely sensitive to most perturbations such as missing or reordering utterances, shuffling words, etc. Also, by open-sourcing our code, we believe that it will serve as a useful diagnostic tool for evaluating dialog systems in the future 1 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "With recent advancements in generative models of text (Wu et al., 2016; Vaswani et al., 2017; Radford et al., 2018) , neural approaches to building chit-chat and goal-oriented conversational agents (Sordoni et al., 2015; Vinyals and Le, 2015; Serban et al., 2016; Bordes and Weston, 2016; Serban et al., 2017b) has gained popularity with the hope that advancements in tasks like machine translation (Bahdanau et al., 2015) , abstractive summarization (See et al., 2017) should translate to dialog systems as well. While these models have demonstrated the ability to generate fluent responses, they still lack the ability to \"understand\" and process the dialog history to produce coherent and interesting responses. They often produce boring and repetitive responses like \"Thank you.\" (Li et al., 2015; Serban et al., 2017a) or meander away from the topic of conversation. This has been often attributed to the manner and extent to which these models use the dialog history when generating responses. However, there has been little empirical investigation to validate these speculations.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 71, |
| "text": "(Wu et al., 2016;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 72, |
| "end": 93, |
| "text": "Vaswani et al., 2017;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 94, |
| "end": 115, |
| "text": "Radford et al., 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 198, |
| "end": 220, |
| "text": "(Sordoni et al., 2015;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 221, |
| "end": 242, |
| "text": "Vinyals and Le, 2015;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 243, |
| "end": 263, |
| "text": "Serban et al., 2016;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 264, |
| "end": 288, |
| "text": "Bordes and Weston, 2016;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 289, |
| "end": 310, |
| "text": "Serban et al., 2017b)", |
| "ref_id": null |
| }, |
| { |
| "start": 399, |
| "end": 422, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 451, |
| "end": 469, |
| "text": "(See et al., 2017)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 784, |
| "end": 801, |
| "text": "(Li et al., 2015;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 802, |
| "end": 823, |
| "text": "Serban et al., 2017a)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this work, we take a step in that direction and confirm some of these speculations, showing that models do not make use of a lot of the information available to it, by subjecting the dialog history to a variety of synthetic perturbations. We then empirically observe how recurrent (Sutskever et al., 2014) and transformer-based (Vaswani et al., 2017) sequence-to-sequence (seq2seq) models respond to these changes. The central premise of this work is that models make minimal use of certain types of information if they are insensitive to perturbations that destroy them. Worryingly, we find that 1) both recurrent and transformer-based seq2seq models are insensitive to most kinds of perturbations considered in this work 2) both are particularly insensitive even to extreme perturbations such as randomly shuffling or reversing words within every utterance in the conversation history (see Table 1 ) and 3) recurrent models are more sensitive to the ordering of utterances within the dialog history, suggesting that they could be modeling conversation dynamics better than transformers.", |
| "cite_spans": [ |
| { |
| "start": 284, |
| "end": 308, |
| "text": "(Sutskever et al., 2014)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 331, |
| "end": 353, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 895, |
| "end": 902, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Since this work aims at investigating and gaining an understanding of the kinds of information a generative neural response model learns to use, the most relevant pieces of work are where sim- (Belinkov and Bisk, 2017) . Understanding what information is learned or contained in the representations of neural networks has also been studied by \"probing\" them with linear or deep models (Adi et al., 2016; Subramanian et al., 2018; Conneau et al., 2018) . Several works have recently pointed out the presence of annotation artifacts in common text and multi-modal benchmarks. For example, Gururangan et al. (2018) demonstrate that hypothesisonly baselines for natural language inference obtain results significantly better than random guessing. Kaushik and Lipton (2018) report that reading comprehension systems can often ignore the entire question or use only the last sentence of a document to answer questions. Anand et al. (2018) show that an agent that does not navigate or even see the world around it can answer questions about it as well as one that does. These pieces of work suggest that while neural methods have the potential to learn the task specified, its design could lead them to do so in a manner that doesn't use all of the available information within the task.", |
| "cite_spans": [ |
| { |
| "start": 193, |
| "end": 218, |
| "text": "(Belinkov and Bisk, 2017)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 385, |
| "end": 403, |
| "text": "(Adi et al., 2016;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 404, |
| "end": 429, |
| "text": "Subramanian et al., 2018;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 430, |
| "end": 451, |
| "text": "Conneau et al., 2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 743, |
| "end": 768, |
| "text": "Kaushik and Lipton (2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 913, |
| "end": 932, |
| "text": "Anand et al. (2018)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Recent work has also investigated the inductive biases that different sequence models learn. For example, Tran et al. (2018) find that recurrent models are better at modeling hierarchical structure while Tang et al. (2018) find that feedforward architectures like the transformer and convolutional models are not better than RNNs at modeling long-distance agreement. Transformers however excel at word-sense disambiguation. We analyze whether the choice of architecture and the use of an attention mechanism affect the way in which dialog systems use information available to them.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 124, |
| "text": "Tran et al. (2018)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 204, |
| "end": 222, |
| "text": "Tang et al. (2018)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Following the recent line of work on generative dialog systems, we treat the problem of generating an appropriate response given a conversation history as a conditional language modeling problem. Specifically we want to learn a conditional probability distribution P \u03b8 (y|x) where y is a reasonable response given the conversation history x. The conversation history is typically represented as a sequence of utterances", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "x 1 , x 2 , . . . x n , where each utterance x i itself is comprised of a sequence of words x i 1 , x i 2 . . . x i k .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The response y is a single utterance also comprised of a sequence of words y 1 , y 2 . . . y m . The overall conditional probability is factorized autoregressively as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "P \u03b8 (y|x) = n i=1 P \u03b8 (y i |y <i , x 1 . . . x n )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "P \u03b8 , in this work, is parameterized by a recurrent or transformer-based seq2seq model. The crux of this work is to study how the learned probability distribution behaves as we artificially perturb the conversation history x 1 , . . . x n . We measure behavior by looking at how much the per-token perplexity increases under these changes. For example, one could think of shuffling the order in which x 1 . . . x n is presented to the model and observe how much the perplexity of y under the model increases. If the increase is only minimal, we can conclude that the ordering of x 1 . . . x n isn't informative to the model. For a complete list of perturbations considered in this work, please refer to Section 3.2. All models are trained without any perturbations and sensitivity is studied only at test time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Figure 1: The increase in perplexity for different models when only presented with the k most recent utterances from the dialog history for Dailydialog (left) and bAbI dialog (right) datasets. Recurrent models with attention fare better than transformers, since they use more of the conversation history.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We experiment with four multi-turn dialog datasets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "bAbI dialog is a synthetic goal-oriented multiturn dataset (Bordes and Weston, 2016) consisting of 5 different tasks for restaurant booking with increasing levels of complexity. We consider Task 5 in our experiments since it is the hardest and is a union of all four tasks. It contains 1k dialogs with an average of 13 user utterances per dialog.", |
| "cite_spans": [ |
| { |
| "start": 59, |
| "end": 84, |
| "text": "(Bordes and Weston, 2016)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Chat is an open domain dataset (Zhang et al., 2018) with multi-turn chit-chat conversations between turkers who are each assigned a \"persona\" at random. It comprises of 10.9k dialogs with an average of 14.8 turns per dialog.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 51, |
| "text": "(Zhang et al., 2018)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Persona", |
| "sec_num": null |
| }, |
| { |
| "text": "Dailydialog is an open domain dataset (Li et al., 2017) which consists of dialogs that resemble dayto-day conversations across multiple topics. It comprises of 13k dialogs with an average of 7.9 turns per dialog.", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 55, |
| "text": "(Li et al., 2017)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Persona", |
| "sec_num": null |
| }, |
| { |
| "text": "MutualFriends is a multi-turn goal-oriented dataset (He et al., 2017) where two agents must discover which friend of theirs is mutual based on the friends' attributes. It contains 11k dialogs with an average of 11.41 utterances per dialog.", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 69, |
| "text": "(He et al., 2017)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Persona", |
| "sec_num": null |
| }, |
| { |
| "text": "We experimented with several types of perturbation operations at the utterance and word (token) levels. All perturbations are applied in isolation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Perturbations", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Utterance-level perturbations We consider the following operations 1) Shuf that shuffles the sequence of utterances in the dialog history, 2) Rev that reverses the order of utterances in the history (but maintains word order within each utterance) 3) Drop that completely drops certain utterances and 4) Truncate that truncates the dialog history to contain only the k most recent utterances where k \u2264 n, where n is the length of dialog history.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Perturbations", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Word-level perturbations We consider similar operations but at the word level within every utterance 1) word-shuffle that randomly shuffles the words within an utterance 2) reverse that reverses the ordering of words, 3) word-drop that drops 30% of the words uniformly 4) noun-drop that drops all nouns, 5) verb-drop that drops all verbs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Types of Perturbations", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We experimented with two different classes of models -recurrent and transformer-based sequence-to-sequence generative models. All data loading, model implementations and evaluations were done using the ParlAI framework. We used the default hyper-parameters for all the models as specified in ParlAI.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "We trained a seq2seq (seq2seq lstm) model where the encoder and decoder are parameterized as LSTMs (Hochreiter and Schmidhuber, 1997) . We also experiment with using decoders that use an attention mechanism (seq2seq lstm att) (Bahdanau et al., 2015) . The encoder and decoder LSTMs have 2 layers with 128 dimensional hidden states with a dropout rate of 0.1.", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 133, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 226, |
| "end": 249, |
| "text": "(Bahdanau et al., 2015)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recurrent Models", |
| "sec_num": null |
| }, |
| { |
| "text": "Transformer Our transformer (Vaswani et al., 2017) model uses 300 dimensional embeddings and hidden states, 2 layers and 2 attention heads with no dropout. This model is significantly smaller than the ones typically used in machine The model that exhibits the highest sensitivity (higher the better) to a particular perturbation on a dataset is in bold. seq2seq lstm att are the most sensitive models 24/40 times, while transformers are the least with 6/40 times.", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 50, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recurrent Models", |
| "sec_num": null |
| }, |
| { |
| "text": "translation since we found that the model that resembled Vaswani et al. (2017) significantly overfit on all our datasets. While the models considered in this work might not be state-of-the-art on the datasets considered, we believe these models are still competitive and used commonly enough at least as baselines, that the community will benefit by understanding their behavior. In this paper, we use early stopping with a patience of 10 on the validation set to save our best model. All models achieve close to the perplexity numbers reported for generative seq2seq models in their respective papers.", |
| "cite_spans": [ |
| { |
| "start": 57, |
| "end": 78, |
| "text": "Vaswani et al. (2017)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Recurrent Models", |
| "sec_num": null |
| }, |
| { |
| "text": "Our results are presented in Table 2 and Figure 1 . Table 2 reports the perplexities of different models on test set in the second column, followed by the increase in perplexity when the dialog history is perturbed using the method specified in the column header. Rows correspond to models trained on different datasets. Figure 1 presents the change in perplexity for models when presented only with the k most recent utterances from the dialog history.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 29, |
| "end": 36, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 41, |
| "end": 49, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 52, |
| "end": 59, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 321, |
| "end": 329, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results & Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We make the following observations:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results & Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "1. Models tend to show only tiny changes in perplexity in most cases, even under extreme changes to the dialog history, suggesting that they use far from all the information that is available to them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results & Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "2. Transformers are insensitive to wordreordering, indicating that they could be learning bag-of-words like representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results & Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "3. The use of an attention mechanism in seq2seq lstm att and transformers makes these models use more information from earlier parts of the conversation than vanilla seq2seq models as seen from increases in perplexity when using only the last utterance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results & Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "4. While transformers converge faster and to lower test perplexities, they don't seem to capture the conversational dynamics across utterances in the dialog history and are less sensitive to perturbations that scramble this structure than recurrent models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results & Discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "This work studies the behaviour of generative neural dialog systems in the presence of synthetically introduced perturbations to the dialog history, that it conditions on. We find that both recurrent and transformer-based seq2seq models are not significantly affected even by drastic and unnatural modifications to the dialog history. We also find subtle differences between the way in which recurrent and transformer-based models use available context. By open-sourcing our code, we believe this paradigm of studying model behavior by introducing perturbations that destroys different kinds of structure present within the dialog history can be a useful diagnostic tool. We also foresee this paradigm being useful when building new dialog datasets to understand the kinds of information models use to solve them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to acknowledge NVIDIA for donating GPUs and a DGX-1 computer used in this work. We would also like to thank the anonymous reviewers for their constructive feedback. Our code is available at https://github.com/ chinnadhurai/ParlAI/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Fine-grained analysis of sentence embeddings using auxiliary prediction tasks", |
| "authors": [ |
| { |
| "first": "Yossi", |
| "middle": [], |
| "last": "Adi", |
| "suffix": "" |
| }, |
| { |
| "first": "Einat", |
| "middle": [], |
| "last": "Kermany", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ofer", |
| "middle": [], |
| "last": "Lavi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1608.04207" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yossi Adi, Einat Kermany, Yonatan Belinkov, Ofer Lavi, and Yoav Goldberg. 2016. Fine-grained anal- ysis of sentence embeddings using auxiliary predic- tion tasks. arXiv preprint arXiv:1608.04207.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Blindfold baselines for embodied qa", |
| "authors": [ |
| { |
| "first": "Ankesh", |
| "middle": [], |
| "last": "Anand", |
| "suffix": "" |
| }, |
| { |
| "first": "Eugene", |
| "middle": [], |
| "last": "Belilovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyle", |
| "middle": [], |
| "last": "Kastner", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Larochelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1811.05013" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ankesh Anand, Eugene Belilovsky, Kyle Kastner, Hugo Larochelle, and Aaron Courville. 2018. Blindfold baselines for embodied qa. arXiv preprint arXiv:1811.05013.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Neural machine translation by jointly learning to align and translate", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings Of The International Conference on Representation Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Ben- gio. 2015. Neural machine translation by jointly learning to align and translate. In Proceedings Of The International Conference on Representation Learning (ICLR 2015).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Synthetic and natural noise both break neural machine translation", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Bisk", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1711.02173" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov and Yonatan Bisk. 2017. Synthetic and natural noise both break neural machine transla- tion. arXiv preprint arXiv:1711.02173.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Learning end-to-end goal-oriented dialog", |
| "authors": [ |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Antoine Bordes and Jason Weston. 2016. Learn- ing end-to-end goal-oriented dialog. CoRR, abs/1605.07683.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "What you can cram into a single vector: Probing sentence embeddings for linguistic properties", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Kruszewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1805.01070" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, German Kruszewski, Guillaume Lample, Lo\u00efc Barrault, and Marco Baroni. 2018. What you can cram into a single vector: Probing sentence embeddings for linguistic properties. arXiv preprint arXiv:1805.01070.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Annotation artifacts in natural language inference data", |
| "authors": [ |
| { |
| "first": "Swabha", |
| "middle": [], |
| "last": "Suchin Gururangan", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Swayamdipta", |
| "suffix": "" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.02324" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel R Bowman, and Noah A Smith. 2018. Annotation artifacts in natural language inference data. arXiv preprint arXiv:1803.02324.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Learning Symmetric Collaborative Dialogue Agents with Dynamic Knowledge Graph Embeddings", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Balakrishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Eric", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. He, A. Balakrishnan, M. Eric, and P. Liang. 2017. Learning Symmetric Collaborative Dialogue Agents with Dynamic Knowledge Graph Embed- dings. arXiv e-prints.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "How much reading does reading comprehension require? a critical investigation of popular benchmarks", |
| "authors": [ |
| { |
| "first": "Divyansh", |
| "middle": [], |
| "last": "Kaushik", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zachary C Lipton", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1808.04926" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Divyansh Kaushik and Zachary C Lipton. 2018. How much reading does reading comprehension require? a critical investigation of popular benchmarks. arXiv preprint arXiv:1808.04926.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Sharp nearby, fuzzy far away: How neural language models use context", |
| "authors": [ |
| { |
| "first": "Urvashi", |
| "middle": [], |
| "last": "Khandelwal", |
| "suffix": "" |
| }, |
| { |
| "first": "He", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1805.04623" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Urvashi Khandelwal, He He, Peng Qi, and Dan Ju- rafsky. 2018. Sharp nearby, fuzzy far away: How neural language models use context. arXiv preprint arXiv:1805.04623.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "A Diversity-Promoting Objective Function for Neural Conversation Models", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. Li, M. Galley, C. Brockett, J. Gao, and B. Dolan. 2015. A Diversity-Promoting Objective Function for Neural Conversation Models. ArXiv e-prints.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Dailydialog: A manually labelled multi-turn dialogue dataset", |
| "authors": [ |
| { |
| "first": "Yanran", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Wenjie", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziqiang", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuzi", |
| "middle": [], |
| "last": "Niu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1710.03957" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yanran Li, Hui Su, Xiaoyu Shen, Wenjie Li, Ziqiang Cao, and Shuzi Niu. 2017. Dailydialog: A manually labelled multi-turn dialogue dataset. arXiv preprint arXiv:1710.03957.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Improving language understanding by generative pre-training", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Karthik", |
| "middle": [], |
| "last": "Narasimhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Salimans", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. 2018. Improving language under- standing by generative pre-training. URL https://s3- us-west-2. amazonaws. com/openai-assets/research- covers/languageunsupervised/language under- standing paper. pdf.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Get to the point: Summarization with pointer-generator networks", |
| "authors": [ |
| { |
| "first": "Abigail", |
| "middle": [], |
| "last": "See", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter J", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1704.04368" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abigail See, Peter J Liu, and Christopher D Man- ning. 2017. Get to the point: Summarization with pointer-generator networks. arXiv preprint arXiv:1704.04368.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "A hierarchical latent variable encoder-decoder model for generating dialogues", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [ |
| "V" |
| ], |
| "last": "Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Lowe", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Charlin", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Thirty-First AAAI Conference (AAAI)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "I. V. Serban, A. Sordoni, R. Lowe, L. Charlin, J. Pineau, A. Courville, and Y. Bengio. 2017a. A hierarchical latent variable encoder-decoder model for generating dialogues. In Thirty-First AAAI Con- ference (AAAI).", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Building end-to-end dialogue systems using generative hierarchical neural network models", |
| "authors": [ |
| { |
| "first": "Iulian", |
| "middle": [], |
| "last": "Vlad Serban", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [ |
| "C" |
| ], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of AAAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iulian Vlad Serban, Alessandro Sordoni, Yoshua Ben- gio, Aaron C. Courville, and Joelle Pineau. 2016. Building end-to-end dialogue systems using gener- ative hierarchical neural network models. In Pro- ceedings of AAAI.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "A neural network approach to context-sensitive generation of conversational responses", |
| "authors": [ |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Brockett", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangfeng", |
| "middle": [], |
| "last": "Ji", |
| "suffix": "" |
| }, |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian-Yun", |
| "middle": [], |
| "last": "Nie", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Bill", |
| "middle": [], |
| "last": "Dolan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.06714" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alessandro Sordoni, Michel Galley, Michael Auli, Chris Brockett, Yangfeng Ji, Margaret Mitchell, Jian-Yun Nie, Jianfeng Gao, and Bill Dolan. 2015. A neural network approach to context-sensitive gen- eration of conversational responses. arXiv preprint arXiv:1506.06714.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Learning general purpose distributed sentence representations via large scale multi-task learning", |
| "authors": [ |
| { |
| "first": "Sandeep", |
| "middle": [], |
| "last": "Subramanian", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Trischler", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "J" |
| ], |
| "last": "Pal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1804.00079" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sandeep Subramanian, Adam Trischler, Yoshua Ben- gio, and Christopher J Pal. 2018. Learning gen- eral purpose distributed sentence representations via large scale multi-task learning. arXiv preprint arXiv:1804.00079.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural net- works. In Advances in neural information process- ing systems, pages 3104-3112.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Why self-attention? a targeted evaluation of neural machine translation architectures", |
| "authors": [ |
| { |
| "first": "Gongbo", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| }, |
| { |
| "first": "Mathias", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "Annette", |
| "middle": [], |
| "last": "Rios", |
| "suffix": "" |
| }, |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1808.08946" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gongbo Tang, Mathias M\u00fcller, Annette Rios, and Rico Sennrich. 2018. Why self-attention? a targeted eval- uation of neural machine translation architectures. arXiv preprint arXiv:1808.08946.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "The importance of being recurrent for modeling hierarchical structure", |
| "authors": [ |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Arianna", |
| "middle": [], |
| "last": "Bisazza", |
| "suffix": "" |
| }, |
| { |
| "first": "Christof", |
| "middle": [], |
| "last": "Monz", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1803.03585" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ke Tran, Arianna Bisazza, and Christof Monz. 2018. The importance of being recurrent for modeling hierarchical structure. arXiv preprint arXiv:1803.03585.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A neural conversational model", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1506.05869" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals and Quoc Le. 2015. A neural conversa- tional model. arXiv preprint arXiv:1506.05869.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Google's neural machine translation system: Bridging the gap between human and machine translation", |
| "authors": [ |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Norouzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Wolfgang", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| }, |
| { |
| "first": "Maxim", |
| "middle": [], |
| "last": "Krikun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Klaus", |
| "middle": [], |
| "last": "Macherey", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.08144" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. 2016. Google's neural ma- chine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Personalizing dialogue agents: I have a dog", |
| "authors": [ |
| { |
| "first": "Saizheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Dinan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jack", |
| "middle": [], |
| "last": "Urbanek", |
| "suffix": "" |
| }, |
| { |
| "first": "Arthur", |
| "middle": [], |
| "last": "Szlam", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1801.07243" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur Szlam, Douwe Kiela, and Jason Weston. 2018. Per- sonalizing dialogue agents: I have a dog, do you have pets too? arXiv preprint arXiv:1801.07243.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "text": "Could you show me where the Chinesc-style clothing is located ? I want to buy a silk coat the located Chinesc-style where is show a . buy you ? I clothing want coat silk me Could to 3 This way , please . Here they are . They're all handmade . are handmade . way please This all Here they . , They're . 4 Model Response: How much is it ?Model Response: How much is it ?", |
| "num": null, |
| "content": "<table><tr><td>No Perturbations</td><td>Token shuffling</td></tr><tr><td>1 Good afternoon ! Can I help you ?</td><td>I afternoon help you Good ? ! Can</td></tr><tr><td>2</td><td/></tr></table>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "text": "An example of an LSTM seq2seq model with attention's insensitivity to shuffling of words in the dialog history on the DailyDialog dataset.", |
| "num": null, |
| "content": "<table><tr><td>ilar analyses have been carried out to understand</td></tr><tr><td>the behavior of neural models in other settings.</td></tr><tr><td>An investigation into how LSTM based uncondi-</td></tr><tr><td>tional language models use available context was</td></tr><tr><td>carried out by Khandelwal et al. (2018). They</td></tr><tr><td>empirically demonstrate that models are sensitive</td></tr><tr><td>to perturbations only in the nearby context and</td></tr><tr><td>typically use only about 150 words of context.</td></tr><tr><td>On the other hand, in conditional language mod-</td></tr><tr><td>eling tasks like machine translation, models are</td></tr><tr><td>adversely affected by both synthetic and natural</td></tr><tr><td>noise introduced anywhere in the input</td></tr></table>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "text": "[0.41] 3.35 [0.38] 4.04 [0.28] 0.13 [0.04] 5.08 [0.79] 1.58 [0.15] 0.87 [0.08] 1.06 [0.28] 3.37 [0.33] 3.10 [0.45] seq2seq lstm att 29.65 [1.10] 4.76 [0.39] 2.54 [0.24] 3.31 [0.49] 0.32 [0.03] 4.84 [0.42] 2.03 [0.25] 1.37 [0.29] 2.22 [0.22] 2.82 [0.31] 3.29 [0.25] transformer 28.73 [1.30] 3.28 [1.37] 0.82 [0.40] 1.25 [0.62] 0.27 [0.19] 2.43 [0.83] 1.20 [0.69] 0.63 [0.17] 2.60 [0.98] 0.15 [0.08] 0.26 [0.18] Persona Chat seq2seq lstm 43.24 [0.99] 3.27 [0.13] 6.29 [0.48] 13.11 [1.22] 0.47 [0.21] 6.10 [0.46] 1.81 [0.25] 0.68 [0.19] 0.75 [0.15] 1.29 [0.17] 1.95 [0.20] seq2seq lstm att 42.90 [1.76] 4.44 [0.81] 6.70 [0.67] 11.61 [0.75] 2.99 [2.24] 5.58 [0.45] 2.47 [0.67] 1.11 [0.27] 1.20 [0.23] 2.03 [0.46] 2.39 [0.31]", |
| "num": null, |
| "content": "<table><tr><td>Models</td><td>Test PPL</td><td>Only</td><td>Shuf</td><td>Rev</td><td>Drop</td><td>Drop</td><td>Word</td><td>Verb</td><td>Noun</td><td>Word</td><td>Word</td></tr><tr><td/><td/><td>Last</td><td/><td/><td>First</td><td>Last</td><td>Drop</td><td>Drop</td><td>Drop</td><td>Shuf</td><td>Rev</td></tr><tr><td/><td/><td/><td colspan=\"4\">Utterance level perturbations ( \u2206 P P L [\u03c3] )</td><td/><td colspan=\"4\">Word level perturbations ( \u2206 P P L [\u03c3] )</td></tr><tr><td/><td/><td/><td/><td/><td>DailyDialog</td><td/><td/><td/><td/><td/><td/></tr><tr><td colspan=\"12\">seq2seq lstm 1.70 transformer 32.90 [1.40] 40.78 [0.31] 1.90 [0.08] 1.22 [0.22] 1.41 [0.54] \u22120.1 [0.07] 1.59 [0.39] 0.54 [0.08] 0.40 [0.00] 0.32 [0.18] 0.01 [0.01] 0.00 [0.06]</td></tr><tr><td/><td/><td/><td/><td/><td>MutualFriends</td><td/><td/><td/><td/><td/><td/></tr><tr><td>seq2seq lstm</td><td>14.17 [0.29]</td><td colspan=\"10\">1.44 [0.86] 1.42 [0.25] 1.24 [0.34] 0.00 [0.00] 0.76 [0.10] 0.28 [0.11] 0.00 [0.03] 0.61 [0.39] 0.31 [0.25] 0.56 [0.39]</td></tr><tr><td>seq2seq lstm att</td><td>10.60 [0.21]</td><td colspan=\"10\">32.13 [4.08] 1.24 [0.19] 1.06 [0.24] 0.08 [0.03] 1.35 [0.15] 1.56 [0.20] 0.15 [0.07] 3.28 [0.38] 2.35 [0.22] 4.59 [0.46]</td></tr><tr><td>transformer</td><td>10.63 [0.03]</td><td colspan=\"10\">20.11 [0.67] 1.06 [0.16] 1.62 [0.44] 0.12 [0.03] 0.81 [0.09] 0.75 [0.05] 0.16 [0.02] 1.50 [0.12] 0.07 [0.01] 0.13 [0.04]</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">bAbi dailog: Task5</td><td/><td/><td/><td/><td/></tr><tr><td>seq2seq lstm</td><td>1.28 [0.02]</td><td colspan=\"10\">1.31 [0.50] 43.61 [15.9] 40.99 [9.38] 0.00 [0.00] 4.28 [1.90] 0.38 [0.11] 0.01 [0.00] 0.10 [0.06] 0.09 [0.02] 0.42 [0.38]</td></tr><tr><td>seq2seq lstm att</td><td>1.06 [0.02]</td><td colspan=\"10\">9.14 [1.28] 41.21 [8.03] 34.32 [10.7] 0.00 [0.00] 6.75 [1.86] 0.64 [0.07] 0.03 [0.03] 0.22 [0.04] 0.25 [0.01] 1.10 
[0.80]</td></tr><tr><td>transformer</td><td>1.07 [0.00]</td><td colspan=\"10\">4.06 [0.33] 0.38 [0.02] 0.62 [0.02] 0.00 [0.00] 0.21 [0.02] 0.36 [0.02] 0.25 [0.06] 0.37 [0.06] 0.00 [0.00] 0.00 [0.00]</td></tr></table>", |
| "html": null, |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "text": "Model performance across multiple datasets and sensitivity to different perturbations. Columns 1 & 2 report the test set perplexity (without perturbations) of different models. Columns 3-12 report the increase in perplexity when models are subjected to different perturbations. The mean (\u00b5) and standard deviation [\u03c3] across 5 runs are reported. The Only Last column presents models with only the last utterance from the dialog history.", |
| "num": null, |
| "content": "<table/>", |
| "html": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |