| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:29:17.876680Z" |
| }, |
| "title": "Continual Adaptation for Efficient Machine Communication", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [ |
| "D" |
| ], |
| "last": "Hawkins", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Minae", |
| "middle": [], |
| "last": "Kwon", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "mnkwon@stanford.edu" |
| }, |
| { |
| "first": "Dorsa", |
| "middle": [], |
| "last": "Sadigh", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "D" |
| ], |
| "last": "Goodman", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "ngoodman@stanford.edu" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "To communicate with new partners in new contexts, humans rapidly form new linguistic conventions. Recent neural language models are able to comprehend and produce the existing conventions present in their training data, but are not able to flexibly and interactively adapt those conventions on the fly as humans do. We introduce an interactive repeated reference task as a benchmark for models of adaptation in communication and propose a regularized continual learning framework that allows an artificial agent initialized with a generic language model to more accurately and efficiently communicate with a partner over time. We evaluate this framework through simulations on COCO and in real-time reference game experiments with human partners.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "To communicate with new partners in new contexts, humans rapidly form new linguistic conventions. Recent neural language models are able to comprehend and produce the existing conventions present in their training data, but are not able to flexibly and interactively adapt those conventions on the fly as humans do. We introduce an interactive repeated reference task as a benchmark for models of adaptation in communication and propose a regularized continual learning framework that allows an artificial agent initialized with a generic language model to more accurately and efficiently communicate with a partner over time. We evaluate this framework through simulations on COCO and in real-time reference game experiments with human partners.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Communication depends on shared conventions about the meanings of words (Lewis, 1969) , but the real-world demands of language use often require agents to go beyond fixed conventional meanings (Grice, 1975; Davidson, 1986) . Recent work on pragmatic and context-aware models has approached this problem by equipping speaker and listener agents with the ability to explicitly reason about one another. Pragmatic reasoning allows listeners to infer richer intended meanings by considering counterfactual alternatives, and allows speakers to be appropriately informative, not merely truthful (Goodman and Frank, 2016; Andreas and Klein, 2016; Fried et al., 2018; Monroe et al., 2017; Vedantam et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 72, |
| "end": 85, |
| "text": "(Lewis, 1969)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 193, |
| "end": 206, |
| "text": "(Grice, 1975;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 207, |
| "end": 222, |
| "text": "Davidson, 1986)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 589, |
| "end": 614, |
| "text": "(Goodman and Frank, 2016;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 615, |
| "end": 639, |
| "text": "Andreas and Klein, 2016;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 640, |
| "end": 659, |
| "text": "Fried et al., 2018;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 660, |
| "end": 680, |
| "text": "Monroe et al., 2017;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 681, |
| "end": 703, |
| "text": "Vedantam et al., 2017)", |
| "ref_id": "BIBREF61" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "These models have largely focused on one-shot settings, where the context is the immediate visual environment. In common interactive settings, however, the relevant context for pragmatic competence also includes the history of previous interactions with the same communication partner. Human in-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "... We introduce a regularized continual learning approach allowing agents initialized with a pretrained language model \u0398 to iteratively infer the language model \u03b8 i used by a partner, over repeated interactions {t 1 , t 2 . . . } in an online reference game.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "terlocutors are able to establish ad hoc conventions based on this history (Clark and Wilkes-Gibbs, 1986; Clark, 1996) , allowing for increasingly accurate and efficient communication. Speakers can remain understandable while expending significantly fewer words (Krauss and Weinheimer, 1964; Orita et al., 2015; Stali\u016bnait\u0117 et al., 2018; Hawkins et al., 2020a; Stewart et al., 2020) . For example, consider a nurse visiting a bedridden patient at their home. The first time the patient asks the nurse to retrieve a particular medication, they must painstakingly identify a specific bottle, e.g. \"the medicine for my back pain in a small blue medicine bottle labeled Flexeril in my bathroom.\" But after a week of care, they may just ask for the \"back meds\" and expect the nurse to know which bottle they mean. Such flexibility poses a challenge for current pragmatic models. For an artificial agent to establish new conventions, as humans do, it must go beyond pragmatic reasoning at the single-utterance timescale to learn about its partners over longer timescales.", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 105, |
| "text": "Wilkes-Gibbs, 1986;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 106, |
| "end": 118, |
| "text": "Clark, 1996)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 262, |
| "end": 291, |
| "text": "(Krauss and Weinheimer, 1964;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 292, |
| "end": 311, |
| "text": "Orita et al., 2015;", |
| "ref_id": "BIBREF51" |
| }, |
| { |
| "start": 312, |
| "end": 337, |
| "text": "Stali\u016bnait\u0117 et al., 2018;", |
| "ref_id": "BIBREF58" |
| }, |
| { |
| "start": 338, |
| "end": 360, |
| "text": "Hawkins et al., 2020a;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 361, |
| "end": 382, |
| "text": "Stewart et al., 2020)", |
| "ref_id": "BIBREF59" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Here, we propose that the problem of ad hoc convention formation can be usefully re-formulated as an inference problem amenable to online domain adaptation. Our approach is motivated by a growing body of evidence in cognitive science that humans quickly re-calibrate their expectations about how language is used by different partners (Grodner and Sedivy, 2011; Yildirim et al., 2016) . This empirical work highlights three key challenges facing a scalable adaptation approach. First, because the target data comes from intentional agents, pragmatic reasoning must be deployed throughout adaptation to strengthen inferences (Frank et al., 2009) . Second, because the data is sparse, strong adaptation risks catastrophic forgetting; yet, human speakers are able to revert to their background expectations for the next interlocutor (Wilkes-Gibbs and Clark, 1992; Metzing and Brennan, 2003) . Third, the ability to ground the meanings of later, shorter utterances (e.g. \"back meds\") in the use of earlier, longer utterances requires a compositional representation; otherwise the connection between the utterances is not clear (Hawkins et al., 2020a) .", |
| "cite_spans": [ |
| { |
| "start": 335, |
| "end": 361, |
| "text": "(Grodner and Sedivy, 2011;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 362, |
| "end": 384, |
| "text": "Yildirim et al., 2016)", |
| "ref_id": "BIBREF66" |
| }, |
| { |
| "start": 624, |
| "end": 644, |
| "text": "(Frank et al., 2009)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 830, |
| "end": 860, |
| "text": "(Wilkes-Gibbs and Clark, 1992;", |
| "ref_id": "BIBREF65" |
| }, |
| { |
| "start": 861, |
| "end": 887, |
| "text": "Metzing and Brennan, 2003)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 1123, |
| "end": 1146, |
| "text": "(Hawkins et al., 2020a)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our primary contribution is an online continual learning framework for transforming pragmatic agents into adaptive agents that can be deployed in real-time interactions. This framework is shown schematically in Fig. 1 : after each trial, we take a small number of gradient steps to update beliefs about the language model used by the current partner. To evaluate our framework, we first introduce a benchmark interactive repeated reference task ( Fig. 2 ) using contexts of natural images. In Sec. 3, we introduce the three core components of our algorithm: (i) a contrastive loss objective incorporating explicit pragmatic reasoning, (ii) a KL regularization objective to prevent overfitting or catastrophic forgetting, and (iii) a data augmentation step for compositionally assigning credit to sub-utterances. In Sec. 4, we report experiments demonstrating that this algorithm enables more effective communication with naive human partners over repeated interactions. Finally, in Sec. 5 we report a series of ablation studies showing that each component plays a necessary role, and close with a discussion of important areas for future research in Sec. 6.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 211, |
| "end": 217, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 447, |
| "end": 453, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Personalizing language models. Adapting or personalizing language models is a classic problem of practical interest for NLP, where shifts in the data distribution are often found across test contexts (Kneser and Steinbiss, 1993; Riccardi and Gorin, 2000; Bellegarda, 2004; Ben-David et al., 2010) . Our approach draws upon the idea of dynamically fine-tuning RNNs (Mikolov et al., 2010; Krause et al., 2017) , which has successfully explained key patterns of human behavior in selfpaced reading tasks (Van Schijndel and Linzen, 2018) . We also draw on the regularization objectives proposed in this literatures (Li and Bilmes, 2007; Liu et al., 2016) . However, the interactive communicative setting we consider poses several distinct challenges from traditional speech recognition (Miao and Metze, 2015) or text classification settings (Blitzer et al., 2007; Glorot et al., 2011) for which adaptation is typically considered. Partnerspecific observations of language use are sparser, must be incorporated online, and are generated by intentional agents.", |
| "cite_spans": [ |
| { |
| "start": 200, |
| "end": 228, |
| "text": "(Kneser and Steinbiss, 1993;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 229, |
| "end": 254, |
| "text": "Riccardi and Gorin, 2000;", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 255, |
| "end": 272, |
| "text": "Bellegarda, 2004;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 273, |
| "end": 296, |
| "text": "Ben-David et al., 2010)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 364, |
| "end": 386, |
| "text": "(Mikolov et al., 2010;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 387, |
| "end": 407, |
| "text": "Krause et al., 2017)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 501, |
| "end": 533, |
| "text": "(Van Schijndel and Linzen, 2018)", |
| "ref_id": "BIBREF60" |
| }, |
| { |
| "start": 611, |
| "end": 632, |
| "text": "(Li and Bilmes, 2007;", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 633, |
| "end": 650, |
| "text": "Liu et al., 2016)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 782, |
| "end": 804, |
| "text": "(Miao and Metze, 2015)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 837, |
| "end": 859, |
| "text": "(Blitzer et al., 2007;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 860, |
| "end": 880, |
| "text": "Glorot et al., 2011)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Incorporating discourse history. Previous work has incorporated discourse history in reference games using explicit co-reference detection (Roy et al., 2019) or contribution tracking (DeVault and Stone, 2009) techniques. An alternative approach is to include embeddings of the history as conditional input to the model at test time (Haber et al., 2019) . Similar approaches have been proposed for sequential visual question answering (Ohsugi et al., 2019; Choi et al., 2018) . Rather than pre-training a fixed, monolithic language model and incorporating shared history on top of this model at test time, we suggest that the underlying language model itself ought to be continually adapted over the course of an interaction.", |
| "cite_spans": [ |
| { |
| "start": 139, |
| "end": 157, |
| "text": "(Roy et al., 2019)", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 183, |
| "end": 208, |
| "text": "(DeVault and Stone, 2009)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 332, |
| "end": 352, |
| "text": "(Haber et al., 2019)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 434, |
| "end": 455, |
| "text": "(Ohsugi et al., 2019;", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 456, |
| "end": 474, |
| "text": "Choi et al., 2018)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Bayesian models of adaptation. Models of adaptation in cognitive science are typically formulated in terms of (hierarchical) Bayesian beliefupdating based on evidence of language use (Kleinschmidt and Jaeger, 2015; Roettger and Franke, 2019; Delaney-Busch et al., 2019; Schuster and Degen, 2020) . In these models, each new observation is taken as statistical evidence about the partner's language model, allowing pairs to coordinate on shared expectations and ground new conventions in their partner's previous behavior (see Sec. 3.2). While these models capture key theoretical properties of human adaptation, they do not scale well to natural-language applications, where neural networks are dominant. ", |
| "cite_spans": [ |
| { |
| "start": 242, |
| "end": 269, |
| "text": "Delaney-Busch et al., 2019;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 270, |
| "end": 295, |
| "text": "Schuster and Degen, 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We begin by recasting convention formation as an online domain adaptation problem. As in previous computational approaches to pragmatics (e.g. Goodman and Frank, 2016; Andreas and Klein, 2016), we formulate this problem as an inference about another agent. The key theoretical idea is to expand the scope of pragmatic inference from the single-utterance timescale to evidence accumulated over longer timescales of an interaction. In addition to inferring a partner's intended meaning (or interpretation) for each individual utterance, an adaptive agent pools across previous utterances to infer the distinct but stable way their partner uses language. Under this inference framework, an agent must both (1) begin with background expectations about language shared across many partners, and (2) have a mechanism to rapidly learn the specific language model used by the current partner. Our work assumes a conventional neural language model as the starting point and focuses on the partner-specific inference problem. In this section, we describe our repeated reference game benchmark task (3.1), review the underlying problem as it has been previously formulated in a Bayesian framework (3.2), and finally describe our algorithm for adapting neural language models (3.3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As a benchmark for studying domain adaptation in communication, we use the repeated reference game task (Fig. 2) , which has been widely used in cognitive science to study partner-specific adaptation in communication (Krauss and Weinheimer, 1964; Clark and Wilkes-Gibbs, 1986; Wilkes-Gibbs and Clark, 1992) . In this task, a speaker agent and a listener agent are shown a context of images, C (e.g. four images of cats). On each trial, one of these images is privately designated as the target object, o * , for the speaker (e.g. the image with the thick border shown on the left). The speaker agent thus takes the pair (o * , C) as input and returns an utterance u (e.g. \"black cat with a fluffy cat\") that will allow the listener to select the target from C. The listener agent takes (u, C) as input and returns a softmax probability for each image, which it uses to make a selection. Both agents then receive feedback about the listener's selection and the identity of the target. Critically, the sequence of trials is constructed so that each image appears as the target several times. For example, our evaluations loop through each target six times, allowing us to observe how communication about each image changes as a function of dialogue history (see Fig. S1 in Supplementary Materials for examples).", |
| "cite_spans": [ |
| { |
| "start": 217, |
| "end": 246, |
| "text": "(Krauss and Weinheimer, 1964;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 247, |
| "end": 276, |
| "text": "Clark and Wilkes-Gibbs, 1986;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 277, |
| "end": 306, |
| "text": "Wilkes-Gibbs and Clark, 1992)", |
| "ref_id": "BIBREF65" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 112, |
| "text": "(Fig. 2)", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 1260, |
| "end": 1267, |
| "text": "Fig. S1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Repeated reference game task", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We begin by assuming that agents represent the semantics of their language as a function relating natural language utterances u to actual states of the world o (here, images). We further assume that this function belongs to a family parameterized by \u03b8, and denote the parameter used by a particular agent i with \u03b8 i (see Fig. 1 ). If an artificial agent knows the true value of \u03b8 i -their current partner's semantics 1 -they are in a better position to understand them, and to be understood in turn. However, because \u03b8 i is not directly observable and \u03b8 varies across partners and contexts, it must be inferred (Bergen et al., 2016) . Furthermore, it is in the agent's best interest to use its updated beliefs about its partner's \u03b8 to guide its own production and interpretation. An important consequence of this formulation is that conventionalization, the process by which parties converge on an efficient way to refer to something, emerges naturally as a consequence of mutual adaptation, the process by which each party independently tries to infer their interlocutor's language model (Smith et al., 2013; Hawkins et al., 2020b) . This is the central computational problem of adaptation, which we formalize as follows. Following Bayes Rule, the adaptive agent's beliefs about \u03b8 i , conditioning on observations D i from the shared history of interactions in that context, are:", |
| "cite_spans": [ |
| { |
| "start": 611, |
| "end": 632, |
| "text": "(Bergen et al., 2016)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 1089, |
| "end": 1109, |
| "text": "(Smith et al., 2013;", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 1110, |
| "end": 1132, |
| "text": "Hawkins et al., 2020b)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 321, |
| "end": 327, |
| "text": "Fig. 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The inference problem", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (\u03b8 i |D i , \u0398) \u221d P (D i |\u03b8 i )P (\u03b8 i |\u0398)", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "The inference problem", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "This formulation decomposes the inference into two terms, a prior term P (\u03b8 i |\u0398) and a likelihood term", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The inference problem", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "P (D i |\u03b8 i ). 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The inference problem", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The prior captures the idea that different partners share some general features of the semantics, represented by \u0398, since they speak the same language; in the absence of partner-specific information, the agent ought to be regularized toward this background knowledge. The likelihood term, on the other hand, accounts for direct evidence of language use. It represents an explicit forward model of an agent: different latent values of \u03b8 generate different observable actions. In other words, the standard single-utterance pragmatic inference problem is nested within the longer-timescale inference about \u03b8. While explicit reasoning about the other agent is typically considered at the time of action selection (i.e. when the speaker is choosing an utterance, or when the listener is choosing a referent; Goodman and Frank, 2016; Andreas and Klein, 2016), this likelihood term importantly incorporates such reasoning at the time of adaptation (i.e. when updating beliefs about \u03b8 based on previous actions; Frank et al., 2009; Smith et al., 2013) .", |
| "cite_spans": [ |
| { |
| "start": 1004, |
| "end": 1023, |
| "text": "Frank et al., 2009;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 1024, |
| "end": 1043, |
| "text": "Smith et al., 2013)", |
| "ref_id": "BIBREF57" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The inference problem", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "If we let \u03b8 be the weights of an image-captioning network, then the background knowledge shared across partners, \u0398, corresponds to a pre-trained initialization, and conditioning on partner-specific data under a Bayesian prior corresponds to regularized gradient descent on \u03b8. We exploit this connection to derive an online continual learning scheme that addresses the challenges of adapting to a human partner in a repeated reference game task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Architecture and algorithm overview. Concretely, we consider an architecture that combines a convolutional visual encoder (ResNet-152) with an LSTM decoder (Vinyals et al., 2015) . The LSTM takes a 300-dimensional embedding as input for each word in an utterance and its output is linearly projected back to a softmax distribution over the vocabulary size. To pass the visual feature vector computed by the encoder into the decoder, the final layer of ResNet was replaced by a fully-connected adapter layer. This layer was jointly pre-trained with the decoder on the COCO training corpus (Lin et al., 2014) and frozen. The COCO corpus con-Algorithm 1 Update step for adaptive model 1: Input: \u03b8 t : weights at time t 2: Output: \u03b8 t+1 : updated weights 3: Data: (u t , o t ): observed utterance and object 4: for step do 5:", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 178, |
| "text": "(Vinyals et al., 2015)", |
| "ref_id": "BIBREF62" |
| }, |
| { |
| "start": 588, |
| "end": 606, |
| "text": "(Lin et al., 2014)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "sample augmented batch u \u223c P(u t ) 6: let f \u03b8t = log P \u03b8t (u|o t ) + log P \u03b8t (o t |u) \u2212 reg(o 1:t\u22121 , u 1:t\u22121 ) 7:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "update \u03b8 t \u2190 \u03b8 t + \u03b2\u2207f \u03b8t 8: end for tains images of common objects, each annotated with multiple human captions. The CNN-LSTM architecture allows an agent to select utterances, by using beam search over captions given a target image as input, and also to select objects from the context, by evaluating the likelihood of the caption for each image in context and taking the most likely one.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Critically, we assume the agent will select actions on each trial using the value of \u03b8 it believes its partner to be using, so updating its own model is equivalent to updating expectations about its partner's model. Using the pre-trained model as our initialization, we can fine-tune the decoder weights (i.e. word embeddings, LSTM, and linear output layer) within a particular communicative interaction. Our algorithm is specified in Algorithm 1. Upon observing the utterance-object pair produced on each trial of the repeated reference game (Line 3), we take a small number of gradient steps updating the model weights to reflect the usage observed so far (Lines 4-7). Our adaptation objective function (Line 6) is built from combining a standard cross-entropy term with a KL-based regularization term to prevent catastrophic forgetting and a contrastive term to incorporate pragmatic reasoning about the visual context. In the following sections, we explain these terms and also introduce a final component of our approach: compositional data augmentation. Utterance likelihood. For our benchmark repeated reference game, the data obtained on trial t is a paired observation of an utterance u and an intended object of reference o. The simplest learning objective for \u03b8 is the standard cross-entropy loss: the likelihood of this utterance being produced to convey the intended target in isolation: P \u03b8 (u|o). This likelihood can be computed directly from the neural captioning model, where the probability of each word in u = {w 0 , . . . , w } is given by the softmax decoder output conditioned on the sentence so far, P \u03b8t (w i |o, w \u2212i ), so:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "P \u03b8t (u|o) \u221d i< P \u03b8t (w i |o, w \u2212i ) (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Contrastive likelihood.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "The same objectutterance pairs can be viewed as being generated by a listener agent selecting o relative to the other distractors in the immediate context C of other objects. This reasoning requires inverting the captioning model to evaluate how well the utterance u describes each object in C, and then normalizing:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P \u03b8t (o|u, C, \u03b8 t ) \u221d P \u03b8t (u|o)P (o)", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "This inversion is based on models of one-shot pragmatic inference in reference games (Goodman and Frank, 2016; Andreas and Klein, 2016; Vedantam et al., 2017; Cohn-Gordon et al., 2018) . While optimizing the utterance likelihood serves to make the observed utterance more likely for the target in isolation, optimizing the contrastive likelihood allows the agent to make a stronger inference that it does not apply to the distractors. KL Regularization. Fine-tuning repeatedly on a small number of data points presents a clear risk of catastrophic forgetting (Robins, 1995) , losing our ability to produce or understand utterances for other images. While limiting the number of gradient steps keeps the adapted model somewhat close to the prior, we will show that this is not sufficient (see Sec. 5.1). Because small differences in weights can lead to large differences in behavior for neural models, we also consider a regularization that tethers the behavior of the adapted model close to the behavior at initialization. Specifically, we consider a KL regularization term that explicitly minimizes the divergence between the captioning model's output probabilities before and after finetuning for unseen images (Yu et al., 2013; Galashov et al., 2018) . It is not tractable to take the KL divergence over the (nearly infinite) space of all possible natural-language utterances. Hence, we approximate the divergence incrementally by expanding from the maximum a posteriori (MAP) word denoted w * at each step according to the initial model P \u0398 (see Appendix A):", |
| "cite_spans": [ |
| { |
| "start": 85, |
| "end": 110, |
| "text": "(Goodman and Frank, 2016;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 111, |
| "end": 135, |
| "text": "Andreas and Klein, 2016;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 136, |
| "end": 158, |
| "text": "Vedantam et al., 2017;", |
| "ref_id": "BIBREF61" |
| }, |
| { |
| "start": 159, |
| "end": 184, |
| "text": "Cohn-Gordon et al., 2018)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 559, |
| "end": 573, |
| "text": "(Robins, 1995)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 1213, |
| "end": 1230, |
| "text": "(Yu et al., 2013;", |
| "ref_id": "BIBREF68" |
| }, |
| { |
| "start": 1231, |
| "end": 1253, |
| "text": "Galashov et al., 2018)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "i< D KL P \u0398 (w i |o, w * \u2212i ) P \u03b8t (w i |o, w * \u2212i ) (4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "where is the length of the MAP caption. This loss is then averaged across random images sampled from the full domain O, not just those in context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Compositional data augmentation. Agents should be able to infer previous successes on a longer utterance (e.g. \"two men are sitting on a bench\"), that the component parts of this utterance (e.g. \"two men\", \"a bench\") are also likely to convey the intended meaning. In the absence of a (weakly) compositional representation, a speaker has no way of doing credit assignment: observing that a listener successfully chose the target upon hearing a long utterance only provides further evidence for the full utterance. Fine-tuning an LSTM architecture will increase the likelihood of sub-strings to some extent after a successful selection, but this is insufficient for two reasons. First, not all sub-strings are syntactically well-formed referring expressions (e.g. \"two men are\"), and the LSTM lacks a syntactic representation to represent such coherence. Second, the likelihood of the full utterance will always be increased by more than any sub-utterance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "To address these problems, we explored a data augmentation step that introduces a stronger compositionality bias via referential entailments (Young et al., 2014) . After each trial, we augmented the speaker's utterance u with a small denotation graph D(u) containing the set of all noun phrases found in the syntactic dependency parse of u, and optimize our objective function on batches of these entailments. By independently updating expectations about well-formed entailments alongside the longer utterances that were actually produced, we hypothesized that our model could more naturally ground shorter, conventionalized labels in the shared history of successful understanding. Local rehearsal. A second form of augmentation we explore is local rehearsal: at each step we include data from the history of interaction D = {(u, o)} 1:t up to the current time t, to prevent overfitting to the most recent observation. In practice we subsample batches from the interaction history in a separate loss term with its own weighting coefficient, ensuring the new data point and a batch of its subphrase augmentations are used in every gradient step. We initialize D with the utterance the model generates for each object.", |
| "cite_spans": [ |
| { |
| "start": 141, |
| "end": 161, |
| "text": "(Young et al., 2014)", |
| "ref_id": "BIBREF67" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Continual adaptation for neural models", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In this section, we evaluate our model's performance in real-time interactions with human speakers. Our artificial agent was paired with human partners to play a repeated reference game using images from the validation set of the COCO corpus (Lin et al., 2014; Chen et al., 2015) as the targets of reference. Critically, we constructed contexts to create a diagnostic mismatch between the COCO pre-training regime and the referential test regime. Specifically, we chose contexts such that the model's accuracy -the probability of identifying the target -would be poor at the outset.", |
| "cite_spans": [ |
| { |
| "start": 242, |
| "end": 260, |
| "text": "(Lin et al., 2014;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 261, |
| "end": 279, |
| "text": "Chen et al., 2015)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interactive human evaluations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To obtain appropriately challenging contexts, we used our pre-trained model's own visual encoder to find sets of highly similar images within the same category. We first extracted 256-dimensional feature vectors for each image from the final, fullyconnected layer of the encoder. We then used these features to partition the images into 100 groups using a k-means algorithm, sampled one image from each cluster, and took its 3 nearest neighbors in feature space, yielding 100 unique contexts of 4 images each. This adversarial process explicitly identified contexts that our pre-trained captioning model would be poorly equipped to distinguish.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Interactive human evaluations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Human baselines. We first investigated the baseline performance of human speakers and listeners. We recruited 108 participants (54 pairs) from Amazon Mechanical Turk and automatically paired them into an interactive environment with a chatbox. For each pair, we sampled a context and constructed a sequence of 24 trials structured into 6 repetition blocks, where each of the 4 images appeared as the target once per block. We prevented the same target appearing twice in a row and scrambled the order of the images on each player's screen on each trial. We found that pairs of humans were highly accurate, with performance consistently near ceil-ing ( Fig. 3, black lines) . At the same time, their utterances grew increasingly efficient: their utterances reduced in length across repeated interaction (t = 25.8, p < 0.001). 3", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 652, |
| "end": 672, |
| "text": "Fig. 3, black lines)", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Interactive human evaluations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Next, we evaluated the performance of our adaptive model in the listener role (for a similar analysis of our model in the speaker role, see Appendix D). We recruited 57 additional participants from Amazon Mechanical Turk who were told they would be paired with an artificial agent learning how they talk. This task was identical to the one performed by pairs of humans, except we allowed only a single message to be sent through the chatbox on each trial. This message was sent to a server where the model weights from the previous trial were loaded to the GPU, used to generate a response, and updated for the next round. The approximate latency for the model to respond was 5-10s depending on how many games were running simultaneously.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model performance", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "For our adaptation objective function, we used a linear combination of the utterance and contrastive losses and the KL-regularization (see Appendix B for hyper-parameter settings). We also used local rehearsal and compositional data augmentation. While the pre-trained model initially performs much less accurately than humans, as expected, our adaptive listener shows rapid improvement in accuracy over the course of interaction (Fig. 3) . In a mixed-effects logistic regression predicting trial-level accuracy, including pair-and image-level random effects, we found a significant increase in the probability of a correct response with successive repetitions, z = 12.6, p < 0.001, from 37% correct (slightly above chance levels of 25%) to 93% at the end. To test whether this success can be attributed to the initial quality of the listener model, or to humans adapting to a relatively unchanging model, we examined the performance of a non-adapting baseline (i.e. a model using the pre-trained model weights on every trial). We evaluated this baseline offline, using the utterances we recorded from the online games. This baseline showed no improvement, staying only slightly above chance accuracy over the course of the task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 430, |
| "end": 438, |
| "text": "(Fig. 3)", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Model performance", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We now proceed to a series of ablation analyses that analyze the role played by each component of our approach. These analyses involve offline simulations conducted on the data we collected in the previous section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We begin by testing the effectiveness of our KL regularization term (Eq. 4) for preventing catastrophic forgetting. We reasoned that changing expectations in the adaptation context should not interfere with expectations in other, unseen contexts. To directly analyze such interference, we adapted an ablated variant of our listener model over the course of a game with one context of images, and then measured its average accuracy identifying the target given the initial utterances produced by different speakers on different (unseen) contexts. We then compared this test accuracy with the baseline accuracy achieved by an unadapted listener model. We cross-validated these estimates over many adaptation contexts. Specifically, because the baseline was already close to chance on 'challenging' contexts ( Fig. 3) , we used an additional set of 52 human-human interactions we collected in easier contexts (where images belonged to different COCO categories) to better expose degradations in performance. While accuracy significantly increased compared to baseline in the adapting context for both variants, we found a 10% drop in accuracy on unseen contexts for the ablated variant with no KL term, compared to only a 2% drop in the model using the full loss (t(51) = 12.2, p < 0.001 in a paired t test; see Fig. 4A ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 807, |
| "end": 814, |
| "text": "Fig. 3)", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 1309, |
| "end": 1316, |
| "text": "Fig. 4A", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "KL regularization prevents catastrophic forgetting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Next, to more thoroughly probe the progression of interference, we conducted a second analysis examining the likelihood assigned to different captions by the listener model over the course of adaptation. We tracked both the initial captions produced by the pre-trained initialization in the adapting context and in unseen contexts. To obtain unseen contexts, we sampled a set of images from COCO that were not used in our experiment, and generated a caption for each. We also generated initial captions for the target objects in the adapting context. We recorded the likelihood of all of these sampled captions under the model at the beginning and at each step of adaptation until the final round. Finally, we greedily generated an utterance for each target at the end and retrospectively evaluated its likelihood at earlier points during adaptation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "KL regularization prevents catastrophic forgetting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "These three likelihood curves are compared for ablated models in Fig. 4B . By definition, the final caption in the adapting context becomes more likely in all cases (brown line). Without the lo-cal rehearsal mechanism, the initial caption the model expected in the adapting context becomes less likely as it is replaced by the human partner's preferred caption (red line). Only when the KL term is removed, however, do we find interference with the model's expectations for unseen contexts (yellow line). Thus, we find that KL regularization plays a critical role in preventing catastrophic forgetting.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 65, |
| "end": 72, |
| "text": "Fig. 4B", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "KL regularization prevents catastrophic forgetting", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Next, we consider the contributions of other key components for success. Specifically, we constructed ablated variants of our model with no pragmatics (i.e. no contrastive loss term during adaption), and with no local rehearsal (i.e. no ability to keep training on batches from the history of the interaction). We simulated adaptation for these ablated variants on the 57 games where human speakers produced utterances for our listener model, and examined the probability assigned to the target after hearing each utterance (Fig. 4C) .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 524, |
| "end": 533, |
| "text": "(Fig. 4C)", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Pragmatics and local rehearsal improve listener performance", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We found in a mixed-effects regression that each of these components independently contributes to success, as the ablated variants perform significantly worse than the full model (z = 2.1, p = 0.03 and z = 3.6, p < 0.001 for variants with no local rehearsal and no pragmatics, respectively; see Appendix C for regression details). Compared to an entirely non-adapting baseline, however, even these ablated variants improved over time.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pragmatics and local rehearsal improve listener performance", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Relationship to human adaptation The theoretical ties between our approach and proposed cognitive models of human adaptation raises several questions. First, it is possible that improved performance could be driven by human speakers adapting in response to our listener agent's successes and errors rather than the other way around. While some degree of human adaptation is inevitablefor example, humans only seemed to shorten their utterances once our models' accuracy began to rise -human adaptation alone is insufficient to explain gains in accuracy. If these gains were due to human speakers gradually discovering utterances that a pre-trained (non-adapting) model could understand, we would expect some gains in the accuracy of our baseline non-adapting model over time. Furthermore, we found that the handful of human speakers that dramatically changed their descriptions across rounds actually performed worse than those who adhered to consistent descriptions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "With this said, the extent of adaptation in humancomputer dialogue is known to be affected by human participants' expectations about the artificial agent (Branigan et al., 2011; Koulouri et al., 2016) , potentially including expectations about whether it will be adaptive or not. Bi-directional adaptation effects may be more pronounced in other dialogue settings where the human and model both speak, giving the human an opportunity to re-use utterances produced by the model. It will be important for future work to evaluate non-adaptive baselines online rather than offline, as we did, in order to observe exactly how humans respond to, or compensate for, non-adaptive agents.", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 177, |
| "text": "(Branigan et al., 2011;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 178, |
| "end": 200, |
| "text": "Koulouri et al., 2016)", |
| "ref_id": "BIBREF34" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Second, it is natural to ask how our model would perform in the speaker role with a human listener, using their (sparse) response success as feedback rather than their utterances. In ongoing work, we have found that the same approach allows a (pragmatic) model to converge to more efficient conventions in the speaker role (see Appendix D in supplemental), such that the same language model can flexibly switch between speaker and listener roles with the same human partner. Still, it is unlikely that this speaker model reduces in the same way as human speakers do (see Supplemental Fig. S1 for examples). Differences may reflect additional accessibility, grammaticality, or compositionality biases in humans; direct comparisons remain an open question for cognitive science.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 584, |
| "end": 591, |
| "text": "Fig. S1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Third, scaling the principles of computationallevel Bayesian cognitive models to neural networks capable of adapting to natural language in practice required several algorithmic-level innovations which are not yet plausible proposals for human cognition (Marr, 2010) . While our local rehearsal mechanism may be consistent with replay mechanisms in human memory, our KL regularization mechanism implausibly requires earlier parameter values of the model to be held in memory. Our data augmentation mechanism was introduced specifically to compensate for the inability of the LSTM architecture to propagate the use of a referring expression to its entailments, but we expect that human language processing mechanisms achieve this effect by different means. We expect further work to refine these algorithmic components as neural language models continue to advance.", |
| "cite_spans": [ |
| { |
| "start": 254, |
| "end": 266, |
| "text": "(Marr, 2010)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Relationship to language learning Our work is also related to broader efforts to ground language learning and emergent communication in usage, where artificial agents are trained to use language from scratch by playing interactive reference games Lazaridou et al., 2016; Wang et al., 2017; Chevalier-Boisvert et al., 2019) . Rather than starting our agents from scratch, we have emphasized the need for continual, partner-specific learning even among mature language users with existing priors. This raises another question: how are these different timescales of learning related to one another? One possibility is that the need to quickly adapt one's language to new partners and contexts over short timescales may serve as a functional pressure shaping languages more broadly.", |
| "cite_spans": [ |
| { |
| "start": 247, |
| "end": 270, |
| "text": "Lazaridou et al., 2016;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 271, |
| "end": 289, |
| "text": "Wang et al., 2017;", |
| "ref_id": "BIBREF63" |
| }, |
| { |
| "start": 290, |
| "end": 322, |
| "text": "Chevalier-Boisvert et al., 2019)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Recent theories in cognitive science have formalized this hypothesis in a hierarchical Bayesian model (Hawkins et al., 2020b) . In this model, the prior \u0398 that an agent brings into subsequent interactions is updated to reflect the overall distribution of partner-specific models \u03b8 i , thus balancing general and idiosyncratic language knowledge in a principled way. For neural language models, however, there is an apparent tension between the strong KL regularization required to prevent unwanted interference with background knowledge during partner-specific adaptation, leading to catastrophic forgetting, and the flexibility to generalize or transfer conventions to new communicative settings as required for language learning. We do not want to regularize so strongly that agents memorize conventions only applying to a single image that is completely reset after each interaction; instead, we wish to obtain a gradient of generalization across both referents and partners as a function of similarity (Markman and Makin, 1998).", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 125, |
| "text": "(Hawkins et al., 2020b)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "One promising solution to this problem, motivated by connections between hierarchical Bayes and algorithms like MAML (Finn et al., 2017; Grant et al., 2018; Nagabandi et al., 2019) , is to perform a meta-learning 'outer loop' updating the initialization \u0398, taking into account the regularized, partner-specific 'inner loop' of adaptation for each \u03b8 i . In principle, a meta-learning approach for neural language learning would distill abstract, shared aspects of language into a unified \u0398, while still allowing for rapid ad hoc conventionalization. Still, cognitively plausible and scalable meta-learning algorithms remain an open area of research.", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 136, |
| "text": "(Finn et al., 2017;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 137, |
| "end": 156, |
| "text": "Grant et al., 2018;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 157, |
| "end": 180, |
| "text": "Nagabandi et al., 2019)", |
| "ref_id": "BIBREF49" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Limitations and future work While our evaluations were limited to a canonical CNN-RNN image captioning architecture, a key open question for future work is how our continual adaptation approach ought to be implemented for more complex, stateof-the-art architectures. One possibility, following the approach recently proposed by Jaech and Ostendorf (2018a), is to allow context (e.g. partner identity) to control a low-rank transformation of the weight matrix such that online fine-tuning can take place in a more compact context embedding space (Jaech and Ostendorf, 2018b) .", |
| "cite_spans": [ |
| { |
| "start": 545, |
| "end": 573, |
| "text": "(Jaech and Ostendorf, 2018b)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Furthermore, while we adapted the entire parameterized RNN module end-to-end, future work should explore the effect of limiting adaption to subcomponents (e.g. only word embeddings) or expanding adaptation to additional model components such as attention weights or high-level visual representations. Beyond possible consequences for engineering better adaptive models, each of these variants corresponds to a distinct cognitive hypothesis about exactly which representations are being adapted on the fly in human communication.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "A final area for future work is generalizing the forms of social feedback that can be used as data D i for updating representations beyond the sparse choices in a reference game. In particular, forms of repair through question-asking or other nonreferential dialogue acts may license stronger inferences about a partner's language model and allow misunderstandings to be resolved more quickly in challenging contexts (Drew, 1997; Dingemanse et al., 2015; Li et al., 2016) . These forms of feedback may be particularly important for extending our approach beyond the benchmark task of repeated reference games to the more complex domains of real-world conversational tasks.", |
| "cite_spans": [ |
| { |
| "start": 417, |
| "end": 429, |
| "text": "(Drew, 1997;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 430, |
| "end": 454, |
| "text": "Dingemanse et al., 2015;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 455, |
| "end": 471, |
| "text": "Li et al., 2016)", |
| "ref_id": "BIBREF39" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Conclusion Human language use is remarkably flexible, continuously adapting to the needs of the current situation. In this paper, we introduced a challenging repeated reference game benchmark for artificial agents, which requires such adaptability to succeed. We proposed a continual learning approach allowing agents to form context-specific conventions by fine-tuning general-purpose representations. Even when pre-trained models initially perform inaccurately or inefficiently, our approach allows such models to quickly adapt to their partner's language in the given context and thus become more accurate and more efficient using common ground.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Traditionally, this semantic function is truth-conditional, mapping utterance-state pairs to Boolean values, but recent approaches have shifted to more graded, real-valued functions such as those implemented by neural networks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "For the rest of this paper, we only consider the case of adapting to one partner, so we will drop the partner index i.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Note that our contexts were selected to be challenging under the impoverished language prior of our pre-trained listener model, but were not expected to require any adaptation for human listeners to achieve high accuracy; see Hawkins et al. (2020a) for a more challenging stimulus domain used to elicit strong human adaptation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This research was supported in part by a Stanford HAI Hoffman-Yee Research Grant, Office of Naval Research grant ONR MURI N00014-16-1-2007 and DARPA agreement FA8650-19-C-7923, as well as NSF award #1911835 to RDH, and NSF award #1941722 to DS. We are grateful to audiences at the 2019 ICML Workshop on Adaptive and Multi-Task Learning, where an early version of this work was presented, and to three anonymous reviewers for their insightful comments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/hawkrobe/ continual-adaptation", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "All code and materials available at:", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Reasoning about pragmatics with neural listeners and speakers", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Andreas", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1173--1182", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Andreas and Dan Klein. 2016. Reasoning about pragmatics with neural listeners and speakers. In Proceedings of EMNLP, pages 1173-1182.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Statistical language model adaptation: review and perspectives. Speech Communication", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Jerome", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bellegarda", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "", |
| "volume": "42", |
| "issue": "", |
| "pages": "93--108", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jerome R Bellegarda. 2004. Statistical language model adaptation: review and perspectives. Speech Com- munication, 42(1):93-108.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A theory of learning from different domains", |
| "authors": [ |
| { |
| "first": "Shai", |
| "middle": [], |
| "last": "Ben-David", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Blitzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Koby", |
| "middle": [], |
| "last": "Crammer", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Kulesza", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennifer", |
| "middle": [ |
| "Wortman" |
| ], |
| "last": "Vaughan", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Machine learning", |
| "volume": "79", |
| "issue": "1-2", |
| "pages": "151--175", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shai Ben-David, John Blitzer, Koby Crammer, Alex Kulesza, Fernando Pereira, and Jennifer Wortman Vaughan. 2010. A theory of learning from different domains. Machine learning, 79(1-2):151-175.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Pragmatic reasoning through semantic inference", |
| "authors": [ |
| { |
| "first": "Leon", |
| "middle": [], |
| "last": "Bergen", |
| "suffix": "" |
| }, |
| { |
| "first": "Roger", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leon Bergen, Roger Levy, and Noah Goodman. 2016. Pragmatic reasoning through semantic inference. Semantics and Pragmatics, 9(20).", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Biographies, bollywood, boom-boxes and blenders: Domain adaptation for sentiment classification", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Blitzer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "440--447", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Blitzer, Mark Dredze, and Fernando Pereira. 2007. Biographies, bollywood, boom-boxes and blenders: Domain adaptation for sentiment classification. In Proceedings of ACL, pages 440-447.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The role of beliefs in lexical alignment: Evidence from dialogs with humans and computers", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Holly P Branigan", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Pickering", |
| "suffix": "" |
| }, |
| { |
| "first": "Janet", |
| "middle": [ |
| "F" |
| ], |
| "last": "Pearson", |
| "suffix": "" |
| }, |
| { |
| "first": "Ash", |
| "middle": [], |
| "last": "Mclean", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Cognition", |
| "volume": "121", |
| "issue": "1", |
| "pages": "41--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Holly P Branigan, Martin J Pickering, Jamie Pearson, Janet F McLean, and Ash Brown. 2011. The role of beliefs in lexical alignment: Evidence from dialogs with humans and computers. Cognition, 121(1):41- 57.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Microsoft COCO captions: Data collection and evaluation server", |
| "authors": [ |
| { |
| "first": "Xinlei", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsung-Yi", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramakrishna", |
| "middle": [], |
| "last": "Vedantam", |
| "suffix": "" |
| }, |
| { |
| "first": "Saurabh", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Doll\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "C Lawrence", |
| "middle": [], |
| "last": "Zitnick", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1504.00325" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakr- ishna Vedantam, Saurabh Gupta, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2015. Microsoft COCO cap- tions: Data collection and evaluation server. arXiv preprint arXiv:1504.00325.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "BabyAI: First steps towards grounded language learning with a human in the loop", |
| "authors": [ |
| { |
| "first": "Maxime", |
| "middle": [], |
| "last": "Chevalier-Boisvert", |
| "suffix": "" |
| }, |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Salem", |
| "middle": [], |
| "last": "Lahlou", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucas", |
| "middle": [], |
| "last": "Willems", |
| "suffix": "" |
| }, |
| { |
| "first": "Chitwan", |
| "middle": [], |
| "last": "Saharia", |
| "suffix": "" |
| }, |
| { |
| "first": "Thien", |
| "middle": [], |
| "last": "Huu Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 7th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maxime Chevalier-Boisvert, Dzmitry Bahdanau, Salem Lahlou, Lucas Willems, Chitwan Saharia, Thien Huu Nguyen, and Yoshua Bengio. 2019. BabyAI: First steps towards grounded language learning with a human in the loop. In Proceedings of the 7th International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "QUAC: Question answering in context", |
| "authors": [ |
| { |
| "first": "Eunsol", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "He", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Yatskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Wentau", |
| "middle": [], |
| "last": "Yih", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2174--2184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen- tau Yih, Yejin Choi, Percy Liang, and Luke Zettle- moyer. 2018. QUAC: Question answering in con- text. In Proceedings of EMNLP, pages 2174-2184.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Using language", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Herbert", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Herbert H Clark. 1996. Using language. Cambridge University Press.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Referring as a collaborative process", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Herbert", |
| "suffix": "" |
| }, |
| { |
| "first": "Deanna", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wilkes-Gibbs", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Cognition", |
| "volume": "22", |
| "issue": "1", |
| "pages": "1--39", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Herbert H Clark and Deanna Wilkes-Gibbs. 1986. Referring as a collaborative process. Cognition, 22(1):1-39.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Pragmatically informative image captioning with character-level reference", |
| "authors": [ |
| { |
| "first": "Reuben", |
| "middle": [], |
| "last": "Cohn-Gordon", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "439--443", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reuben Cohn-Gordon, Noah Goodman, and Chris Potts. 2018. Pragmatically informative image cap- tioning with character-level reference. In Proceed- ings of NAACL, pages 439-443.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "A nice derangement of epitaphs", |
| "authors": [ |
| { |
| "first": "Donald", |
| "middle": [], |
| "last": "Davidson", |
| "suffix": "" |
| } |
| ], |
| "year": 1986, |
| "venue": "Philosophical grounds of rationality: Intentions, categories, ends", |
| "volume": "4", |
| "issue": "", |
| "pages": "157--174", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Donald Davidson. 1986. A nice derangement of epi- taphs. Philosophical grounds of rationality: Inten- tions, categories, ends, 4:157-174.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Neural evidence for bayesian trial-by-trial adaptation on the N400 during semantic priming", |
| "authors": [ |
| { |
| "first": "Nathaniel", |
| "middle": [], |
| "last": "Delaney-Busch", |
| "suffix": "" |
| }, |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Morgan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ellen", |
| "middle": [], |
| "last": "Lau", |
| "suffix": "" |
| }, |
| { |
| "first": "Gina", |
| "middle": [ |
| "R" |
| ], |
| "last": "Kuperberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Cognition", |
| "volume": "187", |
| "issue": "", |
| "pages": "10--20", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nathaniel Delaney-Busch, Emily Morgan, Ellen Lau, and Gina R Kuperberg. 2019. Neural evidence for bayesian trial-by-trial adaptation on the N400 during semantic priming. Cognition, 187:10-20.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Learning to interpret utterances using dialogue history", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Devault", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Stone", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "184--192", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David DeVault and Matthew Stone. 2009. Learning to interpret utterances using dialogue history. In Pro- ceedings of EACL, pages 184-192.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Universal principles in the repair of communication problems", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dingemanse", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Se\u00e1n", |
| "suffix": "" |
| }, |
| { |
| "first": "Julija", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Joe", |
| "middle": [], |
| "last": "Baranova", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Blythe", |
| "suffix": "" |
| }, |
| { |
| "first": "Simeon", |
| "middle": [], |
| "last": "Drew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Floyd", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Rosa", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gisladottir", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Kobin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kendrick", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Stephen", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Levinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Giovanni", |
| "middle": [], |
| "last": "Manrique", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [ |
| "J" |
| ], |
| "last": "Rossi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Enfield", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "PloS one", |
| "volume": "10", |
| "issue": "9", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Dingemanse, Se\u00e1n G Roberts, Julija Baranova, Joe Blythe, Paul Drew, Simeon Floyd, Rosa S Gisladottir, Kobin H Kendrick, Stephen C Levin- son, Elizabeth Manrique, Giovanni Rossi, and N. J. Enfield. 2015. Universal principles in the repair of communication problems. PloS one, 10(9):e0136100.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Open' class repair initiators in response to sequential sources of troubles in conversation", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Drew", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Journal of Pragmatics", |
| "volume": "28", |
| "issue": "1", |
| "pages": "69--101", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Drew. 1997. 'Open' class repair initiators in re- sponse to sequential sources of troubles in conversa- tion. Journal of Pragmatics, 28(1):69-101.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Model-agnostic meta-learning for fast adaptation of deep networks", |
| "authors": [ |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Finn", |
| "suffix": "" |
| }, |
| { |
| "first": "Pieter", |
| "middle": [], |
| "last": "Abbeel", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Levine", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "1126--1135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chelsea Finn, Pieter Abbeel, and Sergey Levine. 2017. Model-agnostic meta-learning for fast adaptation of deep networks. In Proceedings of the 34th Inter- national Conference on Machine Learning, pages 1126-1135.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Using speakers' referential intentions to model early cross-situational word learning", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Noah", |
| "suffix": "" |
| }, |
| { |
| "first": "Joshua", |
| "middle": [ |
| "B" |
| ], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tenenbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Psychological Science", |
| "volume": "20", |
| "issue": "5", |
| "pages": "578--585", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael C Frank, Noah D Goodman, and Joshua B Tenenbaum. 2009. Using speakers' referential inten- tions to model early cross-situational word learning. Psychological Science, 20(5):578-585.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Unified pragmatic models for generating and following instructions", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Fried", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Andreas", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1951--1963", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Fried, Jacob Andreas, and Dan Klein. 2018. Unified pragmatic models for generating and follow- ing instructions. In Proceedings of NAACL, pages 1951-1963.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Information asymmetry in KL-regularized RL", |
| "authors": [ |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Galashov", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Siddhant", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonard", |
| "middle": [], |
| "last": "Jayakumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruva", |
| "middle": [], |
| "last": "Hasenclever", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Tirumala", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Schwarz", |
| "suffix": "" |
| }, |
| { |
| "first": "Wojciech", |
| "middle": [ |
| "M" |
| ], |
| "last": "Desjardins", |
| "suffix": "" |
| }, |
| { |
| "first": "Yee", |
| "middle": [ |
| "Whye" |
| ], |
| "last": "Czarnecki", |
| "suffix": "" |
| }, |
| { |
| "first": "Razvan", |
| "middle": [], |
| "last": "Teh", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Pascanu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Heess", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 7th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexandre Galashov, Siddhant M Jayakumar, Leonard Hasenclever, Dhruva Tirumala, Jonathan Schwarz, Guillaume Desjardins, Wojciech M Czarnecki, Yee Whye Teh, Razvan Pascanu, and Nicolas Heess. 2018. Information asymmetry in KL-regularized RL. In Proceedings of the 7th International Con- ference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Domain adaptation for large-scale sentiment classification: A deep learning approach", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of the 28th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "513--520", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot, Antoine Bordes, and Yoshua Bengio. 2011. Domain adaptation for large-scale sentiment classification: A deep learning approach. In Pro- ceedings of the 28th International Conference on Machine Learning, pages 513-520.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Pragmatic language interpretation as probabilistic inference", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Noah", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael C", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Trends in Cognitive Sciences", |
| "volume": "20", |
| "issue": "11", |
| "pages": "818--829", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Noah D Goodman and Michael C Frank. 2016. Prag- matic language interpretation as probabilistic infer- ence. Trends in Cognitive Sciences, 20(11):818 - 829.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Recasting gradient-based meta-learning as hierarchical bayes", |
| "authors": [ |
| { |
| "first": "Erin", |
| "middle": [], |
| "last": "Grant", |
| "suffix": "" |
| }, |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Finn", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Levine", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Darrell", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 6th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erin Grant, Chelsea Finn, Sergey Levine, Trevor Darrell, and Thomas Griffiths. 2018. Recasting gradient-based meta-learning as hierarchical bayes. In Proceedings of the 6th International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Logic and conversation", |
| "authors": [ |
| { |
| "first": "H", |
| "middle": [ |
| "P" |
| ], |
| "last": "Grice", |
| "suffix": "" |
| } |
| ], |
| "year": 1975, |
| "venue": "Syntax and Semantics", |
| "volume": "", |
| "issue": "", |
| "pages": "43--58", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "H. P. Grice. 1975. Logic and conversation. In P. Cole and J. Morgan, editors, Syntax and Semantics, pages 43-58. Academic Press, New York.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "The effect of speaker-specific information on pragmatic inferences", |
| "authors": [ |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Grodner", |
| "suffix": "" |
| }, |
| { |
| "first": "Julie", |
| "middle": [ |
| "C" |
| ], |
| "last": "Sedivy", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "The processing and acquisition of reference", |
| "volume": "2327", |
| "issue": "", |
| "pages": "239--272", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniel Grodner and Julie C Sedivy. 2011. The effect of speaker-specific information on pragmatic infer- ences. In The processing and acquisition of refer- ence, volume 2327, pages 239-272. MIT Press.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The PhotoBook dataset: Building common ground through visually-grounded dialogue", |
| "authors": [ |
| { |
| "first": "Janosch", |
| "middle": [], |
| "last": "Haber", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Baumg\u00e4rtner", |
| "suffix": "" |
| }, |
| { |
| "first": "Ece", |
| "middle": [], |
| "last": "Takmaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Lieke", |
| "middle": [], |
| "last": "Gelderloos", |
| "suffix": "" |
| }, |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Fern\u00e1ndez", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1895--1910", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Janosch Haber, Tim Baumg\u00e4rtner, Ece Takmaz, Lieke Gelderloos, Elia Bruni, and Raquel Fern\u00e1ndez. 2019. The PhotoBook dataset: Building common ground through visually-grounded dialogue. In Proceedings of ACL, pages 1895-1910.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Convention-formation in iterated reference games", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Robert", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hawkins", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah D", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 39th annual meeting of the Cognitive Science Society", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert D Hawkins, Michael C Frank, and Noah D Goodman. 2017. Convention-formation in iterated reference games. In Proceedings of the 39th annual meeting of the Cognitive Science Society.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Characterizing the dynamics of learning in repeated reference games", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Robert", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hawkins", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah D", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Cognitive Science", |
| "volume": "44", |
| "issue": "6", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert D Hawkins, Michael C Frank, and Noah D Goodman. 2020a. Characterizing the dynamics of learning in repeated reference games. Cognitive Sci- ence, 44(6):e12845.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Generalizing meanings from partners to populations: Hierarchical inference supports convention formation on networks", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Robert", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hawkins", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Noah", |
| "suffix": "" |
| }, |
| { |
| "first": "Adele", |
| "middle": [ |
| "E" |
| ], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas L", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 42nd annual meeting of the Cognitive Science Society", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert D Hawkins, Noah D Goodman, Adele E Gold- berg, and Thomas L Griffiths. 2020b. Generalizing meanings from partners to populations: Hierarchi- cal inference supports convention formation on net- works. In Proceedings of the 42nd annual meeting of the Cognitive Science Society.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Low-rank RNN adaptation for context-aware language modeling", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Jaech", |
| "suffix": "" |
| }, |
| { |
| "first": "Mari", |
| "middle": [], |
| "last": "Ostendorf", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "497--510", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Jaech and Mari Ostendorf. 2018a. Low-rank RNN adaptation for context-aware language model- ing. Transactions of the Association for Computa- tional Linguistics, 6:497-510.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Personalized language model for query auto-completion", |
| "authors": [ |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Jaech", |
| "suffix": "" |
| }, |
| { |
| "first": "Mari", |
| "middle": [], |
| "last": "Ostendorf", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "700--705", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aaron Jaech and Mari Ostendorf. 2018b. Personalized language model for query auto-completion. In Pro- ceedings of ACL, pages 700-705.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Robust speech perception: Recognize the familiar, generalize to the similar, and adapt to the novel", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Dave", |
| "suffix": "" |
| }, |
| { |
| "first": "T Florian", |
| "middle": [], |
| "last": "Kleinschmidt", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jaeger", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Psychological Review", |
| "volume": "122", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dave F Kleinschmidt and T Florian Jaeger. 2015. Ro- bust speech perception: Recognize the familiar, gen- eralize to the similar, and adapt to the novel. Psycho- logical Review, 122(2):148.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "On the dynamic adaptation of stochastic language models", |
| "authors": [ |
| { |
| "first": "Reinhard", |
| "middle": [], |
| "last": "Kneser", |
| "suffix": "" |
| }, |
| { |
| "first": "Volker", |
| "middle": [], |
| "last": "Steinbiss", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "586--589", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reinhard Kneser and Volker Steinbiss. 1993. On the dynamic adaptation of stochastic language mod- els. In IEEE International Conference on Acoustics, Speech, and Signal Processing, pages 586-589.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Do (and say) as I say: Linguistic adaptation in human-computer dialogs", |
| "authors": [ |
| { |
| "first": "Theodora", |
| "middle": [], |
| "last": "Koulouri", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanislao", |
| "middle": [], |
| "last": "Lauria", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "D" |
| ], |
| "last": "Macredie", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "31", |
| "issue": "", |
| "pages": "59--95", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theodora Koulouri, Stanislao Lauria, and Robert D Macredie. 2016. Do (and say) as I say: Linguis- tic adaptation in human-computer dialogs. Human- Computer Interaction, 31(1):59-95.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Dynamic evaluation of neural sequence models", |
| "authors": [ |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "Krause", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Kahembwe", |
| "suffix": "" |
| }, |
| { |
| "first": "Iain", |
| "middle": [], |
| "last": "Murray", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Renals", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 35th International Conference on Machine Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "2771--2780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ben Krause, Emmanuel Kahembwe, Iain Murray, and Steve Renals. 2017. Dynamic evaluation of neural sequence models. In Proceedings of the 35th In- ternational Conference on Machine Learning, pages 2771-2780.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Changes in reference phrases as a function of frequency of usage in social interaction: A preliminary study", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Robert", |
| "suffix": "" |
| }, |
| { |
| "first": "Sidney", |
| "middle": [], |
| "last": "Krauss", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Weinheimer", |
| "suffix": "" |
| } |
| ], |
| "year": 1964, |
| "venue": "Psychonomic Science", |
| "volume": "", |
| "issue": "", |
| "pages": "113--114", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert M Krauss and Sidney Weinheimer. 1964. Changes in reference phrases as a function of fre- quency of usage in social interaction: A preliminary study. Psychonomic Science, 1(1-12):113-114.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Multi-agent cooperation and the emergence of (natural) language", |
| "authors": [ |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Lazaridou", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Peysakhovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 5th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angeliki Lazaridou, Alexander Peysakhovich, and Marco Baroni. 2016. Multi-agent cooperation and the emergence of (natural) language. In Proceed- ings of the 5th International Conference on Learn- ing Representations.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Convention: A philosophical study", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| } |
| ], |
| "year": 1969, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Lewis. 1969. Convention: A philosophical study. Harvard University Press.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Learning through dialogue interactions by asking questions", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Alexander", |
| "suffix": "" |
| }, |
| { |
| "first": "Sumit", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Chopra", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 5th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Alexander H Miller, Sumit Chopra, Marc'Aurelio Ranzato, and Jason Weston. 2016. Learning through dialogue interactions by asking questions. In Proceedings of the 5th International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "A Bayesian divergence prior for classifier adaptation", |
| "authors": [ |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Bilmes", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Artificial Intelligence and Statistics", |
| "volume": "", |
| "issue": "", |
| "pages": "275--282", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiao Li and Jeff Bilmes. 2007. A Bayesian divergence prior for classifier adaptation. In Artificial Intelli- gence and Statistics, pages 275-282.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Microsoft COCO: Common objects in context", |
| "authors": [ |
| { |
| "first": "Tsung-Yi", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Maire", |
| "suffix": "" |
| }, |
| { |
| "first": "Serge", |
| "middle": [], |
| "last": "Belongie", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Hays", |
| "suffix": "" |
| }, |
| { |
| "first": "Pietro", |
| "middle": [], |
| "last": "Perona", |
| "suffix": "" |
| }, |
| { |
| "first": "Deva", |
| "middle": [], |
| "last": "Ramanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Doll\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "C Lawrence", |
| "middle": [], |
| "last": "Zitnick", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ECCV", |
| "volume": "", |
| "issue": "", |
| "pages": "740--755", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2014. Microsoft COCO: Common objects in context. In Proceedings of ECCV, pages 740-755. Springer.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Investigations on speaker adaptation of LSTM RNN models for speech recognition", |
| "authors": [ |
| { |
| "first": "Chaojun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yongqiang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kshitiz", |
| "middle": [], |
| "last": "Kumar", |
| "suffix": "" |
| }, |
| { |
| "first": "Yifan", |
| "middle": [], |
| "last": "Gong", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "5020--5024", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chaojun Liu, Yongqiang Wang, Kshitiz Kumar, and Yifan Gong. 2016. Investigations on speaker adap- tation of LSTM RNN models for speech recogni- tion. In Proceedings of the IEEE International Con- ference on Acoustics, Speech and Signal Processing, pages 5020-5024.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Referential communication and category acquisition", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Arthur", |
| "suffix": "" |
| }, |
| { |
| "first": "Valerie", |
| "middle": [ |
| "S" |
| ], |
| "last": "Markman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Makin", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Journal of Experimental Psychology: General", |
| "volume": "127", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Arthur B Markman and Valerie S Makin. 1998. Ref- erential communication and category acquisition. Journal of Experimental Psychology: General, 127(4):331.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Vision: A computational investigation into the human representation and processing of visual information", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Marr", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Marr. 2010. Vision: A computational investiga- tion into the human representation and processing of visual information. MIT press.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "When conceptual pacts are broken: Partner-specific effects on the comprehension of referring expressions", |
| "authors": [ |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Metzing", |
| "suffix": "" |
| }, |
| { |
| "first": "Susan", |
| "middle": [ |
| "E" |
| ], |
| "last": "Brennan", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Memory and Language", |
| "volume": "49", |
| "issue": "2", |
| "pages": "201--213", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charles Metzing and Susan E Brennan. 2003. When conceptual pacts are broken: Partner-specific ef- fects on the comprehension of referring expressions. Journal of Memory and Language, 49(2):201-213.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "On speaker adaptation of long short-term memory recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Yajie", |
| "middle": [], |
| "last": "Miao", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Metze", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 16th Annual Conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "1101--1105", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yajie Miao and Florian Metze. 2015. On speaker adap- tation of long short-term memory recurrent neural networks. In Proceedings of the 16th Annual Con- ference of the International Speech Communication Association, pages 1101-1105.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Recurrent neural network based language model", |
| "authors": [ |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Karafi\u00e1t", |
| "suffix": "" |
| }, |
| { |
| "first": "Luk\u00e1\u0161", |
| "middle": [], |
| "last": "Burget", |
| "suffix": "" |
| }, |
| { |
| "first": "Ja\u0148", |
| "middle": [], |
| "last": "Cernock\u1ef3", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Khudanpur", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the 11th annual conference of the International Speech Communication Association", |
| "volume": "", |
| "issue": "", |
| "pages": "1045--1048", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom\u00e1\u0161 Mikolov, Martin Karafi\u00e1t, Luk\u00e1\u0161 Burget, Ja\u0148 Cernock\u1ef3, and Sanjeev Khudanpur. 2010. Recur- rent neural network based language model. In Pro- ceedings of the 11th annual conference of the Inter- national Speech Communication Association, pages 1045-1048.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Colors in context: A pragmatic neural model for grounded language understanding", |
| "authors": [ |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "D" |
| ], |
| "last": "Hawkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "D" |
| ], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "325--338", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Will Monroe, Robert D. Hawkins, Noah D. Goodman, and Christopher Potts. 2017. Colors in context: A pragmatic neural model for grounded language un- derstanding. Transactions of the Association for Computational Linguistics, 5:325-338.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Deep online learning via meta-learning: Continual adaptation for model-based RL", |
| "authors": [ |
| { |
| "first": "Anusha", |
| "middle": [], |
| "last": "Nagabandi", |
| "suffix": "" |
| }, |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Finn", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Levine", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 7th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anusha Nagabandi, Chelsea Finn, and Sergey Levine. 2019. Deep online learning via meta-learning: Con- tinual adaptation for model-based RL. In Proceed- ings of the 7th International Conference on Learn- ing Representations.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "A simple but effective method to incorporate multi-turn context with BERT for conversational machine comprehension", |
| "authors": [ |
| { |
| "first": "Yasuhito", |
| "middle": [], |
| "last": "Ohsugi", |
| "suffix": "" |
| }, |
| { |
| "first": "Itsumi", |
| "middle": [], |
| "last": "Saito", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyosuke", |
| "middle": [], |
| "last": "Nishida", |
| "suffix": "" |
| }, |
| { |
| "first": "Hisako", |
| "middle": [], |
| "last": "Asano", |
| "suffix": "" |
| }, |
| { |
| "first": "Junji", |
| "middle": [], |
| "last": "Tomita", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the First Workshop on NLP for Conversational AI", |
| "volume": "", |
| "issue": "", |
| "pages": "11--17", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yasuhito Ohsugi, Itsumi Saito, Kyosuke Nishida, Hisako Asano, and Junji Tomita. 2019. A simple but effective method to incorporate multi-turn con- text with BERT for conversational machine compre- hension. In Proceedings of the First Workshop on NLP for Conversational AI, pages 11-17.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Why discourse affects speakers' choice of referring expressions", |
| "authors": [ |
| { |
| "first": "Naho", |
| "middle": [], |
| "last": "Orita", |
| "suffix": "" |
| }, |
| { |
| "first": "Eliana", |
| "middle": [], |
| "last": "Vornov", |
| "suffix": "" |
| }, |
| { |
| "first": "Naomi", |
| "middle": [], |
| "last": "Feldman", |
| "suffix": "" |
| }, |
| { |
| "first": "Hal", |
| "middle": [], |
| "last": "Daum\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Iii", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1639--1649", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Naho Orita, Eliana Vornov, Naomi Feldman, and Hal Daum\u00e9 III. 2015. Why discourse affects speakers' choice of referring expressions. In Proceedings of ACL, pages 1639-1649.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Stochastic language adaptation over time and state in natural spoken dialog systems", |
| "authors": [ |
| { |
| "first": "Giuseppe", |
| "middle": [], |
| "last": "Riccardi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Allen L Gorin", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "IEEE Transactions on Speech and Audio Processing", |
| "volume": "8", |
| "issue": "1", |
| "pages": "3--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Giuseppe Riccardi and Allen L Gorin. 2000. Stochas- tic language adaptation over time and state in natu- ral spoken dialog systems. IEEE Transactions on Speech and Audio Processing, 8(1):3-10.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Catastrophic forgetting, rehearsal and pseudorehearsal", |
| "authors": [ |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Robins", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Connection Science", |
| "volume": "7", |
| "issue": "2", |
| "pages": "123--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anthony Robins. 1995. Catastrophic forgetting, re- hearsal and pseudorehearsal. Connection Science, 7(2):123-146.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Evidential strength of intonational cues and rational adaptation to (un-) reliable intonation", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Timo", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Roettger", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Franke", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Cognitive Science", |
| "volume": "43", |
| "issue": "7", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timo B Roettger and Michael Franke. 2019. Eviden- tial strength of intonational cues and rational adapta- tion to (un-) reliable intonation. Cognitive Science, 43(7):e12745.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Leveraging past references for robust language grounding", |
| "authors": [ |
| { |
| "first": "Subhro", |
| "middle": [], |
| "last": "Roy", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Noseworthy", |
| "suffix": "" |
| }, |
| { |
| "first": "Rohan", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "Daehyung", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicholas", |
| "middle": [], |
| "last": "Roy", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "430--440", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Subhro Roy, Michael Noseworthy, Rohan Paul, Dae- hyung Park, and Nicholas Roy. 2019. Leveraging past references for robust language grounding. In Proceedings of CoNLL, pages 430-440.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "2020. I know what you're probably going to say: Listener adaptation to variable use of uncertainty expressions", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Judith", |
| "middle": [], |
| "last": "Degen", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Cognition", |
| "volume": "203", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Schuster and Judith Degen. 2020. I know what you're probably going to say: Listener adapta- tion to variable use of uncertainty expressions. Cog- nition, 203:104285.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Learning and using language via recursive pragmatic reasoning about other agents", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Nathaniel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Goodman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3039--3047", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nathaniel J Smith, Noah Goodman, and Michael Frank. 2013. Learning and using language via recursive pragmatic reasoning about other agents. In Ad- vances in Neural Information Processing Systems, pages 3039-3047.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Getting to \"Hearer-old\": Charting referring expressions across time", |
| "authors": [ |
| { |
| "first": "Ieva", |
| "middle": [], |
| "last": "Stali\u016bnait\u0117", |
| "suffix": "" |
| }, |
| { |
| "first": "Hannah", |
| "middle": [], |
| "last": "Rohde", |
| "suffix": "" |
| }, |
| { |
| "first": "Bonnie", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| }, |
| { |
| "first": "Annie", |
| "middle": [], |
| "last": "Louis", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "4350--4359", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ieva Stali\u016bnait\u0117, Hannah Rohde, Bonnie Webber, and Annie Louis. 2018. Getting to \"Hearer-old\": Chart- ing referring expressions across time. In Proceed- ings of EMNLP, pages 4350-4359.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "Characterizing collective attention via descriptor context: A case study of public discussions of crisis events", |
| "authors": [ |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Diyi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 14th International AAAI Conference on Web and Social Media", |
| "volume": "", |
| "issue": "", |
| "pages": "650--660", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian Stewart, Diyi Yang, and Jacob Eisenstein. 2020. Characterizing collective attention via descriptor context: A case study of public discussions of cri- sis events. In Proceedings of the 14th International AAAI Conference on Web and Social Media, pages 650-660.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "A neural model of adaptation in reading", |
| "authors": [ |
| { |
| "first": "Marten", |
| "middle": [], |
| "last": "Van Schijndel", |
| "suffix": "" |
| }, |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "4704--4710", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marten Van Schijndel and Tal Linzen. 2018. A neural model of adaptation in reading. In Proceedings of EMNLP, pages 4704-4710.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Context-aware captions from context-agnostic supervision", |
| "authors": [ |
| { |
| "first": "Ramakrishna", |
| "middle": [], |
| "last": "Vedantam", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Murphy", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Gal", |
| "middle": [], |
| "last": "Chechik", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "1070--1079", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramakrishna Vedantam, Samy Bengio, Kevin Murphy, Devi Parikh, and Gal Chechik. 2017. Context-aware captions from context-agnostic supervision. In Pro- ceedings of the IEEE Conference on Computer Vi- sion and Pattern Recognition, pages 1070-1079.", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "Show and tell: A neural image caption generator", |
| "authors": [ |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Toshev", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru", |
| "middle": [], |
| "last": "Erhan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "3156--3164", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oriol Vinyals, Alexander Toshev, Samy Bengio, and Dumitru Erhan. 2015. Show and tell: A neural im- age caption generator. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recog- nition, pages 3156-3164.", |
| "links": null |
| }, |
| "BIBREF63": { |
| "ref_id": "b63", |
| "title": "Naturalizing a programming language via interactive learning", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Sida", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Ginn", |
| "suffix": "" |
| }, |
| { |
| "first": "Christoper", |
| "middle": [ |
| "D" |
| ], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "929--938", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sida I Wang, Samuel Ginn, Percy Liang, and Christoper D Manning. 2017. Naturalizing a pro- gramming language via interactive learning. In Pro- ceedings of ACL, pages 929-938.", |
| "links": null |
| }, |
| "BIBREF64": { |
| "ref_id": "b64", |
| "title": "Learning language games through interaction", |
| "authors": [ |
| { |
| "first": "I", |
| "middle": [], |
| "last": "Sida", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "2368--2378", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sida I Wang, Percy Liang, and Christopher D Manning. 2016. Learning language games through interaction. In Proceedings of ACL, page 2368-2378.", |
| "links": null |
| }, |
| "BIBREF65": { |
| "ref_id": "b65", |
| "title": "Coordinating beliefs in conversation", |
| "authors": [ |
| { |
| "first": "Deanna", |
| "middle": [], |
| "last": "Wilkes", |
| "suffix": "" |
| }, |
| { |
| "first": "-", |
| "middle": [], |
| "last": "Gibbs", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Herbert", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 1992, |
| "venue": "Journal of Memory and Language", |
| "volume": "31", |
| "issue": "2", |
| "pages": "183--194", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deanna Wilkes-Gibbs and Herbert H Clark. 1992. Co- ordinating beliefs in conversation. Journal of Mem- ory and Language, 31(2):183-194.", |
| "links": null |
| }, |
| "BIBREF66": { |
| "ref_id": "b66", |
| "title": "Talker-specificity and adaptation in quantifier interpretation", |
| "authors": [ |
| { |
| "first": "Ilker", |
| "middle": [], |
| "last": "Yildirim", |
| "suffix": "" |
| }, |
| { |
| "first": "Judith", |
| "middle": [], |
| "last": "Degen", |
| "suffix": "" |
| }, |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "T Florian", |
| "middle": [], |
| "last": "Tanenhaus", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Jaeger", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Journal of Memory and Language", |
| "volume": "87", |
| "issue": "", |
| "pages": "128--143", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilker Yildirim, Judith Degen, Michael K Tanenhaus, and T Florian Jaeger. 2016. Talker-specificity and adaptation in quantifier interpretation. Journal of Memory and Language, 87:128-143.", |
| "links": null |
| }, |
| "BIBREF67": { |
| "ref_id": "b67", |
| "title": "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| }, |
| { |
| "first": "Alice", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Micah", |
| "middle": [], |
| "last": "Hodosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hockenmaier", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "67--78", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Young, Alice Lai, Micah Hodosh, and Julia Hock- enmaier. 2014. From image descriptions to visual denotations: New similarity metrics for semantic in- ference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78.", |
| "links": null |
| }, |
| "BIBREF68": { |
| "ref_id": "b68", |
| "title": "KL-divergence regularized deep neural network adaptation for improved large vocabulary speech recognition", |
| "authors": [ |
| { |
| "first": "Dong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaisheng", |
| "middle": [], |
| "last": "Yao", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Gang", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Frank", |
| "middle": [], |
| "last": "Seide", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "7893--7897", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dong Yu, Kaisheng Yao, Hang Su, Gang Li, and Frank Seide. 2013. KL-divergence regularized deep neu- ral network adaptation for improved large vocabu- lary speech recognition. In Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing, pages 7893-7897.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Figure 1: We introduce a regularized continual learning approach allowing agents initialized with a pretrained language model \u0398 to iteratively infer the language model \u03b8 i used by a partner, over repeated interactions {t 1 , t 2 . . . } in an online reference game." |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "In a repeated reference game, a speaker agent must repeatedly communicate the identity of the same objects in context to a listener agent." |
| }, |
| "FIGREF4": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Communication becomes more efficient and accurate as our model adapts to a human speaker. Example contexts and utterances are shown. Error bars are bootstrapped 95% CIs." |
| }, |
| "FIGREF5": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Ablation studies of the listener model. (A-B) KL regularization prevents catastrophic forgetting over the course of adaptation. (C) Local rehearsal and pragmatic reasoning independently contribute to successful listener adaptation. Error bars are bootstrapped 95% CIs." |
| } |
| } |
| } |
| } |