| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:15:47.248054Z" |
| }, |
| "title": "Annotating anaphoric phenomena in situated dialogue", |
| "authors": [ |
| { |
| "first": "Sharid", |
| "middle": [], |
| "last": "Lo\u00e1iciga", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Potsdam", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Dobnik", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Gothenburg", |
| "location": { |
| "country": "Sweden" |
| } |
| }, |
| "email": "simon.dobnik@gu.se" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Schlangen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Potsdam", |
| "location": { |
| "country": "Germany" |
| } |
| }, |
| "email": "david.schlangen@uni-potsdam.de" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In recent years several corpora have been developed for vision and language tasks. With this paper, we intend to start a discussion on the annotation of referential phenomena in situated dialogue. We argue that there is still significant room for corpora that increase the complexity of both visual and linguistic domains and which capture different varieties of perceptual and conversational contexts. In addition, a rich annotation scheme covering a broad range of referential phenomena and compatible with the textual task of coreference resolution is necessary in order to take the most advantage of these corpora. Consequently, there are several open questions regarding the semantics of reference and annotation, and the extent to which standard textual coreference accounts for the situated dialogue genre. Working with two corpora on situated dialogue, we present our extension to the ARRAU (Uryupina et al., 2020) annotation scheme in order to start this discussion.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In recent years several corpora have been developed for vision and language tasks. With this paper, we intend to start a discussion on the annotation of referential phenomena in situated dialogue. We argue that there is still significant room for corpora that increase the complexity of both visual and linguistic domains and which capture different varieties of perceptual and conversational contexts. In addition, a rich annotation scheme covering a broad range of referential phenomena and compatible with the textual task of coreference resolution is necessary in order to take the most advantage of these corpora. Consequently, there are several open questions regarding the semantics of reference and annotation, and the extent to which standard textual coreference accounts for the situated dialogue genre. Working with two corpora on situated dialogue, we present our extension to the ARRAU (Uryupina et al., 2020) annotation scheme in order to start this discussion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "With the ease of combining representations from different modalities provided by neural networks, text and vision are coming together. There is a growing body of resources addressing a setting in which the visual context can be exploited to support a textual task, for example visual anaphora resolution. 1 Several corpora have been developed in the domain of vision and language (V&L), for example corpora of image captions (Lin et al., 2014; Young et al., 2014; , images and paragraph descriptions (Krause et al., 2017) , visual question answering (Antol et al., 2015) , visual dialogue (Das et al., 2017) and embodied question answering (Das et al., 2018) . Through these the V&L research has progressively moved from sentence descriptions to descriptions involving utterances and conversations, therefore adding complexity to their semantic representations. In parallel to the corpora, V&L systems have been developed but of course these are limited by the complexity of the task for which the dataset has been collected. The end goal of the current research is to move to a more complex linguistic setting involving multiparty dialogue and visual representations that go beyond individual images.", |
| "cite_spans": [ |
| { |
| "start": 305, |
| "end": 306, |
| "text": "1", |
| "ref_id": null |
| }, |
| { |
| "start": 425, |
| "end": 443, |
| "text": "(Lin et al., 2014;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 444, |
| "end": 463, |
| "text": "Young et al., 2014;", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 500, |
| "end": 521, |
| "text": "(Krause et al., 2017)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 550, |
| "end": 570, |
| "text": "(Antol et al., 2015)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 589, |
| "end": 607, |
| "text": "(Das et al., 2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 640, |
| "end": 658, |
| "text": "(Das et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Anaphora resolution has been studied both in the textual and situated dialogue domains (cf. Sukthanker et al. (2020) for an extensive survey of anaphora and coreference; (Kelleher et al., 2005; Seo et al., 2017; Kottur et al., 2018; Yu et al., 2019; Dobnik and Lo\u00e1iciga, 2019) ). In the textual domain, this has been formulated as a standard task with several corpora annotated uniformly for the most part, while in situated dialogue each corpus presents its own individual solution (cf. (Kelleher et al., 2005; Smith et al., 2011; Pustejovsky and Krishnaswamy, 2020) ). With the increasing interest in the combination of V&L in deep learning applications, multimodal resources are increasingly used in the context of traditional textual natural language processing (NLP) tasks. As such, it makes sense to consider a common annotation strategy both for the textual and situated dialogue domains, basing it on the rich work of textual anaphora resolution standards. Doing so, we also hope to get new insights about the semantics of reference in natural language.", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 116, |
| "text": "Sukthanker et al. (2020)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 170, |
| "end": 193, |
| "text": "(Kelleher et al., 2005;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 194, |
| "end": 211, |
| "text": "Seo et al., 2017;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 212, |
| "end": 232, |
| "text": "Kottur et al., 2018;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 233, |
| "end": 249, |
| "text": "Yu et al., 2019;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 250, |
| "end": 276, |
| "text": "Dobnik and Lo\u00e1iciga, 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 488, |
| "end": 511, |
| "text": "(Kelleher et al., 2005;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 512, |
| "end": 531, |
| "text": "Smith et al., 2011;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 532, |
| "end": 567, |
| "text": "Pustejovsky and Krishnaswamy, 2020)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Situated reference resolution involves grounding linguistic expressions in perceptual representations (Harnad, 1990) or representations of actions (Roy, 2005) . Anaphora resolution, traditionally a textual task, involves linking linguistic expressions referring to the same discourse entities (Stede, 2012) . While challenging, the task is defined by the familiar nature of written texts: linear, planned and structured; defining thus the mechanisms and devices found in them. In resources combining V&L, however, the textual part is often a dialogue or pairs of question-answers. As a result, the coreference devices differ from those found in texts and are closer to actual conversations in which people create reference to entities on the fly. This of course comes with its own challenges, but there are also some relations made easier since they can be grounded in the image.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 116, |
| "text": "(Harnad, 1990)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 147, |
| "end": 158, |
| "text": "(Roy, 2005)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 293, |
| "end": 306, |
| "text": "(Stede, 2012)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As V&L come together, there is therefore an increased need for extending resources for the task of visual anaphora resolution. This means engaging with the challenges along two axes:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Dialogue: built by two speakers who each have their own mental state and cognitive process but who are communicating through referring expressions which are projected in the same conversation. As conversations are linear (one cannot go back to the past or to the future) linguistic coreference is linear. \u2022 Shared physical context: simultaneous access to an image or other perceptual context. Same as in dialogue, the speakers have different viewpoints of the scene and need to build their individual mental states representing the scene guided by visual attention. However, once a representation of a visual scene is built, reference can be made to its representations in a non-linear fashion.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We present our extension to the ARRAU (Poesio, 2004; Artstein and Poesio, 2006; Uryupina et al., 2020) annotation scheme by analysing two situated dialogue corpora: the Cups corpus (Dobnik et al., 2020) and the Tell-me-more corpus (Ilinykh et al., 2019) , shown below in Figures 1 and 2 respectively. This exercise proved useful to pinpoint in what ways the purely textual document scenario is different from the domain of embodied interaction both in terms of the semantics of interaction and annotation practices.", |
| "cite_spans": [ |
| { |
| "start": 38, |
| "end": 52, |
| "text": "(Poesio, 2004;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 53, |
| "end": 79, |
| "text": "Artstein and Poesio, 2006;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 80, |
| "end": 102, |
| "text": "Uryupina et al., 2020)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 181, |
| "end": 202, |
| "text": "(Dobnik et al., 2020)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 231, |
| "end": 253, |
| "text": "(Ilinykh et al., 2019)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The Cups corpus contains a conversation between two participants over an (almost) identical visual scene involving a table and cups where participants have different locations. Some cups have been removed from each participant's view and they are instructed to discuss over a computer terminal in order to find the cups that each does not see. The ground truth of the visual scene is known as it has been artificially generated. It may take over an hour for the participants to solve the task and their activity results in free dialogue close to spoken conversations including phenomena such as clarifications, repairs, restarts and variable grammar. (The conversations are logged at a key-press level.) The Tell-me-more corpus consists of images accompanied with a short text of five complete sentences, collected by asking participants to describe the image to a friend, successively adding details in short constrained conversations. The genre of these texts is therefore mixed: in between standard text (as found in news text for example) and dialogue data which reflects the features found in conversations rather than written conventions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "These corpora are complementary as Cups gives us accurate visual ground truth information with free and unrestricted dialogue, while Tell-me-more offers a richer unrestricted image with short and task-constrained (pseudo-)dialogues.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we discuss a number of cases from these corpora that challenge both standard language grounding annotations as well as standard anaphora annotation. This work points thus towards required future work in creating anaphora annotation schemes that can handle situated dialogue.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Pointing to the inability of NLP tools to handle the textual part in situated dialogue, early works had described the need to ground the dialogue in the image in a manner informed by linguistics (Byron, 2003) .", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 208, |
| "text": "(Byron, 2003)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As content develops in a text, entities are introduced and re-mentioned, establishing discourse referents. The context is provided by the document and no extra-linguistic reference is needed for resolving the reference to an entity (Karttunen, 1969) . In situated dialogue, on the other hand, the visual modality brings the extra-linguistic context as a source of referents. Here, resolving references to entities can be thus achieved by either looking at the picture or relating to the information that has been said previously in the discourse. Both of these processes happen simultaneously and therefore their interaction must be explained by theories of cognitive processing related to attention and memory (Kelleher and Dobnik) . However, in order to understand both processes and their interaction we need to disentangle them. Extending the anaphora annotation paradigm is thus the best bet although not a lot of work exists in this area. Textual coreference Annotated data for the coreference resolution task has mainly focused on news texts and concrete nouns, excluding reference to events and other coreferential relations such as bridging, deixis, and ambiguous items well documented in the linguistic literature but deemed infrequent or too difficult to process (Poesio, 2016) . In contrast, there is a growing body of literature interested in phenomena beyond the nominal case (Kolhatkar et al., 2018; Nedoluzhko and Lapshinova-Koltunski, 2016) , resulting in new annotated corpora (Lapshinova-Koltunski et al., 2018; Zeldes, 2017; Uryupina et al., 2020) , although smaller in 1. it's a bedroom scene with the bed partially visible 2. the bed has a curved wooden headboard with slots like a fence 3. there is framed art hanging above the bed 4. to the left of the bed is a door, which is open 5. there is a small square nighstand next to the bed which has a lamp on top of it Moreover, as a product of this year's edition of the CRAC 2 and CODI 3 workshops, a shared task on anaphora resolution in dialogues has been proposed. This will undoubtedly result in additional corpora annotated with the standards used for the coreference resolution task.", |
| "cite_spans": [ |
| { |
| "start": 232, |
| "end": 249, |
| "text": "(Karttunen, 1969)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 711, |
| "end": 732, |
| "text": "(Kelleher and Dobnik)", |
| "ref_id": null |
| }, |
| { |
| "start": 1274, |
| "end": 1288, |
| "text": "(Poesio, 2016)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1390, |
| "end": 1414, |
| "text": "(Kolhatkar et al., 2018;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 1415, |
| "end": 1457, |
| "text": "Nedoluzhko and Lapshinova-Koltunski, 2016)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1495, |
| "end": 1530, |
| "text": "(Lapshinova-Koltunski et al., 2018;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 1531, |
| "end": 1544, |
| "text": "Zeldes, 2017;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 1545, |
| "end": 1567, |
| "text": "Uryupina et al., 2020)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Visual coreference Coreference work based on the popular VisDial dataset (Das et al., 2017) targets only a limited set of referential expressions, partly because it relies on automatic tools (Kottur et al., 2018; Yu et al., 2019) , which are known to be problematic with this genre. With a focus in grounded human interaction, there are corpora whose textual part comprises question answer pairs (Antol et al., 2015; Goyal et al., 2017) . Those, however, are short in nature, with few opportunities for re-mention of the different objects in the image and hence coreference. Last, corpora designed towards navigation and location involve considerable dialogue interaction between instruction giver and instruction follower which include examples of coreference. For example, the SCARE corpus (Stoia et al., 2008) provides natural interactions, it has been audio recorded and then transcribed, the conversations are long and there are frequent referring expressions (it is hard to understand transcribed dialogues on its own), but overall the size of the corpus is small. Thomason et al. (2019) present a corpus of 2050 short human-human interactions in a virtual environment collected with crowd-sourcing.", |
| "cite_spans": [ |
| { |
| "start": 73, |
| "end": 91, |
| "text": "(Das et al., 2017)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 191, |
| "end": 212, |
| "text": "(Kottur et al., 2018;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 213, |
| "end": 229, |
| "text": "Yu et al., 2019)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 396, |
| "end": 416, |
| "text": "(Antol et al., 2015;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 417, |
| "end": 436, |
| "text": "Goyal et al., 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 792, |
| "end": 812, |
| "text": "(Stoia et al., 2008)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 1071, |
| "end": 1093, |
| "text": "Thomason et al. (2019)", |
| "ref_id": "BIBREF41" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Referring expressions generation The goal in this area is to generate referring expressions over several turns of conversation in a natural and nonrepetitive way to the same (or different) grounded objects following principles of communicative discourse (Takmaz et al., 2020) . Here, the PhotoBook dataset (Haber et al., 2019) is used. Our work is complementary to these approaches as it focuses on the interpretative rather than generative aspects of reference and coreference.", |
| "cite_spans": [ |
| { |
| "start": 254, |
| "end": 275, |
| "text": "(Takmaz et al., 2020)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 306, |
| "end": 326, |
| "text": "(Haber et al., 2019)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "3 The ARRAU annotation scheme Deeply rooted in linguistic theory, the ARRAU corpus annotation scheme is particularly well-suited for annotating situated dialogue. Indeed, its annotation scheme was designed to accommodate different genres, including news, dialogue and narrative texts, and in consequence anaphoric phenomena beyond the nominal standard case typically found in other coreference corpora (Uryupina et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 402, |
| "end": 425, |
| "text": "(Uryupina et al., 2020)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The dialogue genre has its own idiosyncrasies not covered by annotation schemes designed for news text, for example collaborative completions giving way for discontinuous markables (Uryupina et al., 2020) , and more pronouns including deictics (M\u00fcller, 2007) . The annotation scheme also includes guidelines for bridging reference, a much less studied type of reference but very commonly used in the Tell-me-more corpus discussed here. ARRAU is also known for containing annotations for both referring and non-referring expressions. Most coreference corpora focus on identity anaphora, meaning that only multiple mentions of the same discourse entity are annotated, leaving out those mentioned only once, also known as singletons. The large OntoNotes corpus, for instance, does not include annotations of singletons or expletives.", |
| "cite_spans": [ |
| { |
| "start": 181, |
| "end": 204, |
| "text": "(Uryupina et al., 2020)", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 244, |
| "end": 258, |
| "text": "(M\u00fcller, 2007)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In the next section, we describe the general AR-RAU annotation scheme along with our proposed adaptations. With the goal of moving towards general guidelines for the situated dialogue genre, the extensions we present target the common challenges of our two corpora.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The first step is identifying the referring expressions or mentions to annotate. In ARRAU, all noun phrases are considered, marking the complete phrase with all its modifiers and not just its head. This includes noun phrases which are nonreferring such as pleonastics and also noun phrases not re-mentioned later in the text. The mentions also include personal pronouns and demonstrative pronouns used as deictics (to refer back to nonnominal antecedents).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotating situated dialogue 4.1 Mention identification and object detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We also consider all noun phrases, including pronouns and deictics as mentions. For Cups, we created a simple NP chunker based on the regular expression method (Bird et al., 2009 ) with moderate success: a manual annotation of one of the documents showed an error rate of about 30% (295 errors out of 1030 identified chunks). In contrast, for Tell-me-more we had annotators identify the NPs completely by hand.", |
| "cite_spans": [ |
| { |
| "start": 160, |
| "end": 178, |
| "text": "(Bird et al., 2009", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotating situated dialogue 4.1 Mention identification and object detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Compared to ARRAU, the noun phrases in these corpora are rather simple, without a lot of modifiers. However, this does not mean that mention identification is straightforward as complex noun phrases with embedded markables such as the blue cup with a white handle do arise. Consider also the blue cup to the left of the red cup, where a particular cup is referred to by taking another cup as a landmark: is it the left or the red cup or the left of the red cup which should be considered for re-mention?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Annotating situated dialogue 4.1 Mention identification and object detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Akin to the mention identification, the image in the multimodal corpora is processed in order to detect objects. In Cups, we have the ground truth of the scenes from which participants views have been generated. All the objects and geometrically defined regions are assigned a predefined ID as shown in Figure 1 . In Tell-me-more, the object labels are part of the underlying ADE20K data (Zhou et al., 2017) , extracted using tools from Schlangen (2019). Here, an automatic object classifier may not detect all the objects in the scene or assign them different labels than participants use when referring to them in the dialogue.", |
| "cite_spans": [ |
| { |
| "start": 388, |
| "end": 407, |
| "text": "(Zhou et al., 2017)", |
| "ref_id": "BIBREF48" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 303, |
| "end": 311, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Annotating situated dialogue 4.1 Mention identification and object detection", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The morphosyntactic properties of the mention are annotated, including gender (female, male, neutre), number (singular, plural, mass) and person (1st, 2nd, 3rd), and its semantic type (person, animate, concrete, space, time, plan (for actions), abstract, or unknown). We include all these categories used in ARRAU.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Characterisation of the mention", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "In addition, we have also extended them in order to include a cardinality attribute. This accounts for a common strategy of grouping things in order to refer to them collectively. In other words, objects can be created dynamically as the dialogue progresses. For example, when a speaker refers to the blue ones, these are not all the blue cups in the scene but a particular set of blue objects that were grouped at that point of the dialogue and which can then be subsequently re-mentioned.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Characterisation of the mention", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The cardinality attribute has the values unique and group. The first refers to objects represented by a single individual entity while groups refer to entities composed of several objects. Note that group is different from the mass number attribute in that mass nouns are usually singular. The value group refers to cases where the speaker decided to refer to a specific region of the image containing several entities together, for instance green curtains in sentence 4 in (1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Characterisation of the mention", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "(1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Characterisation of the mention", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "1. I see a picture of an entertainment room. 2. There is a round table in the foreground and a fussball table in the middle of the room, as well as a pool table further back. 3. There is a sitting area with chairs facing a television set. 4. The room has several windows with green curtains. 5. The floors are made of a brown tile.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Characterisation of the mention", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "As mentioned, ARRAU covers a broad range of anaphoric relations including both non-referring and referring noun phrases. Distinguishing between these two is non-trivial, and research around ARRAU have argued in favour of annotating both types (Poesio, 2016; Yu et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 243, |
| "end": 257, |
| "text": "(Poesio, 2016;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 258, |
| "end": 274, |
| "text": "Yu et al., 2020)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Characterisation of the reference", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "This includes mentions with a specific syntactic or semantic function: predication, expletive, idiom, incomplete or fragmentary expression, quantifier, and coordination. The last two are, by the authors own admission, controversial. Following ARRAU, we annotate all types of non-referential mentions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Non-referring", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "If a mention is identified as referring, then its information status needs to be annotated as discoursenew or discourse-old; discourse-old information needs to point to an antecedent. 4 This distinction signals whether an entity is mentioned a first or subsequent time, shaping the reader's discourse model of that particular discourse entity (Stede, 2012) .", |
| "cite_spans": [ |
| { |
| "start": 184, |
| "end": 185, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 343, |
| "end": 356, |
| "text": "(Stede, 2012)", |
| "ref_id": "BIBREF37" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Referring", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "Referring mentions yield coreference chainsthe sequence of mentions pointing to a same entity in a text -a central construct in the coreference resolution domain. Built on top of the document as a unit, this notion relies on and in turn informs theories about accessibility hierarchy and salience of entities (Ariel, 1988 (Ariel, , 2004 Grosz et al., 1995) .", |
| "cite_spans": [ |
| { |
| "start": 309, |
| "end": 321, |
| "text": "(Ariel, 1988", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 322, |
| "end": 336, |
| "text": "(Ariel, , 2004", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 337, |
| "end": 356, |
| "text": "Grosz et al., 1995)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Referring", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "These theories are based on the observation that some forms are used to introduce entities and some others to refer to them: some entities are discoursenew and some are discourse-old. In situated dialogue, the image provides an additional context and source of referents, but it does not follow that the status of subsequent mentions is old. In the example (2) below, the fact that the discourse starts with It is licensed by the image and this source of reference should be accounted for differently in the annotation than a genuine discourse-old case such as the it in sentence 2.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Referring", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "(2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Referring", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "1. It s a well-lit kitchen with stainded wooden cupboards. 2. There's a microwave mounted over the stove, which has a red tea kettle on it. 3. The appliances are black and stainless steel in the kitchen. 4. The countertops look like they're black granite. 5. The window has sunlight streaming in and it 's very brightly light.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Referring", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "In order to address these cases in the Tell-me-more corpus, we consider them discourse-old. Very importantly, in order to keep them distinct from genuinely old information in the discourse, we introduced a new value task for the antecedent (hence a discourse-old entity can have an antecedent which is a phrase, a segment, or the task). Our reasoning is that although the pronoun It does not have an antecedent in the text, it appears in the first position of the first sentence because the speaker was probably referring back to the image in the instructions \"Describe the image to a friend...\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Referring", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "In dialogue as found in the Cups corpus, on the other hand, references can be established either relative to utterances of a particular speaker or across utterances of different speakers, and in situated dialogue, references can also be established to the objects in the scene. This leads to another notable extension to the annotation scheme: the grounding of the entities to the image (Section 4.4).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Referring", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "An understudied referential relationship also included in the ARRAU guidelines is bridging, i.e. an associative relationship between two mentions (Versley et al., 2016) . When the status of a mention is either new or old, it is possible to annotate if the mention is a related object of some other entity. Here we follow the simplified scheme from Artstein and Poesio (2006) :", |
| "cite_spans": [ |
| { |
| "start": 146, |
| "end": 168, |
| "text": "(Versley et al., 2016)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 348, |
| "end": 374, |
| "text": "Artstein and Poesio (2006)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bridging", |
| "sec_num": "4.3.3" |
| }, |
| { |
| "text": "\u2022 Part: \"An object that stands in a part-of relation to an object previously mentioned.\" \u2022 Set: \"Relations that hold between a set and its elements, or between a set and a subset.\" \u2022 Other: \"Expressions containing the word other and referring to a second object of the same type as an object already mentioned.\" \u2022 Miscellaneous: \"Clear cases of bridging references that do not fall into any of the categories above.\" The Tell-me-more corpus is rich in examples of bridging. Since the corpus uses pictures of different rooms in a house, after a room is introduced, typically a series of objects belonging to that room follow, creating many opportunities for using a bridging reference mechanism. For instance, image your surprise if the second sentence of example (3) started with the toaster instead of the bed. Coherence will be immediately broken.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bridging", |
| "sec_num": "4.3.3" |
| }, |
| { |
| "text": "(3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bridging", |
| "sec_num": "4.3.3" |
| }, |
| { |
| "text": "1. This is a bedroom with a twin sized bed in it. 2. The bed has a blue bag laying on it and a green bad on the floor at the foot of the bed. 3. There is a nightstand aside of the bed with a water bottle on it. 4. There is an arched closet space on one wall and an arched shelving area too. 5. There is a small lamp attached to the wall at the head of the bed.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Bridging", |
| "sec_num": "4.3.3" |
| }, |
| { |
| "text": "In spoken discourse people try their best to ground the references so they make sure they understand each other. To do so, they rely on the mechanisms of memory and attention (Kelleher and Dobnik) . Memory controls how long objects referred to and objects perceived are cognitively salient in the mind of an agent, while attention controls the ratio of information that becomes salient coming from perception vs the amount of information coming from cognitive control of an agent (Lavie et al., 2004) . Most entities annotated as concrete references can be grounded to the image easily. Following the ARRAU-trains annotation closely, we have added an attribute on-image with values yes/no. If the value is yes, then the atribute bounding-box with values yes/no needs to be annotated as well. The idea here is to distinguish between grounded entities detected by the object detector, and those that although visible do not have a bounding box or predefined ID.", |
| "cite_spans": [ |
| { |
| "start": 175, |
| "end": 196, |
| "text": "(Kelleher and Dobnik)", |
| "ref_id": null |
| }, |
| { |
| "start": 480, |
| "end": 500, |
| "text": "(Lavie et al., 2004)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounding and referentiality", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "This last scenario can be difficult, such as base of the tub in example (4), where the object detector failed to recognise the target object. We observed, however, that this happens when the speakers refer to parts of the objects, and then the bridging annotation scheme can be smoothly applied.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounding and referentiality", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "1. This is a picture of a bathtub. 2. The tub is white. 3. The wall and base of the tub are brown. 4. The door appears to be glass. 5. There is a handrail on the side wall.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounding and referentiality", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "For bridging references, if a mention which is visible is in a part-of relation with another object which does have a bounding box, then we ground it to that object as well.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounding and referentiality", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "This process of referring to sub-objects is also fairly common in Cups. For example, participants refer to the cups handles and tops that we did not identify earlier.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounding and referentiality", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Last, the image also allows for typically semantic properties to be used to refer back to the objects: colour, shapes, sizes. These can be genuinely referential (a form of ellipsis) or used in attributive manner. Compare for example white in the second sentence of (4), with (5) below. 5P1: closest to me, from left to right red, blue, white, red P2: ok, on your side I only see red, blue, white", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounding and referentiality", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Note that in the case of mentions annotated as groups, we ground all the elements belonging to the group. However, deciding which elements exactly the speaker had in mind can be ambiguous. In (6) from Cups the speakers refer to rows of objects even though these are not arranged in strict geometric lines. Hence, what objects are included in a row is contextually defined and not always clear. 6P2: ok, so your next row P2: you said there 's a takeaway cup somewhere marooned all alone P1: Okay. So we have that row I described with the now found red cup. Then a takeaway cup that is between that row and the next. It's very much in the middle of the two rows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounding and referentiality", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Moreover, we observe references to different regions of the image, and these references change dynamically throughout the conversation, e.g. my left, your right, the first row. In the Cups corpus, we have split the scene into equal rectangular regions that are splitting the table into a grid as shown in Figure 1c . However, the grid nature of the subregions and their granularity are frequently insufficient as participants do not split the table to subregions in a grid-like manner but relative to the current focus on the scene and the topological arrangements of objects. In the example, \"the empty space in the second row of objects close to you\" an empty space has been designed as a new region which does not correspond to our projected gridlike regions. The references such dynamic objects must be resolved by the hearer and misunderstanding may occur, depending on the complexity and ambiguity of the scene.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 305, |
| "end": 314, |
| "text": "Figure 1c", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Grounding and referentiality", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Last, in the Cups corpus objects may be rereferred to again in different parts of a dialogue, potentially creating very long distance relationships between mentions. However, we generally restrict these to the scope of the dialogue games for which some parts of the corpus are also annotated.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Grounding and referentiality", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Our annotation is implemented using the MMAX tool (M\u00fcller and Strube, 2006) for compatibility with the ARRAU MMAX schemes. An example of the annotator interface is presented in Figure 3 . Besides the authors, three student assistants have been involved as annotators until now. We expect to release a first version of the annotation later during the year. This will include proper inter-annotator agreement metrics in order to evaluate the adequacy of the proposed schema.", |
| "cite_spans": [ |
| { |
| "start": 50, |
| "end": 75, |
| "text": "(M\u00fcller and Strube, 2006)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 177, |
| "end": 185, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The annotation process", |
| "sec_num": "4.5" |
| }, |
| { |
| "text": "Contrary to a Gricean-based analysis of spoken discourse, coherence-based theories of discourse do not traditionally take the cognitive state of the speaker as a necessary element to text interpretation (Bender and Lascarides, 2019) . In situated dialogue, however, although the image can be treated as the ground truth of the situation, the speaker's cognitive state has to be considered by the hearer, in order to disambiguate the utterances. In other words, the hearer makes a model of the beliefs, desires and intentions associated with the utterance. This is exemplified in the following excerpt from Cups where both participants do not see one of the two red cups close by, but each a different one. They mistakenly believe that there is only one missing red cup and this dis-alignment of their beliefs gradually leads to increasingly diverging cognitive states.", |
| "cite_spans": [ |
| { |
| "start": 203, |
| "end": 232, |
| "text": "(Bender and Lascarides, 2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unaddressed challenge: speakers' cognitive state", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "P2: there is an empty space on the table on the second row away from you P2: between the red and white mug (from left to right) P1: I have one thing there, a white funny top P2: ok, i'll mark it. DIALOGUE_STATE: B found O-25. P1: and the red one is slightly close to you P1: is that right? P1: to my left from that red mug there is a yellow mug P2: hm... P2: can't see that and now i'm confused DIALOGUE_STATE: B cannot see O-29. P2: describe the second row away from you like you see it P1: only one thing there, a white funny top P2: aha, so it's closer to you than those i call \"the second row\" P1: behind that, there is a yellow, red, white and blue P1: from my left to right P1: yes, that must be it! P1: so what do you see in the \"second row\" from my perspective? P2: i see a red, then space, then white and blue (same as katie's\") P2: no yellow P2: is it on the edge of the table? P2: on your left P1: ok, yes! DIALOGUE_STATE: inconsistent", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Unaddressed challenge: speakers' cognitive state", |
| "sec_num": "4.6" |
| }, |
| { |
| "text": "Different V&L resources provide with an opportunity to explore the notion of discourse entity and (co)reference in grounded context. Since the nature of contexts defined by the tasks in which the corpora were collected varies considerably we get an opportunity to study the phenomena over these contexts and get a more complete picture of reference. Extending the coreference annotation to the V&L domain is essential to understand the relationship between reference and coreference. Work around textual coreference has defined the task with insufficient consideration of the semantic aspects involved in the interpretation of anaphoric phenomena; whereas work from the V&L community assumes that coreferential information can be inferred latently. By extending the coreference annotation scheme to rich situated dialogue corpora, we make explicit the relations at play between the text and the image. The same mechanisms that humans adopt to solve coreference in the textual domain should underlay results in the V&L domain.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Indeed, reference is underspecified in both modalities; any kind of information extraction from these domains will benefit from mechanisms that resolve this underspecification: capturing coreference is a door to capturing coherence. Furthermore, a rich annotation scheme that is portable between tasks and contexts, leads to the development of corpora allowing the training of data driven systems for the V&L domain and social robotics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Also known as coreference resolution in the NLP domain, here we follow Poesio (2016) in our terminology.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Computational Models of Reference, Anaphora and Coreference, https://sites.google.com/view/ crac2021/ 3 Workshop on Computational Approaches to Discourse, https://sites.google.com/view/codi-2021/ accueil", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "An antecedent can always be annotated as ambiguous if a clear entity cannot be identified for a particular mention.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "VQA: Visual Question Answering", |
| "authors": [ |
| { |
| "first": "Stanislaw", |
| "middle": [], |
| "last": "Antol", |
| "suffix": "" |
| }, |
| { |
| "first": "Aishwarya", |
| "middle": [], |
| "last": "Agrawal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiasen", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruv", |
| "middle": [], |
| "last": "Batra", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "Lawrence" |
| ], |
| "last": "Zitnick", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE International Conference on Computer Vision (ICCV)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Mar- garet Mitchell, Dhruv Batra, C. Lawrence Zitnick, and Devi Parikh. 2015. VQA: Visual Question An- swering. In Proceedings of the IEEE International Conference on Computer Vision (ICCV).", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Referring and accessibility", |
| "authors": [ |
| { |
| "first": "Mira", |
| "middle": [ |
| "Ariel" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1988, |
| "venue": "Journal of Linguistics", |
| "volume": "24", |
| "issue": "1", |
| "pages": "65--87", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mira Ariel. 1988. Referring and accessibility. Journal of Linguistics, 24(1):65-87.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Accessibility marking: Discourse functions, discourse profiles, and processing cues", |
| "authors": [ |
| { |
| "first": "Mira", |
| "middle": [ |
| "Ariel" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Discourse Processes", |
| "volume": "37", |
| "issue": "", |
| "pages": "91--116", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mira Ariel. 2004. Accessibility marking: Discourse functions, discourse profiles, and processing cues. Discourse Processes, 37(2):91-116.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Arrau annotation manual (trains dialogues)", |
| "authors": [ |
| { |
| "first": "Ron", |
| "middle": [], |
| "last": "Artstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ron Artstein and Massimo Poesio. 2006. Arrau anno- tation manual (trains dialogues).", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Linguistic Fundamentals for Natural Language Processing II: 100 Essentials from Semantics and Pragmatics", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [ |
| "M" |
| ], |
| "last": "Bender", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Lascarides", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Synthesis Lectures on Human Language Technologies", |
| "volume": "12", |
| "issue": "3", |
| "pages": "1--268", |
| "other_ids": { |
| "DOI": [ |
| "10.2200/S00935ED1V02Y201907HLT043" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily M. Bender and Alex Lascarides. 2019. Linguis- tic Fundamentals for Natural Language Processing II: 100 Essentials from Semantics and Pragmatics. Synthesis Lectures on Human Language Technolo- gies, 12(3):1-268.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Natural language processing with Python", |
| "authors": [ |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bird", |
| "suffix": "" |
| }, |
| { |
| "first": "Ewan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Loper", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steven Bird, Ewan Klein, and Edward Loper. 2009. Natural language processing with Python, 1st ed edi- tion. O'Reilly, Beijing, Cambridge, Farnham, K\u00f6ln, Sebastopol and Tokyo.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Understanding referring expressions in situated language some challenges for real-world agents", |
| "authors": [ |
| { |
| "first": "K", |
| "middle": [], |
| "last": "Donna", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Byron", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the First International Workshop on Language Understanding and Agents for Real World Interaction", |
| "volume": "", |
| "issue": "", |
| "pages": "39--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Donna K Byron. 2003. Understanding referring ex- pressions in situated language some challenges for real-world agents. In Proceedings of the First Inter- national Workshop on Language Understanding and Agents for Real World Interaction, pages 39-47.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Embodied question answering", |
| "authors": [ |
| { |
| "first": "Abhishek", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Samyak", |
| "middle": [], |
| "last": "Datta", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgia", |
| "middle": [], |
| "last": "Gkioxari", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruv", |
| "middle": [], |
| "last": "Batra", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops", |
| "volume": "", |
| "issue": "", |
| "pages": "2054--2063", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abhishek Das, Samyak Datta, Georgia Gkioxari, Ste- fan Lee, Devi Parikh, and Dhruv Batra. 2018. Em- bodied question answering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 2054-2063.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "authors": [ |
| { |
| "first": "Abhishek", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Satwik", |
| "middle": [], |
| "last": "Kottur", |
| "suffix": "" |
| }, |
| { |
| "first": "Khushi", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Avi", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Deshraj", |
| "middle": [], |
| "last": "Yadav", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "F" |
| ], |
| "last": "Jos\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Moura", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruv", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Batra", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "326--335", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abhishek Das, Satwik Kottur, Khushi Gupta, Avi Singh, Deshraj Yadav, Jos\u00e9 MF Moura, Devi Parikh, and Dhruv Batra. 2017. Visual dialog. In Proceed- ings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 326-335.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Local alignment of frame of reference assignment in English and Swedish dialogue", |
| "authors": [ |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Dobnik", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Kelleher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christine", |
| "middle": [], |
| "last": "Howes", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Spatial Cognition XII: Proceedings of the 12th International Conference, Spatial Cognition 2020", |
| "volume": "", |
| "issue": "", |
| "pages": "251--267", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon Dobnik, John D. Kelleher, and Christine Howes. 2020. Local alignment of frame of reference assign- ment in English and Swedish dialogue. In Spatial Cognition XII: Proceedings of the 12th International Conference, Spatial Cognition 2020, Riga, Latvia, pages 251-267, Cham, Switzerland. Springer Inter- national Publishing.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "On visual coreference chains resolution", |
| "authors": [ |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Dobnik", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharid", |
| "middle": [], |
| "last": "Lo\u00e1iciga", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue -Poster Abstracts", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon Dobnik and Sharid Lo\u00e1iciga. 2019. On visual coreference chains resolution. In Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue -Poster Abstracts, London, United King- dom. SEMDIAL.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Making the V in VQA matter: Elevating the role of image understanding in Visual Question Answering", |
| "authors": [ |
| { |
| "first": "Yash", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Tejas", |
| "middle": [], |
| "last": "Khot", |
| "suffix": "" |
| }, |
| { |
| "first": "Douglas", |
| "middle": [], |
| "last": "Summers-Stay", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruv", |
| "middle": [], |
| "last": "Batra", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Conference on Computer Vision and Pattern Recognition (CVPR)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. 2017. Making the V in VQA matter: Elevating the role of image under- standing in Visual Question Answering. In Confer- ence on Computer Vision and Pattern Recognition (CVPR).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Centering: A framework for modelling the local coherence of discourse", |
| "authors": [ |
| { |
| "first": "Barbara", |
| "middle": [ |
| "J" |
| ], |
| "last": "Grosz", |
| "suffix": "" |
| }, |
| { |
| "first": "Aravind", |
| "middle": [ |
| "K" |
| ], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Weinstein", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Computational Linguistics", |
| "volume": "2", |
| "issue": "21", |
| "pages": "203--225", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barbara J. Grosz, Aravind K. Joshi, and Scott Wein- stein. 1995. Centering: A framework for modelling the local coherence of discourse. Computational Linguistics, 2(21):203-225.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The PhotoBook dataset: Building common ground through visually-grounded dialogue", |
| "authors": [ |
| { |
| "first": "Janosch", |
| "middle": [], |
| "last": "Haber", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Baumg\u00e4rtner", |
| "suffix": "" |
| }, |
| { |
| "first": "Ece", |
| "middle": [], |
| "last": "Takmaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Lieke", |
| "middle": [], |
| "last": "Gelderloos", |
| "suffix": "" |
| }, |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Fern\u00e1ndez", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1895--1910", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1184" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Janosch Haber, Tim Baumg\u00e4rtner, Ece Takmaz, Lieke Gelderloos, Elia Bruni, and Raquel Fern\u00e1ndez. 2019. The PhotoBook dataset: Building common ground through visually-grounded dialogue. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1895-1910, Flo- rence, Italy. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "The symbol grounding problem", |
| "authors": [ |
| { |
| "first": "Stevan", |
| "middle": [], |
| "last": "Harnad", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Physica D", |
| "volume": "42", |
| "issue": "1-3", |
| "pages": "335--346", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stevan Harnad. 1990. The symbol grounding problem. Physica D, 42(1-3):335-346.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Tell me more: A dataset of visual scene description sequences", |
| "authors": [ |
| { |
| "first": "Nikolai", |
| "middle": [], |
| "last": "Ilinykh", |
| "suffix": "" |
| }, |
| { |
| "first": "Sina", |
| "middle": [], |
| "last": "Zarrie\u00df", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Schlangen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "152--157", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-8621" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikolai Ilinykh, Sina Zarrie\u00df, and David Schlangen. 2019. Tell me more: A dataset of visual scene de- scription sequences. In Proceedings of the 12th In- ternational Conference on Natural Language Gener- ation, pages 152-157, Tokyo, Japan. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Discourse referents", |
| "authors": [ |
| { |
| "first": "Lauri", |
| "middle": [], |
| "last": "Karttunen", |
| "suffix": "" |
| } |
| ], |
| "year": 1969, |
| "venue": "International Conference on Computational Linguistics COLING 1969: Preprint No. 70", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lauri Karttunen. 1969. Discourse referents. In Inter- national Conference on Computational Linguistics COLING 1969: Preprint No. 70, S\u00e5nga S\u00e4by, Swe- den.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Dynamically structuring updating and interrelating representations of visual and linguistic discourse", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Kelleher", |
| "suffix": "" |
| }, |
| { |
| "first": "Fintan", |
| "middle": [ |
| "J" |
| ], |
| "last": "Costello", |
| "suffix": "" |
| }, |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Van Genabith", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Artificial Intelligence", |
| "volume": "167", |
| "issue": "", |
| "pages": "62--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John D. Kelleher, Fintan J. Costello, and Josef van Gen- abith. 2005. Dynamically structuring updating and interrelating representations of visual and linguistic discourse. Artificial Intelligence, 167:62-102.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Referring to the recently seen: reference and perceptual memory in situated dialogue", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Kelleher", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Dobnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "CLASP Papers in Computational Linguistics: Dialogue and Perception -Extended papers from DaP", |
| "volume": "", |
| "issue": "", |
| "pages": "41--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John D. Kelleher and Simon Dobnik. Referring to the recently seen: reference and perceptual memory in situated dialogue. In CLASP Papers in Computa- tional Linguistics: Dialogue and Perception -Ex- tended papers from DaP-2018 Gothenburg, pages 41-50.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Anaphora with nonnominal antecedents in computational linguistics: a survey", |
| "authors": [ |
| { |
| "first": "Varada", |
| "middle": [], |
| "last": "Kolhatkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roussel", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefanie", |
| "middle": [], |
| "last": "Dipper", |
| "suffix": "" |
| }, |
| { |
| "first": "Heike", |
| "middle": [], |
| "last": "Zinsmeister", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Computational Linguistics", |
| "volume": "44", |
| "issue": "3", |
| "pages": "547--612", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Varada Kolhatkar, Adam Roussel, Stefanie Dipper, and Heike Zinsmeister. 2018. Anaphora with non- nominal antecedents in computational linguistics: a survey. Computational Linguistics, 44(3):547-612.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Visual coreference resolution in visual dialog using neural module networks", |
| "authors": [ |
| { |
| "first": "Satwik", |
| "middle": [], |
| "last": "Kottur", |
| "suffix": "" |
| }, |
| { |
| "first": "Jos\u00e9", |
| "middle": [ |
| "M", |
| "F" |
| ], |
| "last": "Moura", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruv", |
| "middle": [], |
| "last": "Batra", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcus", |
| "middle": [], |
| "last": "Rohrbach", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "The European Conference on Computer Vision (ECCV)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Satwik Kottur, Jos\u00e9 M. F. Moura, Devi Parikh, Dhruv Batra, and Marcus Rohrbach. 2018. Visual corefer- ence resolution in visual dialog using neural mod- ule networks. In The European Conference on Com- puter Vision (ECCV).", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "A hierarchical approach for generating descriptive image paragraphs", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Krause", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Ranjay", |
| "middle": [], |
| "last": "Krishna", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
| "volume": "", |
| "issue": "", |
| "pages": "3337--3345", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan Krause, Justin Johnson, Ranjay Krishna, and Li Fei-Fei. 2017. A hierarchical approach for gen- erating descriptive image paragraphs. In 2017 IEEE Conference on Computer Vision and Pattern Recog- nition (CVPR), pages 3337-3345.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Visual Genome: Connecting language and vision using crowdsourced dense image annotations", |
| "authors": [ |
| { |
| "first": "Ranjay", |
| "middle": [], |
| "last": "Krishna", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuke", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Groth", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Hata", |
| "suffix": "" |
| }, |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Kravitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephanie", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yannis", |
| "middle": [], |
| "last": "Kalantidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Li-Jia", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "A" |
| ], |
| "last": "Shamma", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Bernstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Journal of Computer Vision", |
| "volume": "123", |
| "issue": "1", |
| "pages": "32--73", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin John- son, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, Michael Bernstein, and Li Fei-Fei. 2017. Visual Genome: Connecting language and vision using crowdsourced dense image annotations. Interna- tional Journal of Computer Vision, 123(1):32-73.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "ParCorFull: a parallel corpus annotated with full coreference", |
| "authors": [ |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Lapshinova-Koltunski", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Hardmeier", |
| "suffix": "" |
| }, |
| { |
| "first": "Pauline", |
| "middle": [], |
| "last": "Krielke", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of 11th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "423--428", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ekaterina Lapshinova-Koltunski, Christian Hardmeier, and Pauline Krielke. 2018. ParCorFull: a parallel corpus annotated with full coreference. In Proceed- ings of 11th Language Resources and Evaluation Conference, pages 423-428, Miyazaki, Japan. Euro- pean Language Resources Association (ELRA). To appear.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Load theory of selective attention and cognitive control", |
| "authors": [ |
| { |
| "first": "Nilli", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| }, |
| { |
| "first": "Aleksandra", |
| "middle": [], |
| "last": "Hirst", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [ |
| "W" |
| ], |
| "last": "De Fockert", |
| "suffix": "" |
| }, |
| { |
| "first": "Essi", |
| "middle": [], |
| "last": "Viding", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Journal of Experimental Psychology: General", |
| "volume": "133", |
| "issue": "3", |
| "pages": "339--354", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nilli Lavie, Aleksandra Hirst, Jan W de Fockert, and Essi Viding. 2004. Load theory of selective atten- tion and cognitive control. Journal of Experimental Psychology: General, 133(3):339-354.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Microsoft coco: Common objects in context", |
| "authors": [ |
| { |
| "first": "Tsung-Yi", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Maire", |
| "suffix": "" |
| }, |
| { |
| "first": "Serge", |
| "middle": [], |
| "last": "Belongie", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Hays", |
| "suffix": "" |
| }, |
| { |
| "first": "Pietro", |
| "middle": [], |
| "last": "Perona", |
| "suffix": "" |
| }, |
| { |
| "first": "Deva", |
| "middle": [], |
| "last": "Ramanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Doll\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "Lawrence" |
| ], |
| "last": "Zitnick", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Computer Vision -ECCV 2014", |
| "volume": "", |
| "issue": "", |
| "pages": "740--755", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C. Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In Computer Vision - ECCV 2014, pages 740-755, Cham. Springer Inter- national Publishing.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Resolving it, this, and that in unrestricted multi-party dialog", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "816--823", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph M\u00fcller. 2007. Resolving it, this, and that in unrestricted multi-party dialog. In Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, pages 816-823, Prague, Czech Republic. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Multilevel annotation of linguistic data with MMAX2", |
| "authors": [ |
| { |
| "first": "Christoph", |
| "middle": [], |
| "last": "M\u00fcller", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Strube", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Corpus Technology and Language Pedagogy: New Resources, New Tools, New Methods", |
| "volume": "", |
| "issue": "", |
| "pages": "197--214", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph M\u00fcller and Michael Strube. 2006. Multi- level annotation of linguistic data with MMAX2. In Sabine Braun, Kurt Kohn, and Joybrato Mukher- jee, editors, Corpus Technology and Language Ped- agogy: New Resources, New Tools, New Methods, pages 197-214. Peter Lang, Frankfurt a.M., Ger- many.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Abstract coreference in a multilingual perspective: a view on czech and german", |
| "authors": [ |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Nedoluzhko", |
| "suffix": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Lapshinova-Koltunski", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Workshop on Coreference Resolution Beyond OntoNotes, COR-BON 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "47--52", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anna Nedoluzhko and Ekaterina Lapshinova- Koltunski. 2016. Abstract coreference in a multilingual perspective: a view on czech and german. In Proceedings of the Workshop on Coreference Resolution Beyond OntoNotes, COR- BON 2016, pages 47-52, Ann Arbor, Michigan. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Discourse annotation and semantic annotation in the GNOME corpus", |
| "authors": [ |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the Workshop on Discourse Annotation", |
| "volume": "", |
| "issue": "", |
| "pages": "72--79", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Massimo Poesio. 2004. Discourse annotation and se- mantic annotation in the GNOME corpus. In Pro- ceedings of the Workshop on Discourse Annota- tion, pages 72-79, Barcelona, Spain. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Linguistic and cognitive evidence about anaphora", |
| "authors": [ |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Anaphora Resolution: Algorithms, Resources and Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "23--54", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Massimo Poesio. 2016. Linguistic and cognitive evi- dence about anaphora. In Massimo Poesio, Roland Stuckardt, and Yannick Versley, editors, Anaphora Resolution: Algorithms, Resources and Applica- tions, pages 23-54. Springer-Verlag, Berlin Heidel- berg.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Unrestricted coreference: Identifying entities and events in OntoNotes", |
| "authors": [ |
| { |
| "first": "Sameer", |
| "middle": [ |
| "S" |
| ], |
| "last": "Pradhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Lance", |
| "middle": [], |
| "last": "Ramshaw", |
| "suffix": "" |
| }, |
| { |
| "first": "Ralph", |
| "middle": [], |
| "last": "Weischedel", |
| "suffix": "" |
| }, |
| { |
| "first": "Jessica", |
| "middle": [], |
| "last": "MacBride", |
| "suffix": "" |
| }, |
| { |
| "first": "Linnea", |
| "middle": [], |
| "last": "Micciulla", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "International Conference on Semantic Computing (ICSC 2007)", |
| "volume": "", |
| "issue": "", |
| "pages": "446--453", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICSC.2007.93" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sameer S. Pradhan, Lance Ramshaw, Ralph Weischedel, and Jessica MacBride and Linnea Micciulla. 2007. Unrestricted coreference: Iden- tifying entities and events in OntoNotes. In International Conference on Semantic Computing (ICSC 2007), pages 446-453.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Situated meaning in multimodal dialogue: Humanrobot and human-computer interactions", |
| "authors": [ |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Pustejovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikhil", |
| "middle": [], |
| "last": "Krishnaswamy", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Journal article manuscript", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Pustejovsky and Nikhil Krishnaswamy. 2020. Situated meaning in multimodal dialogue: Human- robot and human-computer interactions. Journal ar- ticle manuscript, Department of Computer Science, Brandeis University.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Semiotic schemas: a framework for grounding language in action and perception", |
| "authors": [ |
| { |
| "first": "Deb", |
| "middle": [], |
| "last": "Roy", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Artificial Intelligence", |
| "volume": "167", |
| "issue": "1-2", |
| "pages": "170--205", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Deb Roy. 2005. Semiotic schemas: a framework for grounding language in action and perception. Artifi- cial Intelligence, 167(1-2):170-205.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Natural language semantics with pictures: Some language & vision datasets and potential uses for computational semantics", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Schlangen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 13th International Conference on Computational Semantics -Long Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "283--294", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-0424" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Schlangen. 2019. Natural language semantics with pictures: Some language & vision datasets and potential uses for computational semantics. In Pro- ceedings of the 13th International Conference on Computational Semantics -Long Papers, pages 283- 294, Gothenburg, Sweden. Association for Compu- tational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Visual reference resolution using attention memory for visual dialog", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [ |
| "Hongsuck" |
| ], |
| "last": "Seo", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Lehrmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Bohyung", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonid", |
| "middle": [], |
| "last": "Sigal", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "30", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Hongsuck Seo, Andreas Lehrmann, Bohyung Han, and Leonid Sigal. 2017. Visual reference resolution using attention memory for visual dialog. In Ad- vances in Neural Information Processing Systems, volume 30. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Interaction strategies for an affective conversational agent", |
| "authors": [ |
| { |
| "first": "Cameron", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "Nigel", |
| "middle": [], |
| "last": "Crook", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Dobnik", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Charlton", |
| "suffix": "" |
| }, |
| { |
| "first": "Johan", |
| "middle": [], |
| "last": "Boye", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Pulman", |
| "suffix": "" |
| }, |
| { |
| "first": "Raul", |
| "middle": [], |
| "last": "Santos De La Camara", |
| "suffix": "" |
| }, |
| { |
| "first": "Markku", |
| "middle": [], |
| "last": "Turunen", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Benyon", |
| "suffix": "" |
| }, |
| { |
| "first": "Jay", |
| "middle": [], |
| "last": "Bradley", |
| "suffix": "" |
| }, |
| { |
| "first": "Bj\u00f6rn", |
| "middle": [], |
| "last": "Gamb\u00e4ck", |
| "suffix": "" |
| }, |
| { |
| "first": "Preben", |
| "middle": [], |
| "last": "Hansen", |
| "suffix": "" |
| }, |
| { |
| "first": "Oli", |
| "middle": [], |
| "last": "Mival", |
| "suffix": "" |
| }, |
| { |
| "first": "Nick", |
| "middle": [], |
| "last": "Webb", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Cavazza", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Presence: Teleoperators and Virtual Environments", |
| "volume": "20", |
| "issue": "5", |
| "pages": "395--411", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cameron Smith, Nigel Crook, Simon Dobnik, Daniel Charlton, Johan Boye, Stephen Pulman, Raul Santos de la Camara, Markku Turunen, David Benyon, Jay Bradley, Bj\u00f6rn Gamb\u00e4ck, Preben Hansen, Oli Mi- val, Nick Webb, and Marc Cavazza. 2011. Interac- tion strategies for an affective conversational agent. Presence: Teleoperators and Virtual Environments, 20(5):395-411.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Discourse Processing", |
| "authors": [ |
| { |
| "first": "Manfred", |
| "middle": [], |
| "last": "Stede", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manfred Stede. 2012. Discourse Processing. Morgan and Claypool Publishers, Toronto.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "SCARE: a situated corpus with annotated referring expressions", |
| "authors": [ |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Stoia", |
| "suffix": "" |
| }, |
| { |
| "first": "Darla", |
| "middle": [ |
| "Magdalene" |
| ], |
| "last": "Shockley", |
| "suffix": "" |
| }, |
| { |
| "first": "Donna", |
| "middle": [ |
| "K" |
| ], |
| "last": "Byron", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Fosler-Lussier", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laura Stoia, Darla Magdalene Shockley, Donna K. Byron, and Eric Fosler-Lussier. 2008. SCARE: a situated corpus with annotated referring expres- sions. In Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08), Marrakech, Morocco. European Lan- guage Resources Association (ELRA).", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Anaphora and coreference resolution: A review", |
| "authors": [ |
| { |
| "first": "Rhea", |
| "middle": [], |
| "last": "Sukthanker", |
| "suffix": "" |
| }, |
| { |
| "first": "Soujanya", |
| "middle": [], |
| "last": "Poria", |
| "suffix": "" |
| }, |
| { |
| "first": "Erik", |
| "middle": [], |
| "last": "Cambria", |
| "suffix": "" |
| }, |
| { |
| "first": "Ramkumar", |
| "middle": [], |
| "last": "Thirunavukarasu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "59", |
| "issue": "", |
| "pages": "139--162", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.inffus.2020.01.010" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rhea Sukthanker, Soujanya Poria, Erik Cambria, and Ramkumar Thirunavukarasu. 2020. Anaphora and coreference resolution: A review. Information Fu- sion, 59:139-162.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Refer, Reuse, Reduce: Generating Subsequent References in Visual and Conversational Contexts", |
| "authors": [ |
| { |
| "first": "Ece", |
| "middle": [], |
| "last": "Takmaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Mario", |
| "middle": [], |
| "last": "Giulianelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandro", |
| "middle": [], |
| "last": "Pezzelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Arabella", |
| "middle": [], |
| "last": "Sinclair", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Fern\u00e1ndez", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "4350--4368", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.353" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ece Takmaz, Mario Giulianelli, Sandro Pezzelle, Ara- bella Sinclair, and Raquel Fern\u00e1ndez. 2020. Refer, Reuse, Reduce: Generating Subsequent References in Visual and Conversational Contexts. In Proceed- ings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4350-4368, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Vision-and-dialog navigation", |
| "authors": [ |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Thomason", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Murray", |
| "suffix": "" |
| }, |
| { |
| "first": "Maya", |
| "middle": [], |
| "last": "Cakmak", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Conference on Robot Learning (CoRL)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jesse Thomason, Michael Murray, Maya Cakmak, and Luke Zettlemoyer. 2019. Vision-and-dialog naviga- tion. In Conference on Robot Learning (CoRL).", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Annotating a broad range of anaphoric phenomena, in multiple genres: the ARRAU corpus", |
| "authors": [ |
| { |
| "first": "Olga", |
| "middle": [], |
| "last": "Uryupina", |
| "suffix": "" |
| }, |
| { |
| "first": "Ron", |
| "middle": [], |
| "last": "Artstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonella", |
| "middle": [], |
| "last": "Bristot", |
| "suffix": "" |
| }, |
| { |
| "first": "Federica", |
| "middle": [], |
| "last": "Cavicchio", |
| "suffix": "" |
| }, |
| { |
| "first": "Francesca", |
| "middle": [], |
| "last": "Delogu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kepa", |
| "middle": [], |
| "last": "Rodriguez", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Natural Language Engineering", |
| "volume": "26", |
| "issue": "1", |
| "pages": "95--128", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Olga Uryupina, Ron Artstein, Antonella Bristot, Feder- ica Cavicchio, Francesca Delogu, Kepa Rodriguez, and Massimo Poesio. 2020. Annotating a broad range of anaphoric phenomena, in multiple genres: the ARRAU corpus. Natural Language Engineer- ing, 26(1):95-128.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Using lexical and encyclopedic knowledge", |
| "authors": [ |
| { |
| "first": "Yannick", |
| "middle": [], |
| "last": "Versley", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| }, |
| { |
| "first": "Simone", |
| "middle": [], |
| "last": "Ponzetto", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Anaphora Resolution: Algorithms, Resources and Applications", |
| "volume": "", |
| "issue": "", |
| "pages": "397--429", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yannick Versley, Massimo Poesio, and Simone Ponzetto. 2016. Using lexical and encyclopedic knowledge. In Massimo Poesio, Roland Stuckardt, and Yannick Versley, editors, Anaphora Resolution: Algorithms, Resources and Applications, pages 397- 429. Springer-Verlag, Berlin Heidelberg.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| }, |
| { |
| "first": "Alice", |
| "middle": [], |
| "last": "Lai", |
| "suffix": "" |
| }, |
| { |
| "first": "Micah", |
| "middle": [], |
| "last": "Hodosh", |
| "suffix": "" |
| }, |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Hockenmaier", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "67--78", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Young, Alice Lai, Micah Hodosh, and Julia Hock- enmaier. 2014. From image descriptions to visual denotations: New similarity metrics for semantic in- ference over event descriptions. Transactions of the Association for Computational Linguistics, 2:67-78.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "A cluster ranking model for full anaphora resolution", |
| "authors": [ |
| { |
| "first": "Juntao", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Uma", |
| "suffix": "" |
| }, |
| { |
| "first": "Massimo", |
| "middle": [], |
| "last": "Poesio", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "11--20", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juntao Yu, Alexandra Uma, and Massimo Poesio. 2020. A cluster ranking model for full anaphora resolution. In Proceedings of the 12th Language Resources and Evaluation Conference, pages 11-20, Marseille, France. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "What you see is what you get: Visual pronoun coreference resolution in dialogues", |
| "authors": [ |
| { |
| "first": "Xintong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongming", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangqiu", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Song", |
| "suffix": "" |
| }, |
| { |
| "first": "Changshui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5123--5132", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1516" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xintong Yu, Hongming Zhang, Yangqiu Song, Yan Song, and Changshui Zhang. 2019. What you see is what you get: Visual pronoun coreference resolu- tion in dialogues. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 5123-5132, Hong Kong, China. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "The GUM corpus: creating multilayer resources in the classroom. Language Resources and Evaluation", |
| "authors": [ |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Zeldes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "51", |
| "issue": "", |
| "pages": "581--612", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir Zeldes. 2017. The GUM corpus: creating mul- tilayer resources in the classroom. Language Re- sources and Evaluation, 51(3):581-612.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Scene parsing through ade20k dataset", |
| "authors": [ |
| { |
| "first": "Bolei", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Hang", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Puig", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanja", |
| "middle": [], |
| "last": "Fidler", |
| "suffix": "" |
| }, |
| { |
| "first": "Adela", |
| "middle": [], |
| "last": "Barriuso", |
| "suffix": "" |
| }, |
| { |
| "first": "Antonio", |
| "middle": [], |
| "last": "Torralba", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", |
| "volume": "", |
| "issue": "", |
| "pages": "5122--5130", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/CVPR.2017.544" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. 2017. Scene parsing through ade20k dataset. In 2017 IEEE Con- ference on Computer Vision and Pattern Recognition (CVPR), pages 5122-5130.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "Top-down perspective of the Cups corpus scene with ground truth object IDs.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "Participant 1 cannot see the cups circled in blue, whereas participant 2 cannot see the cups circled in red. Person 3 is a passive observer of the conversation.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "Image and description sentences from the Tell-me-more corpus. Grammatical errors and other disfluencies are not corrected. size than OntoNotes (Pradhan et al., 2007), the largest and most used coreference corpus in the field.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF3": { |
| "num": null, |
| "text": "Example of annotation in the MMAX tool. Coreferential links are shown with the green lines in the bottom right. The annotator has simultaneous access to the image and the text while annotating all specified attributes in the annotation scheme.", |
| "uris": null, |
| "type_str": "figure" |
| } |
| } |
| } |
| } |