| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:23:29.109934Z" |
| }, |
| "title": "They Are Not All Alike: Answering Different Spatial Questions Requires Different Grounding Strategies", |
| "authors": [ |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Testoni", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Claudio", |
| "middle": [], |
| "last": "Greco", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Bianchi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "ISAE-Supaero", |
| "location": { |
| "country": "France" |
| } |
| }, |
| "email": "tobias.bianchi@student.isae-supaero.fr" |
| }, |
| { |
| "first": "Mauricio", |
| "middle": [], |
| "last": "Mazuecos", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universidad de C\u00f3rdoba", |
| "location": { |
| "country": "Argentina" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Agata", |
| "middle": [], |
| "last": "Marcante", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universit\u00e9 de Lorraine", |
| "location": { |
| "country": "France" |
| } |
| }, |
| "email": "agata.marcante7@etu.univ-lorraine.fr" |
| }, |
| { |
| "first": "Luciana", |
| "middle": [], |
| "last": "Benotti", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Universidad de C\u00f3rdoba", |
| "location": { |
| "country": "Argentina" |
| } |
| }, |
| "email": "mmazuecos|luciana.benotti@unc.edu.ar" |
| }, |
| { |
| "first": "Raffaella", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Trento", |
| "location": { |
| "country": "Italy" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper, we study the grounding skills required to answer spatial questions asked by humans while playing the GuessWhat?! game. We propose a classification for spatial questions dividing them into absolute, relational, and group questions. We build a new answerer model based on the LXMERT multimodal transformer and we compare a baseline with and without visual features of the scene. We are interested in studying how the attention mechanisms of LXMERT are used to answer spatial questions since they require putting attention on more than one region simultaneously and spotting the relation holding among them. We show that our proposed model outperforms the baseline by a large extent (9.70% on spatial questions and 6.27% overall). By analyzing LXMERT errors and its attention mechanisms, we find that our classification helps to gain a better understanding of the skills required to answer different spatial questions.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper, we study the grounding skills required to answer spatial questions asked by humans while playing the GuessWhat?! game. We propose a classification for spatial questions dividing them into absolute, relational, and group questions. We build a new answerer model based on the LXMERT multimodal transformer and we compare a baseline with and without visual features of the scene. We are interested in studying how the attention mechanisms of LXMERT are used to answer spatial questions since they require putting attention on more than one region simultaneously and spotting the relation holding among them. We show that our proposed model outperforms the baseline by a large extent (9.70% on spatial questions and 6.27% overall). By analyzing LXMERT errors and its attention mechanisms, we find that our classification helps to gain a better understanding of the skills required to answer different spatial questions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Visual Dialogues are a useful testbed to study how models ground natural language and in particular how they ground spatial language, which is the focus of our analysis. Visual Dialogues have been the aim of early work on natural language understanding (NLU) (Winograd, 1972) and are now studied by a very active community at the interplay between computer vision and computational linguistics (e.g. Baldridge et al. (2018) ; Ilinykh et al. (2019) ; Haber et al. (2019) ). Recently, important progress has been made on visual dialogue systems thanks to the release of datasets like Vis-Dial (Das et al., 2017) and GuessWhat?! (de Vries et al., 2017) . The former contains chit-chat conversations about an image whereas the latter is a visual game, hence its dialogues are goal-oriented. In both cases, one agent asks questions and the ABSOLUTE is it the bus on the left? No RELATIONAL is the boat next to a car? No GROUP is one of the two in the back? Yes Figure 1 : A vast amount of questions asked by humans in the GuessWhat?! game (de Vries et al., 2017) are spatial. We classify them as absolute, relational, and group based on how many objects are involved and how they are related. The red box marks the object(s) involved in the question, while the green box marks the target of the game. Relational and group questions need more than one object, whereas absolute do not.", |
| "cite_spans": [ |
| { |
| "start": 259, |
| "end": 275, |
| "text": "(Winograd, 1972)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 400, |
| "end": 423, |
| "text": "Baldridge et al. (2018)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 426, |
| "end": 447, |
| "text": "Ilinykh et al. (2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 450, |
| "end": 469, |
| "text": "Haber et al. (2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 591, |
| "end": 609, |
| "text": "(Das et al., 2017)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 614, |
| "end": 649, |
| "text": "GuessWhat?! (de Vries et al., 2017)", |
| "ref_id": null |
| }, |
| { |
| "start": 1034, |
| "end": 1057, |
| "text": "(de Vries et al., 2017)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 956, |
| "end": 964, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "other, which we call the Oracle, answers. For Vis-Dial most of the work focused on the answerer, but in-depth evaluation has been carried out on the questioner too (eg., Murahari et al. (2019) ; Testoni et al. (2019) ). For GuessWhat?!, instead, work has been done mostly, if not only, on the questioner. Current models trained with reinforcement learning achieve high task success; they adapt to the oracle limitations and end-up asking questions that are linguistically simpler than those asked by humans Pang and Wang, 2020) .", |
| "cite_spans": [ |
| { |
| "start": 170, |
| "end": 192, |
| "text": "Murahari et al. (2019)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 195, |
| "end": 216, |
| "text": "Testoni et al. (2019)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 507, |
| "end": 527, |
| "text": "Pang and Wang, 2020)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "It is interesting to understand where current multimodal NLU models stand with respect to this task: answering questions asked by humans in a goal oriented visual dialogue. Our paper addresses this question by evaluating how the Oracle model of the GuessWhat?! game answers questions asked by humans while playing the game.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "GuessWhat?! is a cooperative game where two players talk in order to identify an object in an image. The player known as the Questioner has to guess the target by asking yes/no questions. The other player, the Oracle, knows the target object and answers the questions. show that most of the questions in the dataset are about the entity of the target (\"Is it a female?\") or its location (\"is it the first one?\"). Mazuecos et al. (2020) show that the baseline model, commonly used for the Guesswhat?! task since its introduction in de Vries et al. (2017), has almost human-like accuracy on the entity questions and a much lower accuracy on questions about attributes. In this paper, we focus on spatial questions and classify them into three groups: absolute, relational, and group questions as illustrated in Figure 1 .", |
| "cite_spans": [ |
| { |
| "start": 413, |
| "end": 435, |
| "text": "Mazuecos et al. (2020)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 809, |
| "end": 817, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "An unpleasant aspect of the baseline model is that it receives the gold standard entity of the target (that is, the category label, e.g. \"giraffe\" or \"boat\") as input. Furthermore, it answers questions without seeing either the image or the visual features of the target, but instead it simply relies on the category label of the target and its coordinates. Important progress on multimodal encoders has been obtained since the GuessWhat?! release; hence, we study the effect of using models that ground the question into the image and do not have access to the gold standard category label of the target. We adapt a multimodal universal encoder, LXMERT (Tan and Bansal, 2019) , to play the role of the Oracle and compare it with the baseline model. It is known that grounding spatial expressions is challenging for neural networks since quite often they require models to put attention on more regions simultaneously and spot the relation holding among them (e.g., the car and the boat in Figure 1 , middle). LXMERT is a transformer-based neural network and as such it heavily exploits attentionbased mechanisms. In this paper, we run a qualitative analysis of the attention LXMERT exhibits for the different types of location questions and run an in-depth error analysis of its results. To sum up, we make the following contributions:", |
| "cite_spans": [ |
| { |
| "start": 654, |
| "end": 676, |
| "text": "(Tan and Bansal, 2019)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 990, |
| "end": 998, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We adapt LXMERT to play the role of the Oracle of the GuessWhat?! game obtaining an overall accuracy of 82.21%, an increase of 6.27% with respect to the usual baseline.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We find that LXMERT improves over the baseline also on spatial questions (+9.70%), but they remain a large source of errors also for this model -with 77.00% accuracy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We classify spatial questions into three subtypes and use this classification to annotate the subset of spatial questions in the Guess-What?! test set. The fine-grained evaluation shows that the hardest spatial questions are the relational and group ones.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We run an in-depth qualitative analysis of LXMERT cross-modal attention and an analysis of its errors on each question sub-type. The analysis shows that LXMERT attention differs between absolute and relational questions as expected, and that some spatial questions need the dialogue history to be interpreted correctly.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The paper proceeds as follows. Section 2 reviews previous work on visual question answering and on spatial referring expressions. Section 3 presents the models providing information on how we adapt LXMERT for the Oracle task. Section 4 describes the dataset and our classification of spatial questions. In Section 5 we compare the accuracy of the models reporting a fine-grained evaluation by question type and zoom into the subset of spatial questions. We further analysed this subset through a manual inspection of LXMERT attention and errors in Section 6, before drawing our conclusions in Section 7. The code of our work is available at: https://github.com/albertotestoni/ unitn_unc_splu2020.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Answering visual questions is a task that has received increasing attention during the last years. Interesting exploratory analysis has been carried out to understand Visual Question Answering (VQA) systems which highlight their weaknesses and strengths, e.g. (Johnson et al., 2017; Shekhar et al., 2017; Suhr et al., 2017; Kafle and Kanan, 2017) . VQA datasets contain both wh-and Y/N-questions. But the kind of Y/N visual questions the Oracle needs to answer are different than those of the VQA datasets: it has to check whether the target has or does not have the questioned property. Hence, it has to compare the target's properties with those of the entity the question refers to and answer accordingly. Moreover, differently from VQA, the GuessWhat?! dataset has been collected in a more naturalistic environment, by letting humans play the games. We adapt LXMERT (Tan and Bansal, 2019) , a multimodal universal encoder State-of-the-Art in VQA, to accomplish the Oracle's challenge.", |
| "cite_spans": [ |
| { |
| "start": 260, |
| "end": 282, |
| "text": "(Johnson et al., 2017;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 283, |
| "end": 304, |
| "text": "Shekhar et al., 2017;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 305, |
| "end": 323, |
| "text": "Suhr et al., 2017;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 324, |
| "end": 346, |
| "text": "Kafle and Kanan, 2017)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 870, |
| "end": 892, |
| "text": "(Tan and Bansal, 2019)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "After the introduction of the supervised baseline models (de Vries et al., 2017), several models have been proposed for the Questioner, which are mostly based on reinforcement learning (Sang-Woo et al., 2019; Zhang et al., 2018b; Zhao and Tresp, 2018; Zhang et al., 2018a; Gan et al., 2019; Pang and Wang, 2020) . For these models, the role of the Oracle is even more salient than for models based on supervised or cooperative learning ) since they are reinforced to ask those questions the Oracle is good at answering. Despite this important role of the Oracle, no work has been carried out to evaluate and improve it. We aim to fill this gap. show that GuessWhat?! human players ask quite a lot spatial questions. It has been observed that capturing the spatial relation about objects is challenging for neural network models. Kelleher and Dobnik (2017) argue that Convolutional Neural Network (CNN) do not ground spatial information properly: since they discard location information through the pooling mechanism, their embeddings can only capture rough relative positions of objects within a scene. In line with this claim, Collell and Moens (2018) show that linguistic features are more spatially informative than CNN visual features. New multimodal models, like LXMERT, start from positional aware embeddings. We therefore study how well they handle the spatial questions asked by Guess-What?! players.", |
| "cite_spans": [ |
| { |
| "start": 185, |
| "end": 208, |
| "text": "(Sang-Woo et al., 2019;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 209, |
| "end": 229, |
| "text": "Zhang et al., 2018b;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 230, |
| "end": 251, |
| "text": "Zhao and Tresp, 2018;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 252, |
| "end": 272, |
| "text": "Zhang et al., 2018a;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 273, |
| "end": 290, |
| "text": "Gan et al., 2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 291, |
| "end": 311, |
| "text": "Pang and Wang, 2020)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 829, |
| "end": 855, |
| "text": "Kelleher and Dobnik (2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1128, |
| "end": 1152, |
| "text": "Collell and Moens (2018)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Spatial expressions have been deeply studied within the referring expression generation community. In this area, earlier work (Paraboni et al., 2007) has suggested that, in ordered domains (e.g., a document divided into sections and subsections), referring expressions that include spatial information, even when redundant, lead to a significant reduction in the amount of search that is needed to identify the referent. It has been argued that spatial information reduces the cognitive load (measured by eye tracking) necessary for resolving a referring expression (Paraboni et al., 2017) . This research area (Krahmer and van Deemter, 2012; Ghanimifard and Dobnik, 2017) distinguishes between spatial referring expressions that involve another object in the description (e.g. \"the rabbit in the hat\") from those that do not (e.g. \"the rabbit on the left\"). The first group of expressions is known as relational, while we shall refer to the second one as absolute. A further distinction is made between referring expressions that are singular (e.g. \"the rabbit in the hat\") and those that are plural (e.g. \"the three rabbits on the table\") and refer to a group (see e.g., L\u00f8nning (1997) ; Gatt and van Deemter (2007) ; Krahmer and van Deemter (2012)).", |
| "cite_spans": [ |
| { |
| "start": 126, |
| "end": 149, |
| "text": "(Paraboni et al., 2007)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 566, |
| "end": 589, |
| "text": "(Paraboni et al., 2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 611, |
| "end": 642, |
| "text": "(Krahmer and van Deemter, 2012;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 643, |
| "end": 672, |
| "text": "Ghanimifard and Dobnik, 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 1173, |
| "end": 1187, |
| "text": "L\u00f8nning (1997)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1190, |
| "end": 1217, |
| "text": "Gatt and van Deemter (2007)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this paper, we classify GuessWhat?! spatial questions using absolute, relational and group distinctions and examine how LXMERT performs for each type of spatial question. We also conduct an error analysis and an attention analysis taking these categories into consideration.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Recent work by Agarwal et al. (2020) shows that in current visual dialogue datasets the dialogue history rarely matters. The authors ask crowdsourcers whether they can confidently answer a question by looking at the image and the question, without seeing the dialogue history. In our qualitative analysis we check whether history plays a role for the spatial questions of the GuessWhat?! game that LXMERT fails to answer.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 36, |
| "text": "Agarwal et al. (2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this section we present the models that we compare. We also explain how we adapted LXMERT to the Oracle task. The models are trained on successful games.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "3" |
| }, |
| { |
| "text": "LSTM is the baseline model proposed in de Vries et al. (2017). It does not have access to the raw image features. It receives as input embeddings of the target object's category, its spatial coordinates, and one question encoded by a dedicated LSTM. These three embeddings are concatenated and fed to a Multi-Layer Perceptron (MLP) that gives an answer (Yes or No).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Models", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We enhance the LSTM model described above with the visual modality and we remove the information about the target object category. We extract the visual vectors corresponding to the input image and the crop of the target object using a frozen ResNet-152 network pre-trained on ImageNet (He et al., 2016) and we pass them through a linear layer and a tanh activation function. We concatenate these scaled representations to the embeddings of the target object's spatial coordinates and the question: the resulting vector is fed to an MLP to obtain the answer, as it happens in the LSTM model.", |
| "cite_spans": [ |
| { |
| "start": 286, |
| "end": 303, |
| "text": "(He et al., 2016)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "V-LSTM", |
| "sec_num": null |
| }, |
| { |
| "text": "LXMERT To evaluate the performance of a universal multimodal encoder, we employ LXMERT (Learning Cross-Modality Encoder Representations from Transformers) (Tan and Bansal, 2019) . It represents an image by the set of position aware object embeddings for the 36 most salient regions detected by Faster R-CNN and it processes the text input by position aware randomly initialized word embeddings. We fill the 36th position with the visual features of the target object. Both the visual and linguistic representations are processed by a specialized transformer encoder based on selfattention layers; their outputs are then processed by a cross-modality encoder that through a crossattention mechanism generates representations of the single modality (language and visual output) enhanced with the other modality as well as their joint representation (cross-modality output). LXMERT uses the special tokens CLS and SEP; the latter is used to separate sequences and to denote the end of the textual input. LXMERT has been pre-trained on five tasks. 1 It has 19 attention layers: 9 and 5 self-attention layers in the language and visual encoders, respectively and 5 cross-attention layers. We process the output corresponding to the CLS token. We consider both the pre-trained version (LXMERT) and the one trained from scratch (LXMERT-S). 2 1 Masked cross-modality language modeling, masked object prediction via RoI-feature regression, masked object prediction via detected-label classification, cross-modality matching, and image question answering. 2 We have also evaluated a simplified version of LXMERT-S in which we use 6 self (4 language and 2 visual) and 2 Table 1 : Question type distribution in successful games following the classification proposed in where a question can be assigned to more than one attribute type (multiple labels); the Single label column reports the number of questions which have been assigned to only one type.", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 177, |
| "text": "(Tan and Bansal, 2019)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1546, |
| "end": 1547, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1659, |
| "end": 1666, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "V-LSTM", |
| "sec_num": null |
| }, |
| { |
| "text": "The GuessWhat?! dataset is composed of more than 150k human-human dialogues containing an average of 5.3 questions in natural language created by turkers playing the game on MS COCO images (Lin et al., 2014) . Humans have succeeded on 85% of the games. Not successful games may contain errors made by the human oracle which lead to task failure, we discard questions that belong to human dialogues that were not successful. The remaining set contains around 672K questions which are grounded on about 63K unique images and belong to about 135K dialogues. propose a classification of the questions based on their focus distinguishing questions which ask about the entity of the target (\"Is it an animal?\" or \"Is it a dog?\") or an attribute of it. A question can focus on just one attribute (e.g., \"Is it the black dog\"? or \"Is it black?\") in which case it is assigned just to one attribute question type (color in the examples) or about more attributes (e.g., \"does it have orange pillows on it?\") in which case it is assigned to more attribute question types (to both color and spatial information in the example.) Table 1 reports their distribution in the human-human dialogues giving the numbers of questions assigned to one or more types (multi label) or to just one type (single label).", |
| "cite_spans": [ |
| { |
| "start": 189, |
| "end": 207, |
| "text": "(Lin et al., 2014)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1115, |
| "end": 1122, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We conjecture that the spatial question type includes questions posing different challenges to multimodal models. Krahmer and van Deemter (2012) divide spatial expressions into relational (e.g. \"the cross-modal attention layers. The model behaves similarly to the more complex version trained from scratch. rabbit in the hat\"), that specifies the location of the referent of a noun phrase (the target, \"rabbit\") relative to another object (the landmark, \"hat\"), and absolute that focus only on the target by providing locative information about it (e.g. \"the rabbit on the left\"). A third spatial expression that has received attention within the REG community are group referring expressions whose target is a group of entities (e.g. \"the three rabbits on the table\") or some specific entity of the group to which the expression refers by ordering them (e.g. \"the second rabbit from the left\").", |
| "cite_spans": [ |
| { |
| "start": 114, |
| "end": 144, |
| "text": "Krahmer and van Deemter (2012)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We adapt such classification to the GuessWhat?! spatial questions and classify them into four types: relational, absolute, group and other. To distinguish these types we have leveraged syntactic and lexical characteristics specific to each. Relational questions usually include a prepositional phrase followed by a noun phrase that includes either a pronoun (e.g. \"Is there a sink directly above it?\") or an object word (e.g. \"is it the pen behind the laptop?\"). Absolute spatial questions (e.g. \"the one on the left?\") instead contain a location word either in the x axis (e.g. right, middle, left), or the y (top, bottom), or the z (e.g. front, back) axis. We also consider absolute those questions that include a spatial adjective in its superlative form (e.g. \"the leftmost one?\"). Finally, we consider group questions those containing a number which may indicate order (e.g. \"right to left, is it the first one?\") or groups (e.g. \"in the back among four women?\"). We have automatically annotated spatial questions by identifying nouns, prepositions and number using the Part of Speech tagger Stanza (Qi et al., 2020) . When a question is not assigned to any of the three groups, we include it in the \"Other\" category. 3 We tried identifying objects using the entity recognizers included in Stanford core NLP (Manning et al., 2014) and Stanza (Qi et al., 2020) but the coverage was not good.", |
| "cite_spans": [ |
| { |
| "start": 1104, |
| "end": 1121, |
| "text": "(Qi et al., 2020)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 1223, |
| "end": 1224, |
| "text": "3", |
| "ref_id": null |
| }, |
| { |
| "start": 1347, |
| "end": 1364, |
| "text": "(Qi et al., 2020)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In the next section, we will first compare models using the multi-label classification reported in Table 1, then we will zoom into the spatial questions which together with the entity questions constitute the large majority of questions asked by humans. In order to understand strengths and limits of multimodal models in answering spatial questions, we 3 Examples of questions falling into the \"Other\" category are: \"Is it the tree outside?\" -i.e. an elliptical question which could be completed as \"Is it the tree outside the fenced garden?\" -or \"Can you sleep on it?\" which is not about a spatial property that occurs in the image but an afforded one. focus on those which are assigned only to the spatial question type to avoid confounding effects. Table 2 reports the number of questions in this sub-set.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "de Vries et al. (2017) shows that the \"blind\" version of the LSTM model performs better than the version receiving the visual features. This result is heavily dependent on the question type distribution in human-human dialogues. As we have seen, entity questions are a great proportion of the questions humans ask. The \"blind\" baseline model is facilitated in answering them, since it is given the category of the target object. Following Mazuecos et al. 2020, we evaluate models accuracy by question types. As we can see from Table 3 , the higher overall accuracy reached by the \"blind\" LSTM model is indeed mostly due to the \"entity\" questions for which it reaches 94% (questions like: \"is it a vehicle?\"). As expected, when removing the category (V-LSTM) the accuracy on answering questions about entities decreases to a large degree, but the use of visual features helps the model to answer color questions better. The replacement of the LXMERT architecture, together with the use of positional aware embedding representations of the image, bring an important boost in the accuracy: LXMERT trained from scratch outperforms the LSTM based model on all types of questions. The pre-training phase further increases the performance in important ways.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 527, |
| "end": 534, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation by Question Type", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Above we have seen that LXMERT outperforms the other models on the spatial questions. Our fine-grained classification sheds light on an interesting point: its main advantage comes from the relational questions (Table 4) . Absolute questions require cross-modal attention only to align a word with its referent, whereas relational questions are more challenging: the model has to locate the regions corresponding to the two related words and understand the relation holding among them. The group questions may require \"counting\" skills that go beyond the scope of this paper.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 210, |
| "end": 219, |
| "text": "(Table 4)", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Evaluation on Spatial Questions", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "As a first step towards a deeper understanding of LXMERT performance, we use a linear logistic regression model for the task of predicting whether a question was answered correctly. In Shekhar et al. (2018) it has been shown that unsuccessful games contain more objects in the image than successful ones, and that the target size area is smaller. We use these two features as predictor variables together with the length of the question and the turn in which it was asked in the full dialogue. We observe that the number of objects in the image and the question turn play a significant role in predicting the model behaviour. This might be due to the fact that models do not receive the dialogue history as input. Below we run an error analysis based on the three spatial sub-type questions described above to check whether indeed this could be a source of error. After the error analysis, we study whether LXMERT uses its cross-modal attention differently across these three groups of questions.", |
| "cite_spans": [ |
| { |
| "start": 185, |
| "end": 206, |
| "text": "Shekhar et al. (2018)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Qualitative Analysis", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We did a manual error analysis of 20% of LXMERT errors on spatial questions. We tagged emergent error categories by following a qualitative annotation methodology. Below we describe our findings by classifying them in the three types of spatial questions that we consider throughout the paper. We found that absolute and group questions have more errors related to the missing dialogue history than relational questions even though we explicitly allow for relational questions that include anaphoric pronouns. For these two categories, around 50% of errors are related to missing dialogue history. Dialogue history dependency in the dataset is generally not lexicalized with explicit pronouns but left implicit through ellipsis (e.g. \"in the middle?\"). Figure 2 shows an example of this. Question 5 could be answered with \"yes\" if asked at the beginning of the dialogue (\"middle\" would refer to the middle of the image) but its answer is \"no\" due to history (\"middle\" refers to the middle of the group of oranges). In most of these dialogues, the category of the target is left implicit because it is established in previous questions (e.g., \"orange\"). But also other information is implicit. For example, \"the last single one?\" does not say that the search is evolving from right to left. In these cases, the meaning of the question is only correctly interpretable in the dialogue context.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 753, |
| "end": 761, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "History dependence, as illustrated in Figure 2 , is hard to detect even for human annotators. Using the presence of the pronoun to detect whether a question needs the history in order to be properly answered, as it has been done in Agarwal et al. (2020) , might be misleading. Our examples show that ellipses might create more context dependen-", |
| "cite_spans": [ |
| { |
| "start": 232, |
| "end": 253, |
| "text": "Agarwal et al. (2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 38, |
| "end": 46, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Error Analysis", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Human answer 1. It is a fruit? yes 2. It is the orange? yes 3. One of them I suppose? yes 4. Is it to our right? no 5. In the middle? no 6. The last single one? yes Figure 2 : Sample image and dialogue from the GuessWhat?! dataset. The red boxes mark the objects involved in the questions, while the green box marks the actual referent. LXMERT incorrectly answers \"yes\" to question 5. LXMERT, like all Oracles, does not have access to the dialogue history. It probably interprets the question as \"is the target in the middle of the picture?\". The image and dialogue illustrate the history dependence of questions.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 165, |
| "end": 173, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Human question", |
| "sec_num": null |
| }, |
| { |
| "text": "cies and that there are questions which could be apparently answered even when given in isolation but they would be answered differently based on the context they are in.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human question", |
| "sec_num": null |
| }, |
| { |
| "text": "For absolute only questions, we found the following errors. Questions related to the z-axis of the picture (e.g. \"is it in the background?\") seem to be harder for the model than those questions related to the x-axis of the picture (e.g. \"is it on the left?\"). The errors that do occur on the x-axis are either related to the fact that the dialogue history is necessary in order to interpret the question as in Figure 2 , or that the target is neither on the left nor on the right of the x-axis. In this dataset the adjective left and right behave as vague adjectives. Questions that include superlatives (e.g.\"the rightmost book?) cause many errors. As well as questions that combine two or more of these characteristics (e.g. \"is it the animal at the very front on the left ?\"). Finally, the ambiguity of the word \"middle\", which could be used for any axis, seems to confuse the model. For group questions, the second most frequent errors corresponds to questions grouping in one of the three axes. The term \"row\" is often used to group the target with other objects, especially when images are overcrowded with objects belonging to the same category. However, the term is an ambiguous one, as it can refer to any of the three axes and its meaning is often dependent on which interpretation is more salient in the image. Furthermore, inverse x-axis properties (e.g., \"third girl from right?\") also seem to be problematic. Another frequent error type includes questions that require counting above three (e.g., \"seventh bus from the left?\"). People can immediately and precisely identify that an image contains 1, 2, 3 or 4 items by a simple glance, this ability is called subitizing (Kaufman et al., 1949; Piazza et al., 2002). Identifying the quantity of a larger number of objects takes considerably longer and involves counting for humans. It seems models such as LXMERT are able to do subitizing, but not counting. \nOther problematic group questions are multi-type ones, for instance belonging also to the relational type (e.g., \"are there two of them on the branch?\"); and questions using entities outside the image as reference, such as the viewers (e.g., \"is it in the first room closer to us?\").",
| "cite_spans": [ |
| { |
| "start": 1684, |
| "end": 1706, |
| "text": "(Kaufman et al., 1949;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 1707, |
| "end": 1726, |
| "text": "Piazza et al., 2002", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 410, |
| "end": 418, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Human question", |
| "sec_num": null |
| }, |
| { |
| "text": "For relational questions we find that a source of errors is when the target and the landmark bounding boxes overlap or one is included in the other (\"is it the clock behind the person?\"). Also when the landmark is a part of another object instead of being an object with well delimited borders the model seems to get confused (\"is it under his feet?\"). Questions that include non projective prepositions seem harder (\"is it the person near the bicycle?\") than those whose prepositions indicate the direction of the relation. Another source of errors are questions in which the landmark is large and no clear borders are visible (\"is it on the water?\"). Finally, those questions that require OCR (optical character recognition) are problematic (\"does it have words on it?\"). is it the bus on the left? No is it the boat next to a car? No is it one of the two in the back? Yes Figure 3 : Attentions from the CLS: in absolute questions attention is mostly on the only object the question refers to (the left bus, 0.13) and the target object (0.64) (left); in the relational questions attention spreads between the two related objects (car and boat, 0.12 each) and the target object (the boat on the back, 0.9) (middle); in the group questions attention goes to the entity of the referred group (0.08 and 0.13) and the target (0.37) (right).",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 875, |
| "end": 883, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Human question", |
| "sec_num": null |
| }, |
| { |
| "text": "Here we aim to understand how LXMERT uses attention mechanisms to answer spatial questions. We focus our analysis on the cross-attention layers from language to vision. Recall that, in our adaptation of LXMERT to the Oracle task, the crop of the target is given as the 36th visual embedding together with the most salient regions of the image detected by Faster R-CNN. We are interested in understanding how it exploits the target visual representation to guide attention.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "LXMERT's Attention", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "The entropy of the attention maps shows that the model in the first attention layers distributes attention across all regions (its entropy is close to the maximum possible level), at layer 2 it learns to focus its attention on some regions of the image and on the crop of the target. Finally, at the last layer, the attention on the CLS (the embedding given to the classifier to select the answer) reveals an interesting difference among question types: the number of regions considered salient in the absolute questions is lower than the one of salient regions in the group and relational questions. Table 5 reports the numbers of regions with an attention value higher than 0.05. 4 We have used different thresholds to compute the number of top-valued regions and the same pattern emerges. From a manual inspection, we have seen that the higher number of salient regions in the relational questions often is due to the fact that they refer to more candidate objects, differently from the absolute ones which usually refer to fewer or even just one object. Figure 3 illustrates how LXMERT uses its attention in three sub-type of spatial questions. As we can see, when it interprets relational questions involving two objects, it \"looks\" both at the target 4 If the attention is equally distributed among all the 36 regions, their attention value would be 0.02 (viz. 1/36).",
| "cite_spans": [ |
| { |
| "start": 682, |
| "end": 683, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 1257, |
| "end": 1258, |
| "text": "4", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 601, |
| "end": 608, |
| "text": "Table 5", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 1058, |
| "end": 1066, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "LXMERT's Attention", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "(the boat) and the landmark (the car); in the example it answers the question negatively since the target of the game is the boat marked by the green box and not the one to which the question refers to. Similarly, when interpreting a group question, it looks at the referred group (the two giraffes); in the example it answers the question positively since the target of the game is indeed within the referred group. By looking at the attention maps, we noticed that interesting patterns emerge when looking at the attentions from the CLS token (Figure 3 marks the regions considered more salient from the CLS token). Other tokens put attention mostly or only on the target object region.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 545, |
| "end": 552, |
| "text": "(Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "LXMERT's Attention", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "In this paper we tackle the problem of grounding spatial questions in the GuessWhat?! visual dialogue game. We adapt LXMERT to play the role of the Oracle of the GuessWhat?! game reaching an overall accuracy of 82.21%. This result outperforms the widely used baseline model by 6.27%. The gain is even higher for spatial questions, where LXMERT outperforms the baseline by 9.70%. In order to perform an in-depth analysis, we classify spatial questions into three sub-types and use this classification to annotate the subset of spatial questions in the GuessWhat?! test set. The fine-grained evaluation shows that the hardest spatial questions are the relational and group ones. We perform an in-depth analysis of LXMERT cross-modal attention and a qualitative analysis of the errors on each question sub-type. First of all, we find out that LXMERT puts attention on more regions when processing relational questions compared to absolute and group questions. Secondly, the qualitative analysis highlights the importance of having access to the dialogue history in order to answer some spatial questions. We leave this for future work.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Ioannis Konstas, and Verena Rieser. 2020. History for visual dialog: Do we really need it?", |
| "authors": [ |
| { |
| "first": "Shubham", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "" |
| }, |
| { |
| "first": "Trung", |
| "middle": [], |
| "last": "Bui", |
| "suffix": "" |
| }, |
| { |
| "first": "Joon-Young", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "8182--8197", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shubham Agarwal, Trung Bui, Joon-Young Lee, Ioan- nis Konstas, and Verena Rieser. 2020. History for visual dialog: Do we really need it? In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8182-8197, On- line. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Points, paths, and playscapes: Large-scale spatial language understanding tasks set in the real world", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Baldridge", |
| "suffix": "" |
| }, |
| { |
| "first": "Tania", |
| "middle": [], |
| "last": "Bedrax-Weiss", |
| "suffix": "" |
| }, |
| { |
| "first": "Daphne", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Srini", |
| "middle": [], |
| "last": "Narayanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernando", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [], |
| "last": "Soricut", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Tseng", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the First International Workshop on Spatial Language Understanding", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Baldridge, Tania Bedrax-Weiss, Daphne Luong, Srini Narayanan, Bo Pang, Fernando Pereira, Radu Soricut, Michael Tseng, and Yuan Zhang. 2018. Points, paths, and playscapes: Large-scale spatial language understanding tasks set in the real world. In Proceedings of the First International Workshop on Spatial Language Understanding, New Orleans. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Learning representations specialized in spatial knowledge: Leveraging language and vision. Transactions of the Association for", |
| "authors": [ |
| { |
| "first": "Guillem", |
| "middle": [], |
| "last": "Collell", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "133--144", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/tacl_a_00010" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillem Collell and Marie-Francine Moens. 2018. Learning representations specialized in spatial knowledge: Leveraging language and vision. Trans- actions of the Association for Computational Lin- guistics, 6:133-144.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Visual Dialog", |
| "authors": [ |
| { |
| "first": "Abhishek", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Satwik", |
| "middle": [], |
| "last": "Kottur", |
| "suffix": "" |
| }, |
| { |
| "first": "Khushi", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Avi", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Deshraj", |
| "middle": [], |
| "last": "Yadav", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [ |
| "F" |
| ], |
| "last": "Jos\u00e9", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Moura", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruv", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Batra", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "326--335", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Abhishek Das, Satwik Kottur, Khushi Gupta, Avi Singh, Deshraj Yadav, Jos\u00e9 M.F. Moura, Devi Parikh, and Dhruv Batra. 2017. Visual Dialog. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 326-335.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Multi-step reasoning via recurrent dual attention for visual dialog", |
| "authors": [ |
| { |
| "first": "Zhe", |
| "middle": [], |
| "last": "Gan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [ |
| "I" |
| ], |
| "last": "Ahmed", |
| "suffix": "" |
| }, |
| { |
| "first": "Linjie", |
| "middle": [], |
| "last": "Kholy", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingjing", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "6463--6474", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhe Gan, Yu Cheng, Ahmed EI Kholy, Linjie Li, Jingjing Liu, and Jianfeng Gao. 2019. Multi-step reasoning via recurrent dual attention for visual di- alog. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 6463-6474.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Incremental generation of plural descriptions: Similarity and partitioning", |
| "authors": [ |
| { |
| "first": "Albert", |
| "middle": [], |
| "last": "Gatt", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kees Van Deemter", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "102--111", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Albert Gatt and Kees van Deemter. 2007. Incremen- tal generation of plural descriptions: Similarity and partitioning. In EMNLP-CoNLL 2007, Proceedings of the 2007 Joint Conference on Empirical Meth- ods in Natural Language Processing and Compu- tational Natural Language Learning, June 28-30, 2007, Prague, Czech Republic, pages 102-111.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Learning to compose spatial relations with grounded neural language models", |
| "authors": [ |
| { |
| "first": "Mehdi", |
| "middle": [], |
| "last": "Ghanimifard", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Dobnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "IWCS 2017 -12th International Conference on Computational Semantics -Long papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mehdi Ghanimifard and Simon Dobnik. 2017. Learn- ing to compose spatial relations with grounded neu- ral language models. In IWCS 2017 -12th Inter- national Conference on Computational Semantics - Long papers.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "The PhotoBook dataset: Building common ground through visually-grounded dialogue", |
| "authors": [ |
| { |
| "first": "Janosch", |
| "middle": [], |
| "last": "Haber", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Baumg\u00e4rtner", |
| "suffix": "" |
| }, |
| { |
| "first": "Ece", |
| "middle": [], |
| "last": "Takmaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Lieke", |
| "middle": [], |
| "last": "Gelderloos", |
| "suffix": "" |
| }, |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Fern\u00e1ndez", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1184" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Janosch Haber, Tim Baumg\u00e4rtner, Ece Takmaz, Lieke Gelderloos, Elia Bruni, and Raquel Fern\u00e1ndez. 2019. The PhotoBook dataset: Building common ground through visually-grounded dialogue. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Deep residual learning for image recognition", |
| "authors": [ |
| { |
| "first": "Kaiming", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaoqing", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "770--778", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recog- nition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770- 778.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Tell Me More: A Dataset of Visual Scene Description Sequences", |
| "authors": [ |
| { |
| "first": "Nikolai", |
| "middle": [], |
| "last": "Ilinykh", |
| "suffix": "" |
| }, |
| { |
| "first": "Sina", |
| "middle": [], |
| "last": "Zarrie\u00df", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Schlangen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 12th International Conference on Natural Language Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikolai Ilinykh, Sina Zarrie\u00df, and David Schlangen. 2019. Tell Me More: A Dataset of Visual Scene Description Sequences. In Proceedings of the 12th International Conference on Natural Language Gen- eration.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "CLEVR: A diagnostic dataset for compositional language and elementary visual reasoning", |
| "authors": [ |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Bharath", |
| "middle": [], |
| "last": "Hariharan", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurens", |
| "middle": [], |
| "last": "Van Der Maaten", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "Lawrence" |
| ], |
| "last": "Zitnick", |
| "suffix": "" |
| }, |
| { |
| "first": "Ross", |
| "middle": [ |
| "B" |
| ], |
| "last": "Girshick", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Justin Johnson, Bharath Hariharan, Laurens van der Maaten, Li Fei-Fei, C. Lawrence Zitnick, and Ross B. Girshick. 2017. CLEVR: A diagnos- tic dataset for compositional language and elemen- tary visual reasoning. In IEEE Conference on Computer Vision and Pattern Recognition, volume abs/1612.06890.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "An analysis of visual question answering algorithms", |
| "authors": [ |
| { |
| "first": "Kushal", |
| "middle": [], |
| "last": "Kafle", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Kanan", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the IEEE International Conference on Computer Vision", |
| "volume": "", |
| "issue": "", |
| "pages": "1965--1973", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kushal Kafle and Christopher Kanan. 2017. An analy- sis of visual question answering algorithms. In Pro- ceedings of the IEEE International Conference on Computer Vision, pages 1965-1973.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "The discrimination of visual number. The American journal of psychology", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [ |
| "L" |
| ], |
| "last": "Kaufman", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Lord", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Reese", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Volkmann", |
| "suffix": "" |
| } |
| ], |
| "year": 1949, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "498--525", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "E. L. Kaufman, M. Lord, T. Reese, and J. Volkmann. 1949. The discrimination of visual number. The American journal of psychology, page 498-525.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "What is not where: the challenge of integrating spatial representations into deep learning architectures", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "D" |
| ], |
| "last": "Kelleher", |
| "suffix": "" |
| }, |
| { |
| "first": "Simon", |
| "middle": [], |
| "last": "Dobnik", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "CLASP Papers in Computational Linguistics. Proceedings of the Conference on Logic and Machine Learning in Natural Language", |
| "volume": "", |
| "issue": "", |
| "pages": "41--52", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John D. Kelleher and Simon Dobnik. 2017. What is not where: the challenge of integrating spatial rep- resentations into deep learning architectures. In In CLASP Papers in Computational Linguistics. Pro- ceedings of the Conference on Logic and Machine Learning in Natural Language, pages 41-52.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Computational generation of referring expressions: A survey", |
| "authors": [ |
| { |
| "first": "Emiel", |
| "middle": [], |
| "last": "Krahmer", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Kees Van Deemter", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Computational Linguistics", |
| "volume": "38", |
| "issue": "1", |
| "pages": "173--218", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emiel Krahmer and Kees van Deemter. 2012. Compu- tational generation of referring expressions: A sur- vey. Computational Linguistics, 38(1):173-218.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Microsoft COCO: Common objects in context", |
| "authors": [ |
| { |
| "first": "Tsung-Yi", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Maire", |
| "suffix": "" |
| }, |
| { |
| "first": "Serge", |
| "middle": [], |
| "last": "Belongie", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Hays", |
| "suffix": "" |
| }, |
| { |
| "first": "Pietro", |
| "middle": [], |
| "last": "Perona", |
| "suffix": "" |
| }, |
| { |
| "first": "Deva", |
| "middle": [], |
| "last": "Ramanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Doll\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "Lawrence" |
| ], |
| "last": "Zitnick", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Computer Vision -ECCV 2014 -13th European Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "740--755", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C. Lawrence Zitnick. 2014. Microsoft COCO: Common objects in context. In Computer Vision -ECCV 2014 -13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V, pages 740-755. Springer.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Plurals and collectivity", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "L\u00f8nning", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Handbook of Logic and Language", |
| "volume": "", |
| "issue": "", |
| "pages": "1009--1054", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "J. L\u00f8nning. 1997. Plurals and collectivity. In J. van Benthem and A. ter Meulen, editors, Handbook of Logic and Language, pages 1009-1054. Elsevier.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The Stanford CoreNLP natural language processing toolkit", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenny", |
| "middle": [], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [ |
| "J" |
| ], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "McClosky", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Association for Computational Linguistics (ACL) System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "55--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher D. Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven J. Bethard, and David Mc- Closky. 2014. The Stanford CoreNLP natural lan- guage processing toolkit. In Association for Compu- tational Linguistics (ACL) System Demonstrations, pages 55-60.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "On the role of effective and referring questions in GuessWhat?!", |
| "authors": [ |
| { |
| "first": "Mauricio", |
| "middle": [], |
| "last": "Mazuecos", |
| "suffix": "" |
| }, |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Testoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Raffaella", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Luciana", |
| "middle": [], |
| "last": "Benotti", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the First Workshop on Advances in Language and Vision Research", |
| "volume": "", |
| "issue": "", |
| "pages": "19--25", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mauricio Mazuecos, Alberto Testoni, Raffaella Bernardi, and Luciana Benotti. 2020. On the role of effective and referring questions in GuessWhat?! In Proceedings of the First Workshop on Advances in Language and Vision Research, pages 19-25, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Improving generative visual dialog by answering diverse questions", |
| "authors": [ |
| { |
| "first": "Vishvak", |
| "middle": [], |
| "last": "Murahari", |
| "suffix": "" |
| }, |
| { |
| "first": "Prithvijit", |
| "middle": [], |
| "last": "Chattopadhyay", |
| "suffix": "" |
| }, |
| { |
| "first": "Dhruv", |
| "middle": [], |
| "last": "Batra", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhishek", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1449--1454", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vishvak Murahari, Prithvijit Chattopadhyay, Dhruv Ba- tra, Devi Parikh, and Abhishek Das. 2019. Improv- ing generative visual dialog by answering diverse questions. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Process- ing and the 9th International Joint Conference on Natural Language Processing, pages 1449-1454.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Visual dialogue state tracking for question generation", |
| "authors": [ |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaojie", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of 34th AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wei Pang and Xiaojie Wang. 2020. Visual dialogue state tracking for question generation. In Proceed- ings of 34th AAAI Conference on Artificial Intelli- gence.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Generating referring expressions: Making referents easy to identify", |
| "authors": [ |
| { |
| "first": "Ivandr\u00e9", |
| "middle": [], |
| "last": "Paraboni", |
| "suffix": "" |
| }, |
| { |
| "first": "Kees", |
| "middle": [], |
| "last": "van Deemter", |
| "suffix": "" |
| }, |
| { |
| "first": "Judith", |
| "middle": [], |
| "last": "Masthoff", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Computational Linguistics", |
| "volume": "33", |
| "issue": "2", |
| "pages": "229--254", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivandr\u00e9 Paraboni, Kees van Deemter, and Judith Mas- thoff. 2007. Generating referring expressions: Mak- ing referents easy to identify. Computational Lin- guistics, 33(2):229-254.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Effects of cognitive effort on the resolution of overspecified descriptions", |
| "authors": [ |
| { |
| "first": "Ivandr\u00e9", |
| "middle": [], |
| "last": "Paraboni", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "Gwo", |
| "Jen" |
| ], |
| "last": "Lan", |
| "suffix": "" |
| }, |
| { |
| "first": "Matheus", |
| "middle": [], |
| "last": "Mendes De Sant'ana", |
| "suffix": "" |
| }, |
| { |
| "first": "Fl\u00e1vio Luiz", |
| "middle": [], |
| "last": "Coutinho", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Computational Linguistics", |
| "volume": "43", |
| "issue": "2", |
| "pages": "451--459", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivandr\u00e9 Paraboni, Alex Gwo Jen Lan, Matheus Mendes de Sant'Ana, and Fl\u00e1vio Luiz Coutinho. 2017. Ef- fects of cognitive effort on the resolution of over- specified descriptions. Computational Linguistics, 43(2):451-459.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Are subitizing and counting implemented as separate or functionally overlapping processes?", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Piazza", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Mechelli", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Butterworth", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "J" |
| ], |
| "last": "Price", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "NeuroImage", |
| "volume": "", |
| "issue": "", |
| "pages": "435--446", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "M. Piazza, A. Mechelli, B. Butterworth, and C.J. Price. 2002. Are subitizing and counting implemented as separate or functionally overlapping processes? NeuroImage, pages 435-446.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Stanza: A python natural language processing toolkit for many human languages", |
| "authors": [ |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Qi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuhao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuhui", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Bolton", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "101--108", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng Qi, Yuhao Zhang, Yuhui Zhang, Jason Bolton, and Christopher D. Manning. 2020. Stanza: A python natural language processing toolkit for many human languages. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 101- 108, Online. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Large-scale answerer in questioner's mind for visual dialog question generation", |
| "authors": [ |
| { |
| "first": "Lee", |
| "middle": [], |
| "last": "Sang-Woo", |
| "suffix": "" |
| }, |
| { |
| "first": "Gao", |
| "middle": [], |
| "last": "Tong", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Sohee", |
| "suffix": "" |
| }, |
| { |
| "first": "Yao", |
| "middle": [], |
| "last": "Jaejun", |
| "suffix": "" |
| }, |
| { |
| "first": "Ha", |
| "middle": [], |
| "last": "Jung-Woo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lee Sang-Woo, Gao Tong, Yang Sohee, Yao Jaejun, and Ha Jung-Woo. 2019. Large-scale answerer in questioner's mind for visual dialog question genera- tion. In Proceedings of International Conference on Learning Representations, ICLR.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Ask no more: Deciding when to guess in referential visual dialogue", |
| "authors": [ |
| { |
| "first": "Ravi", |
| "middle": [], |
| "last": "Shekhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Baumg\u00e4rtner", |
| "suffix": "" |
| }, |
| { |
| "first": "Aashish", |
| "middle": [], |
| "last": "Venkatesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Raffaella", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Fernandez", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1218--1233", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ravi Shekhar, Tim Baumg\u00e4rtner, Aashish Venkatesh, Elia Bruni, Raffaella Bernardi, and Raquel Fernan- dez. 2018. Ask no more: Deciding when to guess in referential visual dialogue. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1218-1233, Santa Fe, New Mex- ico, USA. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "FOIL it! Find one mismatch between image and language caption", |
| "authors": [ |
| { |
| "first": "Ravi", |
| "middle": [], |
| "last": "Shekhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandro", |
| "middle": [], |
| "last": "Pezzelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Yauhen", |
| "middle": [], |
| "last": "Klimovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Aur\u00e9lie", |
| "middle": [], |
| "last": "Herbelot", |
| "suffix": "" |
| }, |
| { |
| "first": "Moin", |
| "middle": [], |
| "last": "Nabi", |
| "suffix": "" |
| }, |
| { |
| "first": "Enver", |
| "middle": [], |
| "last": "Sangineto", |
| "suffix": "" |
| }, |
| { |
| "first": "Raffaella", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "255--265", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ravi Shekhar, Sandro Pezzelle, Yauhen Klimovich, Au- r\u00e9lie Herbelot, Moin Nabi, Enver Sangineto, and Raffaella Bernardi. 2017. FOIL it! Find one mis- match between image and language caption. In Pro- ceedings of the 55th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 255-265.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Beyond task success: A closer look at jointly learning to see, ask, and GuessWhat", |
| "authors": [ |
| { |
| "first": "Ravi", |
| "middle": [], |
| "last": "Shekhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Aashish", |
| "middle": [], |
| "last": "Venkatesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Baumg\u00e4rtner", |
| "suffix": "" |
| }, |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Plank", |
| "suffix": "" |
| }, |
| { |
| "first": "Raffaella", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Fern\u00e1ndez", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2578--2587", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1265" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ravi Shekhar, Aashish Venkatesh, Tim Baumg\u00e4rtner, Elia Bruni, Barbara Plank, Raffaella Bernardi, and Raquel Fern\u00e1ndez. 2019. Beyond task success: A closer look at jointly learning to see, ask, and Guess- What. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 2578-2587.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "A corpus of natural language for visual reasoning", |
| "authors": [ |
| { |
| "first": "Alane", |
| "middle": [], |
| "last": "Suhr", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Yeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "217--223", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alane Suhr, Mike Lewis, James Yeh, and Yoav Artzi. 2017. A corpus of natural language for visual rea- soning. In Proceedings of the Annual Meeting of the Association for Computational Linguistics, pages 217-223, Vancouver, Canada. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "LXMERT: Learning cross-modality encoder representations from transformers", |
| "authors": [ |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "5103--5114", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hao Tan and Mohit Bansal. 2019. LXMERT: Learning cross-modality encoder representations from trans- formers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 5103-5114.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "The devil is in the detail: A magnifying glass for the GuessWhich visual dialogue game", |
| "authors": [ |
| { |
| "first": "Alberto", |
| "middle": [], |
| "last": "Testoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Ravi", |
| "middle": [], |
| "last": "Shekhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Raquel", |
| "middle": [], |
| "last": "Fern\u00e1ndez", |
| "suffix": "" |
| }, |
| { |
| "first": "Raffaella", |
| "middle": [], |
| "last": "Bernardi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 23rd SemDial Workshop on the Semantics and Pragmatics of Dialogue (LondonLogue)", |
| "volume": "", |
| "issue": "", |
| "pages": "15--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alberto Testoni, Ravi Shekhar, Raquel Fern\u00e1ndez, and Raffaella Bernardi. 2019. The devil is in the detail: A magnifying glass for the GuessWhich visual dia- logue game. In Proceedings of the 23rd SemDial Workshop on the Semantics and Pragmatics of Dia- logue (LondonLogue), pages 15-24.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "GuessWhat?! Visual object discovery through multi-modal dialogue", |
| "authors": [ |
| { |
| "first": "Harm", |
| "middle": [], |
| "last": "de Vries", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Strub", |
| "suffix": "" |
| }, |
| { |
| "first": "Sarath", |
| "middle": [], |
| "last": "Chandar", |
| "suffix": "" |
| }, |
| { |
| "first": "Olivier", |
| "middle": [], |
| "last": "Pietquin", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugo", |
| "middle": [], |
| "last": "Larochelle", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [ |
| "C" |
| ], |
| "last": "Courville", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "4466--4475", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Harm de Vries, Florian Strub, Sarath Chandar, Olivier Pietquin, Hugo Larochelle, and Aaron C. Courville. 2017. Guesswhat?! visual object discovery through multi-modal dialogue. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pages 4466-4475. IEEE Computer Society.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Understanding natural language", |
| "authors": [ |
| { |
| "first": "Terry", |
| "middle": [], |
| "last": "Winograd", |
| "suffix": "" |
| } |
| ], |
| "year": 1972, |
| "venue": "Cognitive Psychology", |
| "volume": "3", |
| "issue": "", |
| "pages": "1--191", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Terry Winograd. 1972. Understanding natural lan- guage. Cognitive Psychology, 3:1-191.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Making history matter: History-advantage sequence training for visual dialog", |
| "authors": [ |
| { |
| "first": "Tianhao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zheng-Jun", |
| "middle": [], |
| "last": "Zha", |
| "suffix": "" |
| }, |
| { |
| "first": "Hanwang", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the International Conference on Computer Vision (ICCV)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianhao Yang, Zheng-Jun Zha, and Hanwang Zhang. 2019. Making history matter: History-advantage se- quence training for visual dialog. In Proceedings of the International Conference on Computer Vision (ICCV).", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Multimodal hierarchical reinforcement learning policy for task-oriented visual dialog", |
| "authors": [ |
| { |
| "first": "Jiaping", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiancheng", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhou", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "140--150", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiaping Zhang, Tiancheng Zhao, and Zhou Yu. 2018a. Multimodal hierarchical reinforcement learning pol- icy for task-oriented visual dialog. In Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue, pages 140-150.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Goaloriented visual question generation via intermediate rewards", |
| "authors": [ |
| { |
| "first": "Junjie", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chunhua", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Anton", |
| "middle": [], |
| "last": "van den Hengel", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the European Conference of Computer Vision (ECCV)", |
| "volume": "", |
| "issue": "", |
| "pages": "186--201", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Junjie Zhang, Qi Wu, Chunhua Shen, Jian Zhang, Jian- feng Lu, and Anton van den Hengel. 2018b. Goal- oriented visual question generation via intermediate rewards. In Proceedings of the European Confer- ence of Computer Vision (ECCV), pages 186-201.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Improving goaloriented visual dialog agents via advanced recurrent nets with tempered policy gradient", |
| "authors": [ |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Volker", |
| "middle": [], |
| "last": "Tresp", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rui Zhao and Volker Tresp. 2018. Improving goal- oriented visual dialog agents via advanced recurrent nets with tempered policy gradient. In Proceedings of IJCAI.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF2": { |
| "html": null, |
| "text": "Distribution of spatial question sub-types in successful games, computed over questions annotated with only the spatial label in the test set (total: 29845).", |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF4": { |
| "html": null, |
| "text": "Accuracy of the models on the successful games by question type based on the multi label assignment. Values in parenthesis report the comparison with LSTM.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table><tr><td/><td colspan=\"3\">Absolute Relational Group</td></tr><tr><td>LSTM</td><td>76.4</td><td>67.1</td><td>63.3</td></tr><tr><td>V-LSTM</td><td>75.2</td><td>63.5</td><td>62.8</td></tr><tr><td>LXMERT-S</td><td>80.5</td><td>69.6</td><td>68.4</td></tr><tr><td>LXMERT</td><td>83.4</td><td>77.2</td><td>71.6</td></tr></table>" |
| }, |
| "TABREF5": { |
| "html": null, |
| "text": "Accuracy of the sub-type of spatial questions (successful games, questions assigned only one type)", |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF7": { |
| "html": null, |
| "text": "Language to Vision attention in LXMERT: Number of regions of the image considered salient in the last layer from the CLS token -viz. regions with an attention value higher than the 0.05 threshold.", |
| "type_str": "table", |
| "num": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |