| { |
| "paper_id": "D16-1043", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:36:39.362062Z" |
| }, |
| "title": "Comparing Data Sources and Architectures for Deep Visual Representation Learning in Semantics", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Computer Laboratory University of Cambridge", |
| "location": {} |
| }, |
| "email": "douwe.kiela@cl.cam.ac.uk" |
| }, |
| { |
| "first": "Anita", |
| "middle": [ |
| "L" |
| ], |
| "last": "Ver\u0151", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Computer Laboratory University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Computer Laboratory University of Cambridge", |
| "location": {} |
| }, |
| "email": "stephen.clark@cl.cam.ac.uk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Multi-modal distributional models learn grounded representations for improved performance in semantics. Deep visual representations, learned using convolutional neural networks, have been shown to achieve particularly high performance. In this study, we systematically compare deep visual representation learning techniques, experimenting with three well-known network architectures. In addition, we explore the various data sources that can be used for retrieving relevant images, showing that images from search engines perform as well as, or better than, those from manually crafted resources such as ImageNet. Furthermore, we explore the optimal number of images and the multilingual applicability of multi-modal semantics. We hope that these findings can serve as a guide for future research in the field.", |
| "pdf_parse": { |
| "paper_id": "D16-1043", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Multi-modal distributional models learn grounded representations for improved performance in semantics. Deep visual representations, learned using convolutional neural networks, have been shown to achieve particularly high performance. In this study, we systematically compare deep visual representation learning techniques, experimenting with three well-known network architectures. In addition, we explore the various data sources that can be used for retrieving relevant images, showing that images from search engines perform as well as, or better than, those from manually crafted resources such as ImageNet. Furthermore, we explore the optimal number of images and the multilingual applicability of multi-modal semantics. We hope that these findings can serve as a guide for future research in the field.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Multi-modal distributional semantics addresses the fact that text-based semantic models, which represent word meanings as a distribution over other words (Turney and Pantel, 2010; Clark, 2015) , suffer from the grounding problem (Harnad, 1990) . Recent work has shown that this theoretical motivation can be successfully exploited for practical gain. Indeed, multi-modal representation learning leads to improvements over language-only models in a range of tasks, including modelling semantic similarity and relatedness (Bruni et al., 2014; Silberer and Lapata, 2014; Kiela and Bottou, 2014; Lazaridou et al., 2015) , improving lexical entailment (Kiela et al., 2015a) , predicting compositionality (Roller and Schulte im Walde, 2013) , bilingual lexicon induction (Bergsma and Van Durme, 2011) , selectional preference prediction (Bergsma and Goebel, 2011) , linguistic ambiguity resolution (Berzak et al., 2015) , visual information retrieval (Bulat et al., 2016) and metaphor identification (Shutova et al., 2016) .", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 179, |
| "text": "(Turney and Pantel, 2010;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 180, |
| "end": 192, |
| "text": "Clark, 2015)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 229, |
| "end": 243, |
| "text": "(Harnad, 1990)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 520, |
| "end": 540, |
| "text": "(Bruni et al., 2014;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 541, |
| "end": 567, |
| "text": "Silberer and Lapata, 2014;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 568, |
| "end": 591, |
| "text": "Kiela and Bottou, 2014;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 592, |
| "end": 615, |
| "text": "Lazaridou et al., 2015)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 647, |
| "end": 668, |
| "text": "(Kiela et al., 2015a)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 699, |
| "end": 734, |
| "text": "(Roller and Schulte im Walde, 2013)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 765, |
| "end": 794, |
| "text": "(Bergsma and Van Durme, 2011)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 831, |
| "end": 857, |
| "text": "(Bergsma and Goebel, 2011)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 892, |
| "end": 913, |
| "text": "(Berzak et al., 2015)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 945, |
| "end": 965, |
| "text": "(Bulat et al., 2016)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 994, |
| "end": 1016, |
| "text": "(Shutova et al., 2016)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Most multi-modal semantic models tend to rely on raw images as the source of perceptual input. Many data sources have been tried, ranging from image search engines to photo sharing websites to manually crafted resources. Images are retrieved for a given target word if they are ranked highly, have been tagged, or are otherwise associated with the target word(s) in the data source.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Traditionally, representations for images were learned through bag-of-visual words (Sivic and Zisserman, 2003) , using SIFT-based local feature descriptors (Lowe, 2004) . Kiela and Bottou (2014) showed that transferring representations from deep convolutional neural networks (ConvNets) yield much better performance than bag-of-visual-words in multi-modal semantics. ConvNets (LeCun et al., 1998) have become very popular in recent years: they are now the dominant approach for almost all recognition and detection tasks in the computer vision community (LeCun et al., 2015) , approaching or even exceeding human performance in some cases (Weyand et al., 2016) . The work by Alex , which won the Im-ageNet Large Scale Visual Recognition Challenge (ILSVRC) (Russakovsky et al., 2015) in 2012, has played an important role in bringing convolutional networks (back) to prominence. A similar network was used by Kiela and Bottou (2014) to obtain high quality image embeddings for semantics. This work aims to provide a systematic comparison of such deep visual representation learning techniques and data sources; i.e. we aim to answer the following open questions in multi-modal semantics:", |
| "cite_spans": [ |
| { |
| "start": 83, |
| "end": 110, |
| "text": "(Sivic and Zisserman, 2003)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 156, |
| "end": 168, |
| "text": "(Lowe, 2004)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 171, |
| "end": 194, |
| "text": "Kiela and Bottou (2014)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 377, |
| "end": 397, |
| "text": "(LeCun et al., 1998)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 555, |
| "end": 575, |
| "text": "(LeCun et al., 2015)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 640, |
| "end": 661, |
| "text": "(Weyand et al., 2016)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 757, |
| "end": 783, |
| "text": "(Russakovsky et al., 2015)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 909, |
| "end": 932, |
| "text": "Kiela and Bottou (2014)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Does the improved performance over bagof-visual-words extend to different convolutional network architectures, or is it specific to Krizhevsky's AlexNet? Do others work even better?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 How important is the source of images? Is there a difference between search engines and manually annotated data sources? Does the number of images obtained for each word matter?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 Do these findings extend to different languages beyond English?", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We evaluate semantic representation quality through examining how well a system's similarity scores correlate with human similarity and relatedness judgments. We examine both the visual representations themselves as well as the multi-modal representations that fuse visual representations with linguistic input, in this case using middle fusion (i.e., concatenation). To the best of our knowledge, this work is the first to systematically compare these aspects of visual representation learning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We use the MMFeat toolkit 1 (Kiela, 2016) to obtain image representations for three different convolutional network architectures: AlexNet , GoogLeNet (Szegedy et al., 2015) and VGGNet (Simonyan and Zisserman, 2014) . Image representations are turned into an overall word-level visual representation by either taking the mean or the elementwise maximum of the relevant image representations. All three networks are trained to maximize the multinomial logistic regression objective using mini-batch gradient descent with momentum:", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 41, |
| "text": "(Kiela, 2016)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 151, |
| "end": 173, |
| "text": "(Szegedy et al., 2015)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 185, |
| "end": 215, |
| "text": "(Simonyan and Zisserman, 2014)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architectures", |
| "sec_num": "2" |
| }, |
| { |
| "text": "\u2212 D i=1 K k=1 1{y (i) = k} log exp(\u03b8 (k) x (i) ) K j=1 exp(\u03b8 (j) x (i) )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architectures", |
| "sec_num": "2" |
| }, |
| { |
| "text": "where 1{\u2022} is the indicator function, x (i) and y (i) are the input and output, respectively. D is the number of training examples and K is the number of classes. The networks are trained on the ImageNet classification task and we transfer layers from the pre-trained network. See Table 1 for an overview. In this section, we describe the network architectures and their properties.", |
| "cite_spans": [ |
| { |
| "start": 40, |
| "end": 43, |
| "text": "(i)", |
| "ref_id": null |
| }, |
| { |
| "start": 50, |
| "end": 53, |
| "text": "(i)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 281, |
| "end": 288, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Architectures", |
| "sec_num": "2" |
| }, |
| { |
| "text": "AlexNet The network by introduces the following network architecture: first, there are five convolutional layers, followed by two fullyconnected layers, where the final layer is fed into a softmax which produces a distribution over the class labels. All layers apply rectified linear units (ReLUs) (Nair and Hinton, 2010) and use dropout for regularization . This network won the ILSVRC 2012 ImageNet classification challenge. In our case, we actually use the CaffeNet reference model, which is a replication of AlexNet, with the difference that it is not trained with relighting data-augmentation, and that the order of pooling and normalization layers is switched (in CaffeNet, pooling is done before normalization, Figure 1 : Example images for dog and golden retriever from the various data sources. ImageNet has no images for dog, with images only at nodes lower in the hierarchy. ESP does not have images for the golden retriever tag.", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 321, |
| "text": "(Nair and Hinton, 2010)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 718, |
| "end": 726, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Architectures", |
| "sec_num": "2" |
| }, |
| { |
| "text": "instead of the other way around). While it uses an almost identical architecture, performance of Caf-feNet is slightly better than the original AlexNet.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architectures", |
| "sec_num": "2" |
| }, |
| { |
| "text": "GoogLeNet The ILSVRC 2014 challenge winning GoogLeNet (Szegedy et al., 2015) uses \"inception modules\" as a network-in-network method (Lin et al., 2013) for enhancing model discriminability for local patches within the receptive field. It uses much smaller receptive fields and explicitly focuses on efficiency: while it is much deeper than AlexNet, it has fewer parameters. Its architecture consists of two convolutional layers, followed by inception layers that culminate into an average pooling layer that feeds into the softmax decision (so it has no fully connected layers). Dropout is only applied on the final layer. All connections use rectifiers.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 76, |
| "text": "(Szegedy et al., 2015)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 133, |
| "end": 151, |
| "text": "(Lin et al., 2013)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architectures", |
| "sec_num": "2" |
| }, |
| { |
| "text": "VGGNet The ILSVRC 2015 ImageNet classification challenge was won by VGGNet (Simonyan and Zisserman, 2014) . Like GoogLeNet, it is much deeper than AlexNet and uses smaller receptive fields. It has many more parameters than the other networks. It consists of a series of convolutional layers followed by the fully connected ones. All layers are rectified and dropout is applied to the first two fully connected layers.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 105, |
| "text": "(Simonyan and Zisserman, 2014)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architectures", |
| "sec_num": "2" |
| }, |
| { |
| "text": "These networks were selected because they are very well-known in the computer vision community. They exhibit interesting qualitative differences in terms of their depth (i.e., the number of layers), the number of parameters, regularization methods and the use of fully connected layers. They have all been winning network architectures in the ILSVRC ImageNet classification challenges.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Architectures", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Some systematic studies of parameters for textbased distributional methods have found that the source corpus has a large impact on representational quality (Bullinaria and Levy, 2007; Kiela and Clark, 2014) . The same is likely to hold in the case of (Deng et al., 2009) and the ESP Game dataset (von Ahn and Dabbish, 2004), but most works use a single data source. In this study, one of our objectives is to asses the quality of various sources of image data. Table 2 provides an overview of the data sources, and Figure 1 shows some example images. We examine the following corpora:", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 183, |
| "text": "(Bullinaria and Levy, 2007;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 184, |
| "end": 206, |
| "text": "Kiela and Clark, 2014)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 251, |
| "end": 270, |
| "text": "(Deng et al., 2009)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 461, |
| "end": 469, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sources of Image Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Google Images Google's image search 2 results have been found to be comparable to hand-crafted image datasets (Fergus et al., 2005) .", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 131, |
| "text": "(Fergus et al., 2005)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sources of Image Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Bing Images An alternative image search engine is Bing Images 3 . It uses different underlying technology from Google Images, but offers the same functionality as an image search engine.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sources of Image Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Flickr Although Bergsma and Goebel (2011) 1995), by attaching images to the corresponding synset (synonym set).", |
| "cite_spans": [ |
| { |
| "start": 16, |
| "end": 41, |
| "text": "Bergsma and Goebel (2011)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sources of Image Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "ESP Game The ESP Game dataset (von Ahn and Dabbish, 2004) was constructed through a so-called \"game with a purpose\". Players were matched online and had to agree on an appropriate word label for a randomly selected image within a time limit. Once a word has been mentioned a certain number of times, that word becomes a taboo word and can no longer be used as a label. These data sources have interesting qualitative differences. Online services return images for almost any query, with much better coverage than the fixed-size ImageNet and ESP Game datasets. Search engines annotate automatically, while the others are human-annotated, either through a strict annotation procedure in the case of ImageNet, or by letting users tag images, as in the case of Flickr and ESP. Automatic systems sort images by relevance, while the others are unsorted. The relevance ranking method is not accessible, however, and so has to be treated as a black box. Search results can be language-specific, while the human annotated data sources are restricted to English. Google and Bing will return images that were ranked highly, while Flickr contains photos rather than just any kind of image. ImageNet contains high-quality images descriptive of a given synset, meaning that the tagged object is likely to be centered in the image, while the ESP Game and Flickr images may have tags describing events happening in the background also.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sources of Image Data", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Selecting images for Google, Bing and Flickr is straightforward: using their respective APIs, the desired word is given as the search query and we obtain the top N returned images (unless otherwise indicated, we use N=10). In the case of ImageNet and ESP, images are not ranked and vary greatly in number: for some words there is only a single image, while others have thousands. With ImageNet, we are faced with the additional problem that images tend to be associated only with leaf nodes in the hierarchy. For example, dog has no directly associated images, while its hyponyms (e.g. golden retriever, labrador) have many. If a word has no associated images in its subtree, we try going up one level and seeing if the parent node's tree yields any images. We subsequently randomly sample 100 images associated with the word and obtain semi-ranked re-sults by selecting the 10 images closest to the median representation as the sampled image representations. We use the same method for the ESP Game dataset. In all cases, images are resized and centercropped to ensure that they are the correct size input.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Selecting and processing images", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Representation quality in semantics is usually evaluated using intrinsic datasets of human similarity and relatedness judgments. Model performance is assessed through the Spearman \u03c1 s rank correlation between the system's similarity scores for a given pair of words, together with human judgments. Here, we evaluate on two well-known similarity and relatedness judgment datasets: MEN (Bruni et al., 2012) and SimLex-999 (Hill et al., 2015) . MEN focuses explicitly on relatedness (i.e. coffee-tea and coffee-mug get high scores, while bakery-zebra gets a low score), while SimLex-999 focuses on what it calls \"genuine\" similarity (i.e., coffee-tea gets a high score, while both coffee-mug and bakery-zebra get low scores). They are standard evaluations for evaluating representational quality in semantics.", |
| "cite_spans": [ |
| { |
| "start": 384, |
| "end": 404, |
| "text": "(Bruni et al., 2012)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 420, |
| "end": 439, |
| "text": "(Hill et al., 2015)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In each experiment, we examine performance of the visual representations compared to text-based representations, as well as performance of the multimodal representation that fuses the two. In this case, we apply mid-level fusion, concatenating the L2-normalized representations (Bruni et al., 2014) . Middle fusion is a popular technique in multi-modal semantics that has several benefits: 1) it allows for drawing from different data sources for each modality, that is, it does not require joint data; 2) concatenation is less susceptible to noise, since it preserves the information in the individual modalities; and 3) it is straightforward to apply and computationally inexpensive. Linguistic representations are 300-dimensional and are obtained by applying skipgram with negative sampling (Mikolov et al., 2013) to a recent dump of Wikipedia. The normalization step that is performed before applying fusion ensures that both modalities contribute equally to the overall multi-modal representation.", |
| "cite_spans": [ |
| { |
| "start": 278, |
| "end": 298, |
| "text": "(Bruni et al., 2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 794, |
| "end": 816, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "As Table 3 shows, the data sources vary in coverage: it would be unfair to compare data sources on the different subsets of the evaluation datasets that they have coverage for. That is, when comparing data sources we want to make sure we evaluate on images for the exact same word pairs. When comparing network architectures, however, we are less interested in the relative coverage between datasets and more interested in overall performance, in such a way that it can be compared to other work that was evaluated on the fully covered datasets. Hence, we report results on the maximally covered subsets per data source, which we refer to as MEN and SimLex, as well as for the overlapping common subset of word pairs that have images in each of the sources, which we refer to as MEN* and SimLex*. Table 4 shows the results on the maximally covered datasets. This means we cannot directly compare between data sources, because they have different coverage, but we can look at absolute performance and compare network architectures. The first row reports results for the text-based linguistic representations that were obtained from Wikipedia (repeated across columns for convenience). For each of the three architectures, we evaluate on SimLex (SL) and MEN, using either the mean (Mean) or elementwise maximum (Max) method for aggregating image representations into visual ones (see Section 2). For each data source, we report results for the visual representations, as well as for the multi-modal representations that fuse the visual and textual ones together. Performance across architectures is remarkably stable: we have had to report results up to three decimal points to show the difference in performance in some cases. For each of the network architectures, we see a marked improvement of multi-modal representations over uni-modal linguistic representations. In many cases, we also see visual representations outperforming linguistic ones, especially on SimLex. 
This is interesting, because e.g. Google and Bing have full coverage over the datasets, so their visual representations include highly abstract words, which does not appear to have an adverse impact on the method's performance. For the ESP Game dataset (on which performance is quite low) and ImageNet, we observe an increase in performance as we move to the right in the table. Interestingly, VGGNet on ImageNet scores very highly, which seems to indicate that VGGNet is somehow more \"specialized\" on ImageNet than the others. The difference between mean and max aggregation is relatively small, although the former seems to work better for Sim-Lex while the latter does slightly better for MEN. Table 5 shows the results on the common subset of the evaluation datasets, where all word pairs have images in each of the data sources. First, note the same patterns as before: multi-modal representa-tions perform better than linguistic ones. Even for the poorly performing ESP Game dataset, the VG-GNet representations perform better on both Sim-Lex and MEN (bottom right of the table). Visual representations from Google, Bing, Flickr and Im-ageNet all perform much better than ESP Game on this common covered subset. In a sense, the fullcoverage datasets were \"punished\" for their ability to return images for abstract words in the previous experiment: on this subset, which is more concrete, the search engines do much better. To a certain extent, including linguistic information is actually detrimental to performance, with multi-modal performing worse than purely visual. Again, we see the marked improvement with VGGNet for ImageNet, while Google, Bing and Flickr all do very well, regardless of the architecture.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 797, |
| "end": 804, |
| "text": "Table 4", |
| "ref_id": "TABREF7" |
| }, |
| { |
| "start": 2667, |
| "end": 2674, |
| "text": "Table 5", |
| "ref_id": "TABREF9" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "These numbers indicate the robustness of the approach: we find that multi-modal representation learning yields better performance across the board: for different network architectures, different data sources and different aggregation methods. If computational efficiency or memory usage are issues, then GoogLeNet or AlexNet are the best choices. The ESP Game dataset does not appear to work very well, and is best avoided. If we have the right coverage, then ImageNet gives good results, especially if we can use VGGNet. However, coverage is often the main issue, in which case Google and Bing yield images that are comparable or even better than images from the carefully annotated ImageNet.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Common subset comparison", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Another question is the number of images we want to use: does performance increase with more images? Is it always better to have seen 100 cats instead of only 10, or do we have enough information after having seen one or two already? There is an obvious trade-off here, since downloading and processing images takes time (and may incur financial costs). This experiment only applies to relevancesorted data sources: the image selection procedure for ImageNet and ESPGame is more about removing outliers than about finding the best possible images.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Number of images", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "As Figure 2 shows, it turns out that the optimal number of images stabilizes surprisingly quickly: around 10-20 images appears to be enough, and in some cases already too many. Performance across networks does not vary dramatically when using more images, but in the case of Flickr images on the MEN dataset, performance drops significantly as the number of images increases.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 11, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Number of images", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Although there are some indicators that visual representation learning extends to other languages, particularly in the case of bilingual lexicon learning (Bergsma and Van Durme, 2011; Kiela et al., 2015b; Vuli\u0107 et al., 2016) , this has not been shown directly on the same set of human similarity and relatedness judgments. In order to examine the multi-lingual applicability of our findings, we train linguistic representations on recent dumps of the English and Italian Wikipedia. We then search for 10 images per word on Google and Bing, while setting the language to English or Italian. We compare the results on the original SimLex, and the Italian version from Leviant and Reichart (2015) .", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 183, |
| "text": "(Bergsma and Van Durme, 2011;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 184, |
| "end": 204, |
| "text": "Kiela et al., 2015b;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 205, |
| "end": 224, |
| "text": "Vuli\u0107 et al., 2016)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 666, |
| "end": 693, |
| "text": "Leviant and Reichart (2015)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-and cross-lingual applicability", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "Similarly, we examine a cross-lingual scenario, where we translate Italian words into English using Google Translate. We then obtain images for the translated words and extract visual representations. These cross-lingual visual representations are sub- sequently evaluated on the Italian version of Sim-Lex. Since we know that performance across architectures is similar, we use AlexNet representations. The results can be found in Table 6 . We find the same pattern: in all cases, visual and multi-modal representations outperform linguistic ones. The Italian version of SimLex appears to be more difficult than the English version. Google performs better than Bing, especially on the Italian evaluations. For Google, the cross-lingual scenario works better, while Bing yields better results in the multilingual setting where we use the language itself instead of mapping to English. Although somewhat preliminary, these results clearly indicate that multimodal semantics can fruitfully be applied to languages other than English.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 432, |
| "end": 439, |
| "text": "Table 6", |
| "ref_id": "TABREF11" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multi-and cross-lingual applicability", |
| "sec_num": "5.4" |
| }, |
| { |
| "text": "The objective of this study has been to systematically compare network architectures and data sources for multi-modal systems. In particular, we focused on the capabilities of deep visual representations in capturing semantics, as measured by correlation with human similarity and relatedness judgments. Our findings can be summarized as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 We examined AlexNet, GoogLeNet and VGGNet, all three recent winners of the ILSVRC ImageNet classification challenge (Russakovsky et al., 2015) , and found that they perform very similarly. If efficiency or memory are issues, AlexNet or GoogLeNet are the most suitable architectures. For overall best performance, AlexNet and VGGNet are the best choices.", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 144, |
| "text": "(Russakovsky et al., 2015)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 The choice of data sources appeared to have a bigger impact: Google, Bing, Flickr and Im-ageNet were much better than the ESP Game dataset. Google, Flickr and Bing have the advantage that they have potentially unlimited coverage. Google and Bing are particularly suited to full-coverage experiments, even when these include abstract words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 We found that the number of images has an impact on performance, but that it stabilizes at around 10-20 images, indicating that it is usually not necessary to obtain more than 10 images per word. For Flickr, obtaining more images is detrimental to performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "\u2022 Lastly, we established that these findings extend to other languages beyond English, obtaining the same findings on an Italian version of SimLex using the Italian Wikipedia. We examined both the multi-lingual setting where we obtain search results using the Italian language and a cross-lingual setting where we mapped Italian words to English and retrieved images for those.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "This work answers several open questions in multi-modal semantics and we hope that it will serve as a guide for future research in the field. It is important to note that the multi-modal results only apply to the mid-level fusion method of concatenating normalized vectors: although these findings are indicative of performance for other fusion methods, different architectures or data sources may be more suitable for different fusion methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In future work, downstream tasks should be addressed: it is good that multi-modal semantics improves performance on intrinsic evaluations, but it is important to show its practical benefits in more applied tasks as well. Understanding what it is that makes these representations perform so well is another important and yet unanswered question. We hope that this work may be used as a reference in determining some of the choices that can be made when developing multi-modal models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and future work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "https://github.com/douwekiela/mmfeat", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://images.google.com/ 3 https://www.bing.com/images 4 https://www.flickr.com", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Anita Ver\u0151 is supported by the Nuance Foundation Grant: Learning Type-Driven Distributed Representations of Language. Stephen Clark is supported by the ERC Starting Grant: DisCoTex (306920).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Using visual information to predict lexical preference", |
| "authors": [ |
| { |
| "first": "Shane", |
| "middle": [], |
| "last": "Bergsma", |
| "suffix": "" |
| }, |
| { |
| "first": "Randy", |
| "middle": [], |
| "last": "Goebel", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of RANLP", |
| "volume": "", |
| "issue": "", |
| "pages": "399--405", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shane Bergsma and Randy Goebel. 2011. Using visual information to predict lexical preference. In Proceed- ings of RANLP, pages 399-405.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning bilingual lexicons using the visual similarity of labeled web images", |
| "authors": [ |
| { |
| "first": "Shane", |
| "middle": [], |
| "last": "Bergsma", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "1764--1769", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shane Bergsma and Benjamin Van Durme. 2011. Learn- ing bilingual lexicons using the visual similarity of la- beled web images. In IJCAI, pages 1764-1769.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Do you see what i mean? visual resolution of linguistic ambiguities", |
| "authors": [ |
| { |
| "first": "Yevgeni", |
| "middle": [], |
| "last": "Berzak", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrei", |
| "middle": [], |
| "last": "Barbu", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Harari", |
| "suffix": "" |
| }, |
| { |
| "first": "Boris", |
| "middle": [], |
| "last": "Katz", |
| "suffix": "" |
| }, |
| { |
| "first": "Shimon", |
| "middle": [], |
| "last": "Ullman", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yevgeni Berzak, Andrei Barbu, Daniel Harari, Boris Katz, and Shimon Ullman. 2015. Do you see what i mean? visual resolution of linguistic ambiguities. In Proceedings of EMNLP.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Distributional semantics in technicolor", |
| "authors": [ |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Gemma", |
| "middle": [], |
| "last": "Boleda", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| }, |
| { |
| "first": "Nam-Khanh", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "136--145", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elia Bruni, Gemma Boleda, Marco Baroni, and Nam- Khanh Tran. 2012. Distributional semantics in tech- nicolor. In ACL, pages 136-145.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Multimodal distributional semantics", |
| "authors": [ |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Nam-Khanh", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Journal of Artifical Intelligence Research", |
| "volume": "49", |
| "issue": "", |
| "pages": "1--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elia Bruni, Nam-Khanh Tran, and Marco Baroni. 2014. Multimodal distributional semantics. Journal of Artif- ical Intelligence Research, 49:1-47.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Vision and Feature Norms: Improving automatic feature norm learning through cross-modal maps", |
| "authors": [ |
| { |
| "first": "Luana", |
| "middle": [], |
| "last": "Bulat", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT 2016", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luana Bulat, Douwe Kiela, and Stephen Clark. 2016. Vision and Feature Norms: Improving automatic fea- ture norm learning through cross-modal maps. In Pro- ceedings of NAACL-HLT 2016, San Diego, CA.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Extracting Semantic Representations from Word Co-occurrence Statistics: A computational study", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "A" |
| ], |
| "last": "Bullinaria", |
| "suffix": "" |
| }, |
| { |
| "first": "Joseph", |
| "middle": [ |
| "P" |
| ], |
| "last": "Levy", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Behavior Research Methods", |
| "volume": "39", |
| "issue": "", |
| "pages": "510--526", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John A. Bullinaria and Joseph P. Levy. 2007. Extracting Semantic Representations from Word Co-occurrence Statistics: A computational study. Behavior Research Methods, 39:510-526.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Vector Space Models of Lexical Meaning", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Handbook of Contemporary Semantic Theory", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Clark. 2015. Vector Space Models of Lexical Meaning. In Shalom Lappin and Chris Fox, editors, Handbook of Contemporary Semantic Theory, chap- ter 16. Wiley-Blackwell, Oxford.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "ImageNet: A large-scale hierarchical image database", |
| "authors": [ |
| { |
| "first": "Jia", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Li-Jia", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei-Fei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "248--255", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Fei-Fei Li. 2009. ImageNet: A large-scale hi- erarchical image database. In Proceedings of CVPR, pages 248-255.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Learning object categories from Google's image search", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Fergus", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei-Fei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Pietro", |
| "middle": [], |
| "last": "Perona", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Zisserman", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of ICCV", |
| "volume": "", |
| "issue": "", |
| "pages": "1816--1823", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Fergus, Fei-Fei Li, Pietro Perona, and Andrew Zisserman. 2005. Learning object categories from Google's image search. In Proceedings of ICCV, pages 1816-1823.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "The symbol grounding problem", |
| "authors": [ |
| { |
| "first": "Stevan", |
| "middle": [], |
| "last": "Harnad", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Physica D", |
| "volume": "42", |
| "issue": "", |
| "pages": "335--346", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stevan Harnad. 1990. The symbol grounding problem. Physica D, 42:335-346.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Simlex-999: Evaluating semantic models with (genuine) similarity estimation", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Hill, Roi Reichart, and Anna Korhonen. 2015. Simlex-999: Evaluating semantic models with (gen- uine) similarity estimation. Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Improving neural networks by preventing coadaptation of feature detectors", |
| "authors": [ |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Geoffrey E Hinton", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Srivastava", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan R", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1207.0580" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Geoffrey E Hinton, Nitish Srivastava, Alex Krizhevsky, Ilya Sutskever, and Ruslan R Salakhutdinov. 2012. Improving neural networks by preventing co- adaptation of feature detectors. arXiv preprint arXiv:1207.0580.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Learning image embeddings using convolutional neural networks for improved multi-modal semantics", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "36--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela and L\u00e9on Bottou. 2014. Learning image embeddings using convolutional neural networks for improved multi-modal semantics. In Proceedings of EMNLP, pages 36-45.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "A Systematic Study of Semantic Vector Space Model Parameters", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EACL 2014, Workshop on Continuous Vector Space Models and their Compositionality (CVSC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela and Stephen Clark. 2014. A Systematic Study of Semantic Vector Space Model Parameters. In Proceedings of EACL 2014, Workshop on Contin- uous Vector Space Models and their Compositionality (CVSC).", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Exploiting image generality for lexical entailment detection", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Rimell", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "119--124", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela, Laura Rimell, Ivan Vuli\u0107, and Stephen Clark. 2015a. Exploiting image generality for lexical entailment detection. In Proceedings of ACL, pages 119-124, Beijing, China, July. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Visual bilingual lexicon induction with transferred convnet features", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "148--158", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela, Ivan Vuli\u0107, and Stephen Clark. 2015b. Vi- sual bilingual lexicon induction with transferred con- vnet features. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Process- ing, pages 148-158, Lisbon, Portugal, September. As- sociation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Mmfeat: A toolkit for extracting multi-modal features", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela. 2016. Mmfeat: A toolkit for extracting multi-modal features. In Proceedings of ACL 2016.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "ImageNet classification with deep convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Krizhevsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "1106--1114", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E. Hinton. 2012. ImageNet classification with deep convolutional neural networks. In Proceedings of NIPS, pages 1106- 1114.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Combining language and vision with a multimodal skipgram model", |
| "authors": [ |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Lazaridou", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Nghia The", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Pham", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angeliki Lazaridou, Nghia The Pham, and Marco Baroni. 2015. Combining language and vision with a multi- modal skipgram model. In Proceedings of NAACL.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Gradient-based learning applied to document recognition", |
| "authors": [ |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Haffner", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "Proceedings of the IEEE", |
| "volume": "86", |
| "issue": "11", |
| "pages": "2278--2324", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yann LeCun, L\u00e9on Bottou, Yoshua Bengio, and Patrick Haffner. 1998. Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11):2278-2324.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Deep learning", |
| "authors": [ |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Lecun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Nature", |
| "volume": "521", |
| "issue": "7553", |
| "pages": "436--444", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yann LeCun, Yoshua Bengio, and Geoffrey Hinton. 2015. Deep learning. Nature, 521(7553):436-444.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Judgment language matters: Multilingual vector space models for judgment language aware lexical semantics", |
| "authors": [ |
| { |
| "first": "Ira", |
| "middle": [], |
| "last": "Leviant", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1508.00106" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ira Leviant and Roi Reichart. 2015. Judgment language matters: Multilingual vector space models for judg- ment language aware lexical semantics. arXiv preprint arXiv:1508.00106.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Network in network", |
| "authors": [ |
| { |
| "first": "Min", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Qiang", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuicheng", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Min Lin, Qiang Chen, and Shuicheng Yan. 2013. Net- work in network. CoRR, abs/1312.4400.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Distinctive image features from scale-invariant keypoints", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [ |
| "G" |
| ], |
| "last": "Lowe", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "International Journal of Computer Vision", |
| "volume": "60", |
| "issue": "2", |
| "pages": "91--110", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David G. Lowe. 2004. Distinctive image features from scale-invariant keypoints. International Journal of Computer Vision, 60(2):91-110.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word representa- tions in vector space. In Proceedings of ICLR, Scotts- dale, Arizona, USA.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "WordNet: A lexical database for English", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "George", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Communications of the ACM", |
| "volume": "38", |
| "issue": "11", |
| "pages": "39--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A. Miller. 1995. WordNet: A lexical database for English. Communications of the ACM, 38(11):39-41.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Rectified linear units improve restricted boltzmann machines", |
| "authors": [ |
| { |
| "first": "Vinod", |
| "middle": [], |
| "last": "Nair", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "807--814", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vinod Nair and Geoffrey E Hinton. 2010. Rectified lin- ear units improve restricted boltzmann machines. In Proceedings of ICML, pages 807-814.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "A multimodal LDA model integrating textual, cognitive and visual modalities", |
| "authors": [ |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Roller", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabine", |
| "middle": [], |
| "last": "Schulte Im Walde", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1146--1157", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stephen Roller and Sabine Schulte im Walde. 2013. A multimodal LDA model integrating textual, cogni- tive and visual modalities. In Proceedings of EMNLP, pages 1146-1157.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "ImageNet Large Scale Visual Recognition Challenge", |
| "authors": [ |
| { |
| "first": "Olga", |
| "middle": [], |
| "last": "Russakovsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Jia", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Krause", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Satheesh", |
| "suffix": "" |
| }, |
| { |
| "first": "Sean", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiheng", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrej", |
| "middle": [], |
| "last": "Karpathy", |
| "suffix": "" |
| }, |
| { |
| "first": "Aditya", |
| "middle": [], |
| "last": "Khosla", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Bernstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [ |
| "C" |
| ], |
| "last": "Berg", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "International Journal of Computer Vision (IJCV)", |
| "volume": "115", |
| "issue": "3", |
| "pages": "211--252", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexan- der C. Berg, and Li Fei-Fei. 2015. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision (IJCV), 115(3):211-252.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Black holes and white rabbits: Metaphor identification with visual features", |
| "authors": [ |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Maillard", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ekaterina Shutova, Douwe Kiela, and Jean Maillard. 2016. Black holes and white rabbits: Metaphor iden- tification with visual features. In Proceedings of NAACL-HTL 2016, San Diego. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Learning grounded meaning representations with autoencoders", |
| "authors": [ |
| { |
| "first": "Carina", |
| "middle": [], |
| "last": "Silberer", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "721--732", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Carina Silberer and Mirella Lapata. 2014. Learning grounded meaning representations with autoencoders. In Proceedings of ACL, pages 721-732.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Very deep convolutional networks for large-scale image recognition", |
| "authors": [ |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Simonyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Zisserman", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1409.1556" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karen Simonyan and Andrew Zisserman. 2014. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Video google: A text retrieval approach to object matching in videos", |
| "authors": [ |
| { |
| "first": "Josef", |
| "middle": [], |
| "last": "Sivic", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Zisserman", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of ICCV", |
| "volume": "", |
| "issue": "", |
| "pages": "1470--1477", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Josef Sivic and Andrew Zisserman. 2003. Video google: A text retrieval approach to object matching in videos. In Proceedings of ICCV, pages 1470-1477.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Going deeper with convolutions", |
| "authors": [ |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Szegedy", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yangqing", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Sermanet", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Reed", |
| "suffix": "" |
| }, |
| { |
| "first": "Dragomir", |
| "middle": [], |
| "last": "Anguelov", |
| "suffix": "" |
| }, |
| { |
| "first": "Dumitru", |
| "middle": [], |
| "last": "Erhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Vanhoucke", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Rabinovich", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "1--9", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Ser- manet, Scott Reed, Dragomir Anguelov, Dumitru Er- han, Vincent Vanhoucke, and Andrew Rabinovich. 2015. Going deeper with convolutions. In Proceed- ings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1-9.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "From Frequency to Meaning: vector space models of semantics", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [ |
| "D" |
| ], |
| "last": "Turney", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Pantel", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "37", |
| "issue": "1", |
| "pages": "141--188", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter D. Turney and Patrick Pantel. 2010. From Fre- quency to Meaning: vector space models of semantics. Journal of Artificial Intelligence Research, 37(1):141- 188, January.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Labeling images with a computer game", |
| "authors": [ |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Von Ahn", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Dabbish", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "CHI", |
| "volume": "", |
| "issue": "", |
| "pages": "319--326", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luis von Ahn and Laura Dabbish. 2004. Labeling im- ages with a computer game. In CHI, pages 319-326.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Multi-modal representations for improved bilingual lexicon learning", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Francine", |
| "middle": [], |
| "last": "Moens", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107, Douwe Kiela, Marie-Francine Moens, and Stephen Clark. 2016. Multi-modal representations for improved bilingual lexicon learning. In Proceedings of ACL, Berlin, Germany. Association for Computa- tional Linguistics.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Planet - photo geolocation with convolutional neural networks", |
| "authors": [ |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Weyand", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Kostrikov", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Philbin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tobias Weyand, Ilya Kostrikov, and James Philbin. 2016. Planet - photo geolocation with convolutional neural networks. CoRR, abs/1602.05314.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "uris": null, |
| "type_str": "figure", |
| "text": "The effect of the number of images on representation quality.", |
| "num": null |
| }, |
| "TABREF1": { |
| "text": "Network architectures. Layer counts only include layers with parameters.", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "text": "Sources of image data.", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "text": "have found that Google Images works better in one experiment, the photo sharing service Flickr 4 is an interesting data source because its images are tagged by human annotators.", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td/><td colspan=\"2\">MEN (3000) SimLex (999)</td></tr><tr><td>Google</td><td>3000</td><td>999</td></tr><tr><td>Bing</td><td>3000</td><td>999</td></tr><tr><td>Flickr</td><td>3000</td><td>999</td></tr><tr><td>ImageNet</td><td>1326</td><td>373</td></tr><tr><td>ESPGame</td><td>2927</td><td>833</td></tr><tr><td>Common subset</td><td>1310</td><td>360</td></tr><tr><td>ImageNet ImageNet (Deng et al., 2009) is a large ontology of images developed for a variety of com-</td><td/><td/></tr><tr><td>puter vision applications. It serves as a benchmark-</td><td/><td/></tr><tr><td>ing standard for various image processing and com-</td><td/><td/></tr><tr><td>puter vision tasks. ImageNet is constructed along</td><td/><td/></tr><tr><td>the same hierarchical structure as WordNet (Miller,</td><td/><td/></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "text": "Coverage on MEN and SimLex for our data sources.", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF7": { |
| "text": "", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF9": { |
| "text": "", |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "type_str": "table" |
| }, |
| "TABREF11": { |
| "text": "", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>: Performance on English and Italian Sim-</td></tr><tr><td>Lex, either in the multi-lingual setting (M) or the</td></tr><tr><td>cross-lingual setting (C) where we first map to En-</td></tr><tr><td>glish.</td></tr></table>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |