| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T01:13:29.335213Z" |
| }, |
| "title": "Toward General Scene Graph: Integration of Visual Semantic Knowledge with Entity Synset Alignment", |
| "authors": [ |
| { |
| "first": "Woo Suk", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Seoul National University", |
| "location": {} |
| }, |
| "email": "wschoi@bi.snu.ac.kr" |
| }, |
| { |
| "first": "Kyoung-Woon", |
| "middle": [], |
| "last": "On", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Seoul National University", |
| "location": {} |
| }, |
| "email": "kwon@bi.snu.ac.kr" |
| }, |
| { |
| "first": "Yu-Jung", |
| "middle": [], |
| "last": "Heo", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Seoul National University", |
| "location": {} |
| }, |
| "email": "yjheo@bi.snu.ac.kr" |
| }, |
| { |
| "first": "Byoung-Tak", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Seoul National University AI Institute", |
| "location": {} |
| }, |
| "email": "btzhang@bi.snu.ac.kr" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Scene graph is a graph representation that explicitly represents high-level semantic knowledge of an image such as objects, attributes of objects and relationships between objects. Various tasks have been proposed for the scene graph, but the problem is that they have a limited vocabulary and biased information due to their own hypothesis. Therefore, results of each task are not generalizable and difficult to be applied to other downstream tasks. In this paper, we propose Entity Synset Alignment(ESA), which is a method to create a general scene graph by aligning various semantic knowledge efficiently to solve this bias problem. The ESA uses a large-scale lexical database, WordNet and Intersection of Union (IoU) to align the object labels in multiple scene graphs/semantic knowledge. In experiment, the integrated scene graph is applied to the image-caption retrieval task as a downstream task. We confirm that integrating multiple scene graphs helps to get better representations of images.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Scene graph is a graph representation that explicitly represents high-level semantic knowledge of an image such as objects, attributes of objects and relationships between objects. Various tasks have been proposed for the scene graph, but the problem is that they have a limited vocabulary and biased information due to their own hypothesis. Therefore, results of each task are not generalizable and difficult to be applied to other downstream tasks. In this paper, we propose Entity Synset Alignment(ESA), which is a method to create a general scene graph by aligning various semantic knowledge efficiently to solve this bias problem. The ESA uses a large-scale lexical database, WordNet and Intersection of Union (IoU) to align the object labels in multiple scene graphs/semantic knowledge. In experiment, the integrated scene graph is applied to the image-caption retrieval task as a downstream task. We confirm that integrating multiple scene graphs helps to get better representations of images.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Beyond detecting and recognizing individual objects, research for understanding visual scenes is moving toward extracting semantic knowledge to create scene graph from natural images. Starting with (Krishna et al., 2017) , various studies have been proposed to generate this semantic knowledge from images (Zellers et al., 2018; Xu et al., 2017; Liang et al., 2019; Anderson et al., 2018) . However, each study extracts only highly biased information from an image due to the limited vocabulary depending on their own hypothesis and the statistical bias of the dataset. For example, in (Anderson et al., 2018) , the author conducted a study on extracting information of both object and attribute for each entity using 1,600 object and 400 attribute class labels. In addition, (Zellers et al., 2018; Xu et al., 2017 ) generate a relationship between objects in a form of triplet (head entitypredicate -tail entity) in an image by using 150 object and 50 predicate class labels. In (Liang et al., 2019) , the author constructed a Visually-Relevant Relationships(VrR-VG) based on (Krishna et al., 2017) to mine more valuable relationships with 1600 objects and 117 predicate class labels. As such, each task defines and uses its own vocabulary, but the problem is that the vocabulary is limited. As shown in Figure 1 ,If some of objects in an image do not belong to the dataset-specific vocabulary, objects as well as relations are omitted frequently even though they are in an image. In addition, there are cases where the same object is defined with different vocabulary in a common image (e.g. man, person).", |
| "cite_spans": [ |
| { |
| "start": 198, |
| "end": 220, |
| "text": "(Krishna et al., 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 306, |
| "end": 328, |
| "text": "(Zellers et al., 2018;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 329, |
| "end": 345, |
| "text": "Xu et al., 2017;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 346, |
| "end": 365, |
| "text": "Liang et al., 2019;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 366, |
| "end": 388, |
| "text": "Anderson et al., 2018)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 586, |
| "end": 609, |
| "text": "(Anderson et al., 2018)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 776, |
| "end": 798, |
| "text": "(Zellers et al., 2018;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 799, |
| "end": 814, |
| "text": "Xu et al., 2017", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 980, |
| "end": 1000, |
| "text": "(Liang et al., 2019)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1077, |
| "end": 1099, |
| "text": "(Krishna et al., 2017)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1305, |
| "end": 1313, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we propose Entity Synset Alignment (ESA) to perform scene graph integration. With a large-scale lexical database WordNet and IoU, the ESA aligns the entity labels in scene graphs generated from each dataset. The contributions of the method proposed in this paper are as follows: 1) Scene graphs can be generated from raw image inputs, 2) integrating multiple scene graphs inferred from each dataset into one via ESA, 3) the qualitative results show that an integrated scene graph can extract richer semantic information in an image, 4) quantitative results show the significance of integrated scene graph by applying integrated scene graph to image-caption retrieval task.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "BottomUp-VG. Bottom-Up VG is a bottom-up attention model that extracts information of both object and attribute for each entity with 1,600 object and 400 attribute class labels from Visual Genome(VG). VG200. VG200 introduced by (Xu et al., 2017 ) is a filtered version of the original VG scene graph dataset. It contains 150 object and 50 predicate class labels in 108,077 images, and consists of an average of 11.5 distinct objects and 6.2 predicates per image. VrR-VG. Visually-Relevant Relationships (VrR-VG) introduced by (Liang et al., 2019) is constructed to highlight visually-relevant relationships using visual discriminator to learn the notion of visually-relevant. WordNet. WordNet, a large lexical database of English, is an ontology that summarizes a relationship between words and has been integrated into the Natural Language ToolKit. Nouns, verbs, adjectives and adverbs are grouped into sets of cognitive synonyms (synsets), each representing intrinsic concept.", |
| "cite_spans": [ |
| { |
| "start": 228, |
| "end": 244, |
| "text": "(Xu et al., 2017", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 526, |
| "end": 546, |
| "text": "(Liang et al., 2019)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As shown in Figure 2 , we employ bottom-up attention (Anderson et al., 2018) model to generate only nodes containing information of both object and attribute, and CompTransR model to generate scene graphs from raw images. Entity Synset Alignment(ESA) integrates scene graphs generated from each dataset. We introduce a simple model, CompTransR, for scene graph generation in Section 3.1 and a scene graph integration technique, Entity Synset Alignment(ESA) in Section 3.2.", |
| "cite_spans": [ |
| { |
| "start": 53, |
| "end": 76, |
| "text": "(Anderson et al., 2018)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 20, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Compositional Translation Embedding combines the well-known Knowledge Graph embedding algorithms (i.e., TransR (Lin et al., 2015) ) to learn the semantic relationships between two entities in a scene graph. Here, we apply transitive constraints to predict the semantic predicate labels in multiple symbolic subspaces by learning compositional representations of the relationships. As an entity fea-ture, we extract visual, positional, and categorical features from a detected bounding box in a given image, and concatenate them into one. Then, entity features are transformed to head(h) and tail(t) features through single feed-forward neural network. The feature vectors of head and tail are projected into multiple latent relational subspaces. We aim to disentangle the semantic space of the sub-relation labels. The predicate representation r s \u2248 t s \u2212h s is defined on each latent relational space s. All r s on the subspaces are summed out to predict predicate labels between two entities. Entity Synset Alignment is an algorithm that integrates scene graphs generated from each dataset by using label alignment and Intersection of Union (IoU). In label alignment process, we use a synset, a set of synonym(lemma, hypernym, and hyponym) that shares a common meaning in WordNet, to align two entity labels. The method using synset compares whether an entity label in a scene graph is the same entity label in other scene graph, and aligns. If the entity label is same vocabulary or in the synset of entity label for other scene graph, then IoU calculation is implemented to check whether it indicates same entity. The detailed procedure is shown in Algorithm 1. ", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 129, |
| "text": "(Lin et al., 2015)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Compositional Translational Embedding", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "In Table 1 , we measure the average and max number of object, relation, and attribute with various combinations of scene graph datasets. Default VG200 has 12.53 average number of object and 62 max number of object, default BottomUp-VG has 26.35 average number of object and 55 for max, and default VrR-VG has 36.77 average number of object and 167 max number of object. The most key section of Table 1 is the average number of object and relation in integrating three datasets increased. This result implies that integrating three scene graphs into one scene graph can get more richer scene graph.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| }, |
| { |
| "start": 394, |
| "end": 401, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments 4.1 Scene Graph Statistics", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To verify the usefulness of our algorithm, we suggest an image-caption retrieval task (Kiros et al., 2014) as an application of scene graphs. The imagecaption retrieval task needs visual-semantic embeddings, which is obtained by mapping the image features and caption features into joint embedding space. A general approach for this task is to obtain image features and caption features with pretrained model (such as VGGNet (Simonyan and Zisserman, 2014) for images and S-BERT (Reimers and Gurevych, 2019) for captions), then to learn mapping both to joint embedding space for maximizing similarities. In our case, we substitute image features from the pre-trained CNN model to scene-graphs and learn the representations of scenegraphs with simple 2-layer Graph Convolution Networks (Kipf and Welling, 2016). Following (Faghri et al., 2017) , we use the Max of Hinge loss for train-ing:", |
| "cite_spans": [ |
| { |
| "start": 86, |
| "end": 106, |
| "text": "(Kiros et al., 2014)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 425, |
| "end": 455, |
| "text": "(Simonyan and Zisserman, 2014)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 478, |
| "end": 506, |
| "text": "(Reimers and Gurevych, 2019)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 820, |
| "end": 841, |
| "text": "(Faghri et al., 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Image-Caption Retrieval Task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "l M H (i, c) = max c [\u03b1 + s(i, c ) \u2212 (i, c)] + +max i [\u03b1 + s(i , c) \u2212 (i, c)] +", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Image-Caption Retrieval Task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where i and c are image features and caption features in joint embedding space, s(x, y) is innerproduct similarity function for x and y, [x] \u2261 max(x, 0) and \u03b1 serves as a margin parameter.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Image-Caption Retrieval Task", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Figure 3 shows each generated scene graph for an image and an integrated scene graph generated. In each scene graph, person is presented as person in BottomUp-VG, but woman in VG200 and VrR-VG. Furthermore, phone and tree(s) nodes are in BottomUp-VG and VrR-VG, but not in VG200. On the other hand, BottomUp-VG and VrR-VG have grass node but not in VG200. In integrated scene graph, each node has an attribute of each object such as color and some entities such as person or tree are aligned via ESA. For the setting of qualitative results, we limit the number of relation(predicate) between objects to top 20 in generated each scene graph.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Qualitative Results", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "To obtain both captions and scene-graphs for images, we select subset of images, called VG-COCO, belongs to both MS COCO dataset (Lin et al., 2014 ) (for captions) and Visual Genome (VG) dataset (Krishna et al., 2017) (for scene graphs). We manually split the VG-COCO dataset with 24,763 train, 1,000 validation and 1,470 test images. To evaluate the performance of image-caption retrieval task, we introduce Recall@K(R@K), i.e., the fraction of queries for which the correct item is retrieved in the closest K points to the query in the embedding space. We adopt R@1, R@5, R@10 metrics, as used in (Faghri et al., 2017) . First, to understand the effectiveness of scene graph based approach, we compare graph based method (GCN based) to CNN based model (Resnet-152). ResNet-152 trains the whole CNN networks, starting from pretrained model parameters. Here, we note that graph based method shows superior performance than the CNN based model, even though the graph based model exploits the simple two-layer graph convolution operations.", |
| "cite_spans": [ |
| { |
| "start": 129, |
| "end": 146, |
| "text": "(Lin et al., 2014", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 599, |
| "end": 620, |
| "text": "(Faghri et al., 2017)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Quantitative Results", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "Second, we evaluate our proposed method with various combinations of VG200, VrR-VG and BottomUp-VG. The results show that integrated scene graph generally works better than default scene graph. The overall quantitative results for image-caption retrieval are presented in Table 2 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 272, |
| "end": 279, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Quantitative Results", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "In this paper, we present a simple and efficient method to integrate multiple visual semantic knowledge into general scene graph. With a large-scale lexical database WordNet and IoU, the ESA aligns the entity labels in scene graphs generated from each dataset. The integrated scene graph has richer information and is less biased. To evaluate our proposal, we conduct the image-caption retrieval task as a down-stream task and show better performance than each scene graph. For future work, we plan to integrate more diverse visual semantic knowledge such as Human-object interaction (Gkioxari et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 584, |
| "end": 607, |
| "text": "(Gkioxari et al., 2018)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was partly supported by the Institute for Information and Communications Technology Promotion (2015-0-00310-SW.StarLab, 2017-0-01772-VTT, 2018-0-00622-RMI, 2019-0-01367-BabyMind) and Korea Institute for Advancement Technology (P0006720-GENKO) grant funded by the Korea government.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Bottom-up and top-down attention for", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Anderson", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaodong", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Buehler", |
| "suffix": "" |
| }, |
| { |
| "first": "Damien", |
| "middle": [], |
| "last": "Teney", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Gould", |
| "suffix": "" |
| }, |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Anderson, Xiaodong He, Chris Buehler, Damien Teney, Mark Johnson, Stephen Gould, and Lei Zhang. 2018. Bottom-up and top-down attention for", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Each scene graph (c),(d),(e) generated from inference models are combined into an integrated scene graph (b) for an image (a). image captioning and visual question answering", |
| "authors": [], |
| "year": null, |
| "venue": "Figure 3: Qualitative results for our Entity Synset Alignment(ESA) method with Top 20 relations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Figure 3: Qualitative results for our Entity Synset Alignment(ESA) method with Top 20 relations. Each scene graph (c),(d),(e) generated from inference models are combined into an integrated scene graph (b) for an image (a). image captioning and visual question answering. In CVPR.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Vse++: Improving visualsemantic embeddings with hard negatives", |
| "authors": [ |
| { |
| "first": "Fartash", |
| "middle": [], |
| "last": "Faghri", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [ |
| "Ryan" |
| ], |
| "last": "Fleet", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanja", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fidler", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1707.05612" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fartash Faghri, David J Fleet, Jamie Ryan Kiros, and Sanja Fidler. 2017. Vse++: Improving visual- semantic embeddings with hard negatives. arXiv preprint arXiv:1707.05612.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Detecting and recognizing human-object interactions", |
| "authors": [ |
| { |
| "first": "Georgia", |
| "middle": [], |
| "last": "Gkioxari", |
| "suffix": "" |
| }, |
| { |
| "first": "Ross", |
| "middle": [], |
| "last": "Girshick", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Doll\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaiming", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "8359--8367", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Georgia Gkioxari, Ross Girshick, Piotr Doll\u00e1r, and Kaiming He. 2018. Detecting and recognizing human-object interactions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8359-8367.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Semisupervised classification with graph convolutional networks", |
| "authors": [ |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Kipf", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Welling", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1609.02907" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas N Kipf and Max Welling. 2016. Semi- supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Unifying visual-semantic embeddings with multimodal neural language models", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Kiros", |
| "suffix": "" |
| }, |
| { |
| "first": "Ruslan", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "S" |
| ], |
| "last": "Zemel", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1411.2539" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Kiros, Ruslan Salakhutdinov, and Richard S Zemel. 2014. Unifying visual-semantic embeddings with multimodal neural language models. arXiv preprint arXiv:1411.2539.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Visual genome: Connecting language and vision using crowdsourced dense image annotations", |
| "authors": [ |
| { |
| "first": "Ranjay", |
| "middle": [], |
| "last": "Krishna", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuke", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Oliver", |
| "middle": [], |
| "last": "Groth", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Hata", |
| "suffix": "" |
| }, |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Kravitz", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephanie", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yannis", |
| "middle": [], |
| "last": "Kalantidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Li-Jia", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "A" |
| ], |
| "last": "Shamma", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Journal of Computer Vision", |
| "volume": "123", |
| "issue": "1", |
| "pages": "32--73", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin John- son, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. 2017. Visual genome: Connecting language and vision using crowdsourced dense image anno- tations. International Journal of Computer Vision, 123(1):32-73.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Vrr-vg: Refocusing visually-relevant relationships", |
| "authors": [ |
| { |
| "first": "Yuanzhi", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yalong", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xueming", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Mei", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the IEEE International Conference on Computer Vision", |
| "volume": "", |
| "issue": "", |
| "pages": "10403--10412", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuanzhi Liang, Yalong Bai, Wei Zhang, Xueming Qian, Li Zhu, and Tao Mei. 2019. Vrr-vg: Refocusing visually-relevant relationships. In Proceedings of the IEEE International Conference on Computer Vi- sion, pages 10403-10412.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Microsoft coco: Common objects in context", |
| "authors": [ |
| { |
| "first": "Tsung-Yi", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Maire", |
| "suffix": "" |
| }, |
| { |
| "first": "Serge", |
| "middle": [], |
| "last": "Belongie", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Hays", |
| "suffix": "" |
| }, |
| { |
| "first": "Pietro", |
| "middle": [], |
| "last": "Perona", |
| "suffix": "" |
| }, |
| { |
| "first": "Deva", |
| "middle": [], |
| "last": "Ramanan", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Doll\u00e1r", |
| "suffix": "" |
| }, |
| { |
| "first": "C Lawrence", |
| "middle": [], |
| "last": "Zitnick", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "European conference on computer vision", |
| "volume": "", |
| "issue": "", |
| "pages": "740--755", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In European confer- ence on computer vision, pages 740-755. Springer.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Learning entity and relation embeddings for knowledge graph completion", |
| "authors": [ |
| { |
| "first": "Yankai", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Twenty-ninth AAAI conference on artificial intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yankai Lin, Zhiyuan Liu, Maosong Sun, Yang Liu, and Xuan Zhu. 2015. Learning entity and relation embeddings for knowledge graph completion. In Twenty-ninth AAAI conference on artificial intelli- gence.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
| "authors": [ |
| { |
| "first": "Nils", |
| "middle": [], |
| "last": "Reimers", |
| "suffix": "" |
| }, |
| { |
| "first": "Iryna", |
| "middle": [], |
| "last": "Gurevych", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1908.10084" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. arXiv preprint arXiv:1908.10084.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Very deep convolutional networks for large-scale image recognition", |
| "authors": [ |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Simonyan", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Zisserman", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1409.1556" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Karen Simonyan and Andrew Zisserman. 2014. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Scene graph generation by iterative message passing", |
| "authors": [ |
| { |
| "first": "Danfei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuke", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Choy", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "5410--5419", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danfei Xu, Yuke Zhu, Christopher B Choy, and Li Fei- Fei. 2017. Scene graph generation by iterative mes- sage passing. In Proceedings of the IEEE Confer- ence on Computer Vision and Pattern Recognition, pages 5410-5419.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Neural motifs: Scene graph parsing with global context", |
| "authors": [ |
| { |
| "first": "Rowan", |
| "middle": [], |
| "last": "Zellers", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Yatskar", |
| "suffix": "" |
| }, |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "5831--5840", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rowan Zellers, Mark Yatskar, Sam Thomson, and Yejin Choi. 2018. Neural motifs: Scene graph pars- ing with global context. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recog- nition, pages 5831-5840.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "text": "An example of scene graph for a common image from Visual Genome 200 (VG200) and Visually-Relevant Relationship (VrR-VG) dataset.", |
| "type_str": "figure" |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "text": "An overview of framework which integrates visual semantic knowledge with Entity Synset Alignment(ESA). (a) A raw image goes into inference models as an input. (b) Inference models(Bottom-up attention and CompTransR) generate (c) scene graphs from each dataset(VG, VG200, VrR-VG). (e) Integrated scene graph is built as an output via (d) Entity Synset Alignment method.", |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td>Method</td><td colspan=\"6\">Number of object Number of relation Number of attributes Avg. Max Avg. Max Avg. Max</td></tr><tr><td>VG200</td><td>12.53</td><td>62</td><td>50.0</td><td>50</td><td>0.0</td><td>0</td></tr><tr><td>VrR-VG</td><td>36.77</td><td>167</td><td>50.0</td><td>50</td><td>0.0</td><td>0</td></tr><tr><td>BU-VG</td><td>26.35</td><td>55</td><td>0.0</td><td>0</td><td>26.35</td><td>55</td></tr><tr><td>VG200 \u2227 VrR-VG</td><td>37.00</td><td>167</td><td>100</td><td>100.0</td><td>0.0</td><td>0</td></tr><tr><td>VG200 \u2227 BU-VG</td><td>27.21</td><td>66</td><td>44.39</td><td>50</td><td>26.35</td><td>55</td></tr><tr><td>VrR-VG \u2227 BU-VG</td><td>42.04</td><td>141</td><td>29.57</td><td>50</td><td>26.35</td><td>55</td></tr><tr><td>VG200 \u2227 VrR-VG \u2227 BU-VG</td><td>41.95</td><td>127</td><td>79.67</td><td>100</td><td>26.35</td><td>55</td></tr></table>", |
| "num": null, |
| "text": "The average and max number of object, relation and attribute with various combinations of scene graph datasets." |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "html": null, |
| "content": "<table><tr><td/><td>Method</td><td colspan=\"6\">Caption Retrieval R@1 R@5 R@10 R@1 Image Retrieval R@5 R@10</td></tr><tr><td>CNN based</td><td>ResNet-152</td><td>26.9</td><td>65.1</td><td>79.4</td><td>24.2</td><td>36.4</td><td>39.9</td></tr><tr><td/><td>VG200</td><td>22.2</td><td>57.6</td><td>73.2</td><td>19.7</td><td>34.6</td><td>39.5</td></tr><tr><td/><td>VrR-VG</td><td>28.1</td><td>66.2</td><td>80.4</td><td>23.2</td><td>37.2</td><td>40.9</td></tr><tr><td/><td>BU-VG</td><td>27.0</td><td>65.4</td><td>80.6</td><td>23.1</td><td>37.0</td><td>40.7</td></tr><tr><td>GCN based</td><td>VG200 \u2227 VrR-VG</td><td>29.3</td><td>67.6</td><td>81.9</td><td>23.4</td><td>37.4</td><td>41.0</td></tr><tr><td/><td>VG200 \u2227 BU-VG</td><td>29.4</td><td>68.7</td><td>82.8</td><td>24.1</td><td>37.5</td><td>41.1</td></tr><tr><td/><td>VrR-VG \u2227 BU-VG</td><td>27.9</td><td>70.5</td><td>83.2</td><td>23.7</td><td>37.7</td><td>41.4</td></tr><tr><td/><td>VG200 \u2227 VrR-VG \u2227 BU-VG</td><td>27.2</td><td>70.0</td><td>82.4</td><td>24.7</td><td>37.7</td><td>41.0</td></tr></table>", |
| "num": null, |
| "text": "Quantitative results for our method on image-to-caption retrieval(caption retrieval) and caption-to-image retrieval(image retrieval) task. BU-VG is an abbreviation of BottomUp-VG." |
| } |
| } |
| } |
| } |