diff --git "a/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/layout.json" "b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/layout.json" new file mode 100644--- /dev/null +++ "b/2023/Weakly-supervised HOI Detection via Prior-guided Bi-level Representation Learning/layout.json" @@ -0,0 +1,12731 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 507, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 507, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 507, + 116 + ], + "type": "text", + "content": "WEAKLY-SUPERVISED HOI DETECTION VIA PRIOR-GUIDED BI-LEVEL REPRESENTATION LEARNING" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "spans": [ + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": "Bo Wan " + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{1,*}" + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": ", Yongfei Liu " + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": ", Desen Zhou " + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": ", Tinne Tuytelaars " + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "text", + "content": ", Xuming He " + }, + { + "bbox": [ + 110, + 133, + 432, + 146 + ], + "type": "inline_equation", + "content": "^{2,3}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "spans": [ + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "text", + "content": " KU Leuven, Leuven, Belgium; " + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "text", + "content": " ShanghaiTech University, Shanghai, China \n" + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 112, + 147, + 443, + 193 + ], + "type": "text", + "content": " Shanghai Engineering Research Center of Intelligent Vision and Imaging {bwan, tinne.tuytelaars}@esat.kuleuven.be {liuyf3,zhouds,hexm}@shanghaitech.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 276, + 206, + 335, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 206, + 335, + 217 + ], + "spans": [ + { + "bbox": [ + 276, + 206, + 335, + 217 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 224, + 471, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 224, + 471, + 378 + ], + "spans": [ + { + "bbox": [ + 140, + 224, + 471, + 378 + ], + "type": "text", + "content": "Human object interaction (HOI) detection plays a crucial role in human-centric scene 
understanding and serves as a fundamental building-block for many vision tasks. One generalizable and scalable strategy for HOI detection is to use weak supervision, learning from image-level annotations only. This is inherently challenging due to ambiguous human-object associations, large search space of detecting HOIs and highly noisy training signal. A promising strategy to address those challenges is to exploit knowledge from large-scale pretrained models (e.g., CLIP), but a direct knowledge distillation strategy (Liao et al., 2022) does not perform well on the weakly-supervised setting. In contrast, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge at both image level and HOI instance level, and adopt a self-taught mechanism to prune incorrect human-object associations. Experimental results on HICO-DET and V-COCO show that our method outperforms the previous works by a sizable margin, showing the efficacy of our HOI representation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 394, + 206, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 394, + 206, + 406 + ], + "spans": [ + { + "bbox": [ + 105, + 394, + 206, + 406 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 414, + 506, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 414, + 506, + 470 + ], + "spans": [ + { + "bbox": [ + 104, + 414, + 506, + 470 + ], + "type": "text", + "content": "Human object interaction detection aims to simultaneously localize the human-object regions in an image and to classify their interactions, which serves as a fundamental building-block in a wide range of tasks in human-centric artificial intelligence, such as human activity recognition (Heilbron et al., 2015; Tina et al., 2021), human motion tracking (Wafae et al., 2019; Nishimura et al., 2021) and anomalous behavior detection (Liu et al., 2018; Pang et al., 2020)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "text", + "content": "Usually, HOI detection adopts a supervised learning paradigm (Gupta & Malik, 2015; Chao et al., 2018; Wan et al., 2019; Gao et al., 2020; Zhang et al., 2021c). This requires detailed annotations (i.e. human and object bounding boxes and their interaction types) in the training stage. However, such HOI annotations are expensive to collect and prone to labeling errors. In contrast, it is much easier to acquire image-level descriptions of target scenes. Consequently, a more scalable strategy for HOI detection is to learn from weak annotations at the image level, known as weakly-supervised HOI detection (Zhang et al., 2017). Learning under such weak supervision is particularly challenging mainly due to the lack of accurate visual-semantic associations, large search space of detecting HOIs and highly noisy training signal from only image level supervision." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 579, + 506, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 506, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 712 + ], + "type": "text", + "content": "Most existing works (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) attempt to tackle the weakly-supervised HOI detection in a Multiple Instance Learning (MIL) framework (Ilse et al., 2018). They first utilize an object detector to generate human-object proposals and then train an interaction classifier with image-level labels as supervision. Despite promising results, these methods suffer from several weaknesses when coping with diverse and fine-grained HOIs. Firstly, they usually rely on visual representations derived from the external object detector, which mainly focus on the semantic concepts of the objects in the scene and hence are insufficient for capturing the concept of fine-grained interactions. Secondly, as the image-level supervision tends to ignore the imbalance in HOI classes, their representation learning is more susceptible to the dataset bias and dominated by frequent interaction classes. Finally, these methods learn the HOI concepts from a candidate set generated by pairing up all the human and object proposals, which is highly noisy and often leads to erroneous human-object associations for many interaction classes." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 436, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 436, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 436, + 732 + ], + "type": "text", + "content": "*Equal Contribution. Code is available at https://github.com/bobwan1995/Weakly-HOI." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": "To address the aforementioned limitations, we introduce a new weakly-supervised HOI detection strategy. It aims to incorporate the prior knowledge from pretrained foundation models to facilitate the HOI learning. In particular, we propose to integrate CLIP (Radford et al., 2021b), a large-scale vision-language pretrained model. This allows us to exploit the strong generalization capability of the CLIP representation for learning a better HOI representation under weak supervision. Compared to the representations learned by the object detector, the CLIP representations are inherently less object-centric, hence more likely to incorporate also aspects about the human-object interaction, as evidenced by Appendix A. 
Although a few works have successfully exploited CLIP for supervised HOI detection in the past, experimentally we find they do not perform well in the more challenging weakly-supervised setting (c.f. Appendix.B). We hypothesize this is because they only transfer knowledge at image level, and fail without supervision at the level of human-object pairs." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 506, + 298 + ], + "type": "text", + "content": "To this end, we develop a CLIP-guided HOI representation capable of incorporating the prior knowledge of HOIs at two different levels. First, at the image level, we utilize the visual and linguistic embeddings of the CLIP model to build a global HOI knowledge bank and generate image-level HOI predictions. In addition, for each human-object pair, we enrich the region-based HOI features by the HOI representations in the knowledge bank via a novel attention mechanism. Such a bi-level framework enables us to exploit the image-level supervision more effectively through the shared HOI knowledge bank, and to enhance the interaction feature learning by introducing the visual and text representations of the CLIP model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 302, + 506, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 302, + 506, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 506, + 402 + ], + "type": "text", + "content": "We instantiate our bi-level knowledge integration strategy as a modular deep neural network with a global and local branch. Given the human-object proposals generated by an off-the-shelf object detector, the global branch starts with a backbone network to compute image feature maps, which are used by a subsequent HOI recognition network to predict the image-wise HOI scores. The local branch builds a knowledge transfer network to extract the human-object features and augment them with the CLIP-guided knowledge bank, followed by a pairwise classification network to compute their relatedness and interaction scores1. The relatedness scores are used to prune incorrect human-object associations, which mitigates the issue of noisy proposals. Finally, the outputs of the two branches are fused to generate the final HOI scores." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 407, + 506, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 506, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 506, + 485 + ], + "type": "text", + "content": "To train our HOI detection network with image-level annotations, we first initialize the backbone network and the HOI knowledge bank from the CLIP encoders, and then train the entire model in an end-to-end manner. In particular, we devise a novel multi-task weak supervision loss consisting of three terms: 1) an image-level HOI classification loss for the global branch; 2) an MIL-like loss for the interaction scores predicted by the local branch, which is defined on the aggregate of all the human-object pair predictions; 3) a self-taught classification loss for the relatedness of each human-object pair, which uses the interaction scores from the model itself as supervision." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 490, + 507, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 507, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 507, + 578 + ], + "type": "text", + "content": "We validate our methods on two public benchmarks: HICO-DET (Chao et al., 2018) and V-COCO (Gupta & Malik, 2015). The empirical results and ablative studies show our method consistently achieves state-of-the-art performance on all benchmarks. In summary, our contributions are three-fold: (i) We exploit the CLIP knowledge to build a prior-enriched HOI representation, which is more robust for detecting fine-grained interaction types and under imbalanced data distributions. (ii) We develop a self-taught relatedness classification loss to alleviate the problem of mis-association between human-object pairs. (iii) Our approach achieves state-of-the-art performance on the weakly-supervised HOI detection task on both benchmarks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 586, + 216, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 216, + 598 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 216, + 598 + ], + "type": "text", + "content": "2 RELATED WORKS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 602, + 507, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 602, + 507, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 602, + 507, + 703 + ], + "type": "text", + "content": "HOI detection: Most works on supervised HOI detection can be categorized in two groups: two-stage and one-stage HOI detection. Two-stage methods first generate a set of human-object proposals with an external object detector, then classify their interactions. They mainly focus on exploring additional human pose information (Wan et al., 2019; Li et al., 2020a; Gupta et al., 2019), pairwise relatedness (Li et al., 2019a; Zhou et al., 2020) or modeling relations between object and human (Gao et al., 2020; Zhang et al., 2021c; Ulutan et al., 2020; Zhou & Chi, 2019), to enhance the HOI representations. One-stage methods predict human & object locations and their interaction types simultaneously in an end-to-end manner, which are currently dominated by transformer-based architectures (Carion et al., 2020; Kim et al., 2022; Dong et al., 2022; Zhang et al., 2021a;b)." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "inline_equation", + "content": "{}^{1}" + }, + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "text", + "content": " Relatedness indicates whether a human-object pair has a relation, and interaction scores are multi-label scores on the interaction space." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 125, + 81, + 483, + 258 + ], + "blocks": [ + { + "bbox": [ + 125, + 81, + 483, + 258 + ], + "lines": [ + { + "bbox": [ + 125, + 81, + 483, + 258 + ], + "spans": [ + { + "bbox": [ + 125, + 81, + 483, + 258 + ], + "type": "image", + "image_path": "e651bacab4fc1cd655ae1937f2758bab90cffc62a4962ef321be81b8fb18d4d7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 262, + 504, + 283 + ], + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 283 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 283 + ], + "type": "text", + "content": "Figure 1: Model Overview: There are four modules in our network: a backbone Network, an HOI recognition network, a knowledge transfer network and a pairwise classification network." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 295, + 504, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 329 + ], + "type": "text", + "content": "Supervised methods show superior performance, but require labor-intensive HOI annotations that are infeasible to obtain in many scenarios. Thus, in this work we focus on HOI detection under weak supervision." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 335, + 506, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 335, + 506, + 490 + ], + "spans": [ + { + "bbox": [ + 104, + 335, + 506, + 490 + ], + "type": "text", + "content": "Weakly-supervised HOI detection: Weakly-supervised HOI detection aims to learn instance-level HOIs with only image-level annotations. (Prest et al., 2011) learns a set of binary action classifiers based on detected human-object pairs, where human proposal is obtained from a part-based human detector and object is derived from the relative position with respect to the human. PPR-FCN (Zhang et al., 2017) employs a parallel FCN to perform pair selection and classification. Explainable-HOI (Baldassarre et al., 2020) adopts graph nets to capture relations for better image-level HOI recognition, and uses backward explanation for instance-level HOI detection. MX-HOI (Kumaraswamy et al., 2021) proposes a momentum-independent learning strategy to utilize strong & weak labels simultaneously. AlignFormer (Kilickaya & Smeulders, 2021) proposes an align layer in transformer framework, which utilizes geometric & visual priors to generate pseudo alignments for training. Those methods focus on learning HOIs with advanced network structures or better pseudo alignments. However, they still suffer from noisy human-object associations and ambiguous interaction types. To address those challenges, we exploit prior knowledge from CLIP to build a discriminative HOI representations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 496, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 496, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 496, + 506, + 608 + ], + "type": "text", + "content": "Knowledge exploitation of pretrained V&L models: Recently, CLIP (Radford et al., 2021a) model has demonstrated strong generalization to various downstream tasks (Ghiasi et al., 2021; Du et al., 2022; Gu et al., 2021). Some works also explore CLIP knowledge in supervised HOI detection, e.g., CATN (Dong et al., 2022) initializes the object query with category-aware semantic information from CLIP text encoder, and GEN-VLTK (Liao et al., 2022) employs image feature distillation and classifier initialization with HOI prompts. However, they only exploit CLIP knowledge at a coarse level and require detailed annotations of human-object pairs. It is non-trivial to extend such strategies to the weak supervision paradigm due to highly noisy training signals. In our work, we build a deep connection between CLIP and HOI representation by incorporating the prior knowledge of HOIs at both image and HOI instance levels." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 618, + 173, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 618, + 173, + 629 + ], + "spans": [ + { + "bbox": [ + 105, + 618, + 173, + 629 + ], + "type": "text", + "content": "3 METHOD" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 637, + 312, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 637, + 312, + 647 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 312, + 647 + ], + "type": "text", + "content": "3.1 PROBLEM SETUP AND METHOD OVERVIEW" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": "Problem setup Given an input image " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ", the task of weakly-supervised HOI detection aims to localize and recognize the human-object interactions, while only the corresponding image-level HOI categories are available for training. Formally, we aim to learn a HOI detector " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ", which takes an image " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " as input and generates a set of tuples " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{O} = \\{(\\mathbf{x}_h,\\mathbf{x}_o,c_o,a_{h,o},R_{h,o}^a)\\}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{O} = \\mathcal{M}(I)" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ". 
Here each tuple indicates a HOI instance, in which " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h,\\mathbf{x}_o\\in \\mathbb{R}^4" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " represent human and object bounding boxes, " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "c_{o}\\in \\{1,\\dots,C\\}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " is the object category, " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "a_{h,o}\\in \\{1,\\dots,A\\}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " denotes the interaction class associated with " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "inline_equation", + "content": "R_{h,o}^{a}\\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 652, + 506, + 734 + ], + "type": "text", + "content": " is the HOI score. For the weakly-supervised setting," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": "each training image is annotated with a set of HOI categories " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "\\mathcal{R} = \\{r^{*}\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": " at the image level only, where " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "r^{*} \\in \\{1, \\dots, N\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": " is an index to a combination of ground-truth object category " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "c^{*}" + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": " and interaction category " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "a^{*}" + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "inline_equation", + "content": "N" + }, + 
{ + "bbox": [ + 104, + 82, + 506, + 117 + ], + "type": "text", + "content": " denotes the number of all possible HOI combinations defined on the dataset." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 118, + 506, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 118, + 506, + 164 + ], + "spans": [ + { + "bbox": [ + 104, + 118, + 506, + 164 + ], + "type": "text", + "content": "Method Overview As we lack supervision for the HOI locations, we adopt a typical hypothesize-and-recognize strategy (Zhang et al., 2017; Baldassarre et al., 2020; Kumaraswamy et al., 2021) for HOI detection: first we generate a set of human and object proposals with an off-the-shelf object detector (Ren et al., 2015) and then predict the interaction class for all human-object combinations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 168, + 506, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 168, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 168, + 506, + 236 + ], + "type": "text", + "content": "Unlike other methods, we do not re-use the feature maps of the object or human detector - we only keep the bounding boxes. Instead, we learn a new representation optimized for the HOI task. This is challenging under the weak setting as the model learning is noisy, but feasible by leveraging the rich semantic knowledge from a pretrained large-scale multimodal model, like CLIP. However, the naive knowledge integration strategies for supervised setting fail when directly applied in the weak setting, as evidenced by our experiments in Appendix.B" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 240, + 506, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 240, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 506, + 308 + ], + "type": "text", + "content": "Our framework adopts two philosophies to address the challenges in the weakly-supervised HOI task: the first is to integrate the prior knowledge into discriminative representation learning, and the second is to suppress noise in learning. For the first philosophy, we utilize the prior knowledge from CLIP to guide the representation learning in both global image-level and fine-grained human-object pairs, which is instantiated by a bi-level knowledge integration strategy. For the second philosophy, we adopt an effective self-taught learning mechanism to suppress the irrelevant pairs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "spans": [ + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "content": "We instantiate the bi-level knowledge integration strategy with a two-branch deep network. Our detection pipeline starts with a set of human proposals with detection scores " + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "inline_equation", + "content": "\\{(\\mathbf{x}_h, s_h)\\}" + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "content": ", and object proposals with their categories and detection scores " + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "inline_equation", + "content": "\\{(\\mathbf{x}_o, c_o, s_o)\\}" + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "content": ". 
Then, the global branch performs image-level HOI recognition by utilizing a CLIP-initialized HOI knowledge bank as a classifier. This allows us to exploit both visual and text encoders from CLIP to generate better HOI representations. In parallel, for each human-object pair " + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_h, \\mathbf{x}_o)" + }, + { + "bbox": [ + 104, + 312, + 506, + 390 + ], + "type": "text", + "content": ", the local branch explicitly augments the pairwise HOI features with the HOI knowledge bank to then identify their relatedness and interaction classes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 395, + 506, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 395, + 506, + 440 + ], + "spans": [ + { + "bbox": [ + 104, + 395, + 506, + 440 + ], + "type": "text", + "content": "To train our model, we use a multi-task loss, which incorporates a HOI recognition loss defined on image-wise HOIs for the visual encoder and knowledge bank finetuning, and a self-taught relatedness classification for suppressing the background human-object associations, on top of the standard MIL-based loss. We first present model details in Sec.3.2, followed by the training strategy in Sec.3.3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 443, + 201, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 443, + 201, + 454 + ], + "spans": [ + { + "bbox": [ + 105, + 443, + 201, + 454 + ], + "type": "text", + "content": "3.2 MODEL DESIGN" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 458, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 458, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 458, + 506, + 581 + ], + "type": "text", + "content": "Now we introduce our bi-level knowledge integration strategy, where the aim is to exploit CLIP textual embeddings of HOI labels as a HOI knowledge bank for the HOI representation learning, and to transfer such knowledge both at image level as well as at the level of human-object pairs for interaction predictions. Specifically, as shown in Fig. 1, our network consists of a global branch and a local branch. The global branch includes a backbone network (Sec.3.2.1) that extracts image features, and a HOI recognition network (Sec.3.2.2) that uses a HOI knowledge bank based on CLIP to predict image-level HOI scores. For each human-object proposal generated by an off-the-shelf object detector, the local branch employs a knowledge transfer network (Sec.3.2.3) to compute its feature representation with enhancement from the HOI knowledge bank, and a pairwise classification network (Sec.3.2.4) to compute their relatedness and interaction scores. Finally, we generate the final HOI detection scores by combining global HOI scores with local predictions (Sec. 3.2.5)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": "HOI Knowledge Bank Generation CLIP builds a powerful vision-language model by pretraining on large-scale image-text pairs. 
It consists of a visual encoder " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_V" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": " and textual encoder " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_T" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": ", mapping both visual and textual inputs to a shared latent space. Here, we exploit CLIP to generate a HOI knowledge bank. We take a similar prompt strategy as in CLIP, adopting a common template 'a person {verb} a/an {object}' to convert HOI labels into text prompts (e.g., converting 'drive car' to 'a person driving a car'). Then we input the sentences into the CLIP textual encoder " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_T" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": " to initialize the HOI knowledge bank " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T \\in \\mathbb{R}^{N \\cdot D}" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": " denoting the feature dimension. One can think of " + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 585, + 506, + 674 + ], + "type": "text", + "content": " as a set of 'prototypes' in feature space, one for each HOI in the dataset." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 682, + 318, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 682, + 318, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 682, + 318, + 693 + ], + "type": "text", + "content": "3.2.1 GLOBAL BRANCH: BACKBONE NETWORK" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": "To incorporate CLIP for feature extraction, we initialize the backbone network (e.g., a ResNet-101 (He et al., 2016)) with CLIP's visual encoder " + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_V" + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": " to generate a feature map " + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": " for the input image " + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": ". We further compute a global feature vector " + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "v_{g} \\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": " with self-attention operation (Radford et al., 2021b)." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 82, + 310, + 185 + ], + "blocks": [ + { + "bbox": [ + 136, + 82, + 310, + 185 + ], + "lines": [ + { + "bbox": [ + 136, + 82, + 310, + 185 + ], + "spans": [ + { + "bbox": [ + 136, + 82, + 310, + 185 + ], + "type": "image", + "image_path": "f3349fb8251160198ddf86acc80f35bdf49add88f16e617e0264756fb346c105.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 170, + 186, + 283, + 196 + ], + "lines": [ + { + "bbox": [ + 170, + 186, + 283, + 196 + ], + "spans": [ + { + "bbox": [ + 170, + 186, + 283, + 196 + ], + "type": "text", + "content": "(a) knowledge transfer network" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 312, + 82, + 468, + 185 + ], + "blocks": [ + { + "bbox": [ + 312, + 82, + 468, + 185 + ], + "lines": [ + { + "bbox": [ + 312, + 82, + 468, + 185 + ], + "spans": [ + { + "bbox": [ + 312, + 82, + 468, + 185 + ], + "type": "image", + "image_path": "43ccc2a33ded6bf714fc9c749ae5107721836e3e4420f610ab7a8b7c1b4c5370.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 186, + 462, + 196 + ], + "lines": [ + { + "bbox": [ + 318, + 186, + 462, + 196 + ], + "spans": [ + { + "bbox": [ + 318, + 186, + 462, + 196 + ], + "type": "text", + "content": "(b) pseudo relatedness label generation" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 201, + 504, + 232 + ], + "lines": [ + { + "bbox": [ + 104, + 201, + 504, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 504, + 232 + ], + "type": "text", + "content": "Figure 2: The knowledge transfer network explicitly transfers the discriminative relation-level semantic knowledge derived from CLIP to the pairwise HOI representations. 
Pseudo relatedness label generation uses the pairwise interaction scores to generate the pseudo association labels for self-taught relatedness classification" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 245, + 352, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 245, + 352, + 255 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 352, + 255 + ], + "type": "text", + "content": "3.2.2 GLOBAL BRANCH: HOI RECOGNITION NETWORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": "We perform an image-wise HOI recognition task with the HOI knowledge bank " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": ". We obtain global HOI scores " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "s_g \\in \\mathbb{R}^N" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": " by computing the inner product between the image feature " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "v_g" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": " and the knowledge bank " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "s_g = \\mathcal{W}_T \\times v_g" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 104, + 262, + 504, + 318 + ], + "type": "text", + "content": " is matrix multiplication. This has the effect of adapting the visual encoder and knowledge bank parameters to the HOI recognition task, fully taking advantage of the knowledge from CLIP." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 328, + 367, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 328, + 367, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 367, + 338 + ], + "type": "text", + "content": "3.2.3 LOCAL BRANCH: KNOWLEDGE TRANSFER NETWORK" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "type": "text", + "content": "Given the CLIP-initialized visual encoder, a standard HOI representation can be formed by concatenating the human and object appearance features along with their spatial encoding. However, even after the finetuning as described above, such a representation still mainly focuses on object-level semantic cues rather than relation-level concepts. 
In this module, we explicitly exploit the HOI knowledge bank " + }, + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 344, + 504, + 411 + ], + "type": "text", + "content": " to learn a local relation-specific HOI representation. To achieve this, we propose an attention-based architecture as shown in Fig.2(a)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": "Specifically, for each human proposal " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " and object proposal " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": ", we use RoI-Align (He et al., 2017) to crop the feature maps from " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " followed by a self-attention operation to compute their appearance features " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "v_h, v_o \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": ". Then we compute a spatial feature " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "v_{sp}" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " by encoding the relative positions of their bounding boxes " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_h, \\mathbf{x}_o)^2" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": ". The holistic HOI representation " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "v_p \\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " is an embedding of the human and object appearance features and their spatial feature, i.e., " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "v_p = \\mathcal{F}_E([v_h; v_o; v_{sp}])" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "[\\cdot]" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " is the concatenation operation and " + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_E" + }, + { + "bbox": [ + 104, + 416, + 504, + 484 + ], + "type": "text", + "content": " is a multi-layer perceptron (MLP)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": "To enhance relation-level concepts, we further compute its union region " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_u\\in \\mathbb{R}^4" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " (see Fig. 2a) and extract the corresponding appearance feature " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{u}\\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " via RoI-align over the feature map " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": ". The union region is important as it encodes relational context cues, but it potentially also contains a large amount of background that is noisy for model learning. We thus devise an attention module that is similar in design to the HOI recognition network, but uses the union feature " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{u}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " as query to extract a meta-embedding " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{meta}\\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " from the HOI knowledge bank " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": ". The final HOI representation " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\hat{v}_p\\in \\mathbb{R}^D" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " is built by fusing the holistic representation " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{p}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "v_{meta}" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": " with a MLP " + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_K" + }, + { + "bbox": [ + 104, + 489, + 504, + 567 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 149, + 574, + 504, + 587 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 574, + 504, + 587 + ], + "spans": [ + { + "bbox": [ + 149, + 574, + 504, + 587 + ], + "type": "interline_equation", + "content": "\\alpha = \\operatorname {S o f t m a x} \\left(\\mathcal {W} _ {T} \\times v _ {u}\\right); \\quad v _ {\\text {m e t a}} = \\alpha^ {\\intercal} \\times \\mathcal {W} _ {T}; \\quad \\hat {v} _ {p} = \\mathcal {F} _ {K} \\left(v _ {p} + v _ {\\text {m e t a}}\\right). 
\\tag {1}", + "image_path": "386c883a42547a86b1a4c6887dbfad85b678c63b27f4db5d235c9739282b40c4.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "inline_equation", + "content": "\\alpha \\in \\mathbb{R}^N" + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "content": " is the normalized attention weight and " + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "content": " is the transpose operation. " + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "inline_equation", + "content": "v_{meta}" + }, + { + "bbox": [ + 104, + 594, + 504, + 616 + ], + "type": "text", + "content": " encodes a discriminative representation from CLIP and facilitates feature sharing between HOI classes." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 627, + 378, + 638 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 627, + 378, + 638 + ], + "spans": [ + { + "bbox": [ + 105, + 627, + 378, + 638 + ], + "type": "text", + "content": "3.2.4 LOCAL BRANCH: PAIRWISE CLASSIFICATION NETWORK" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": "Given the relation-aware HOI representation " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "\\hat{v}_p" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": ", our final module performs a coarse-level classification on human-object association and a fine-level classification for interaction recognition. 
Specifically, we use two MLPs " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_P" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_B" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": " to predict the interaction scores " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "s_p \\in \\mathbb{R}^A" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": " and the relatedness score " + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "inline_equation", + "content": "s_b \\in \\mathbb{R}" + }, + { + "bbox": [ + 104, + 647, + 506, + 692 + ], + "type": "text", + "content": " for each human-object pair:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 242, + 698, + 504, + 712 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 698, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 242, + 698, + 504, + 712 + ], + "type": "interline_equation", + "content": "s _ {p} = \\mathcal {F} _ {P} (\\hat {v} _ {p}); \\quad s _ {b} = \\mathcal {F} _ {B} (\\hat {v} _ {p}) \\tag {2}", + "image_path": "6fe16a547ea607e052aab41a132d99bad327245c2aaf962bdc800fb9fcee83ba.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 234, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 234, + 731 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 234, + 731 + ], + "type": "text", + "content": "2For details c.f. the appendix C" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": "To train the model under weak supervision (see Sec. 3.3), we further aggregate the pairwise interaction scores into image-level interaction scores. Assume we have " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": " pairs of human-object proposals for a given image, and denote the interaction scores for the " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": "-th pair as " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "s_p^m" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": ". 
We first concatenate all the interaction scores to compose a bag " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "S = [s_p^1; \\ldots; s_p^M] \\in \\mathbb{R}^{M \\cdot A}" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": ", then we maximize over all pairs to obtain the image-wise interaction scores: " + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "inline_equation", + "content": "\\tilde{s}_p = \\max_m S" + }, + { + "bbox": [ + 104, + 82, + 506, + 146 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 148, + 226, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 148, + 226, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 226, + 159 + ], + "type": "text", + "content": "3.2.5 MODEL INFERENCE" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "spans": [ + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "content": "During model inference, we do not use the local interaction scores " + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "inline_equation", + "content": "s_p" + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "content": " directly. Instead, we normalize " + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "content": " with a Softmax operation defined on all pairs: " + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "inline_equation", + "content": "\\bar{S} = \\text{Softmax}(S)" + }, + { + "bbox": [ + 104, + 164, + 506, + 195 + ], + "type": "text", + "content": ", and then compute the normalized" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "content": "pairwise interaction scores " + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "inline_equation", + "content": "e_p = \\sigma(\\tilde{s}_p) \\cdot \\bar{s}_p" + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\bar{s}_p" + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "content": " is a row from " + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\bar{S}" + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 104, + 195, + 506, + 228 + ], + "type": "text", + "content": " is Sigmoid function. This has the effect of measuring the contribution of a given pair, in case of multiple pairs in an image share the same interaction." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": "The final interaction score " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "s_{h,o}^{a}" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": " for human-object pair " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_h,\\mathbf{x}_o)" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": " combines multiple scores, including the global HOI scores " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "s_g" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": ", the normalized pairwise interaction scores " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "e_p" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": ", and the relatedness score " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "s_b" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": ". The overall HOI score " + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "inline_equation", + "content": "R_{h,o}^{a}" + }, + { + "bbox": [ + 104, + 234, + 506, + 269 + ], + "type": "text", + "content": " is a combination of the interaction score and the object detection scores." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 194, + 275, + 505, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 275, + 505, + 291 + ], + "spans": [ + { + "bbox": [ + 194, + 275, + 505, + 291 + ], + "type": "interline_equation", + "content": "s _ {h, o} ^ {a} = \\sigma \\left(s _ {g} ^ {a, c _ {o}}\\right) \\cdot e _ {p} ^ {a} \\cdot \\sigma \\left(s _ {b}\\right); \\quad R _ {h, o} ^ {a} = \\left(s _ {h} \\cdot s _ {o}\\right) ^ {\\gamma} \\cdot s _ {h, o} ^ {a} \\tag {3}", + "image_path": "439dd52e29b854e3e7fa8bbfa471200fe572ae8f1a5804429cf1dc051c40e3c9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "s_g^{a,c_o}" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": " is the HOI score corresponding to " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": "-th interaction and " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "c_o" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": "-th object category in " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "s_g" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": 
"e_p^a" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": " is the score of " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": "-th interaction in " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "e_p" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 292, + 506, + 326 + ], + "type": "text", + "content": " is a hyper-parameter to balance the scores (Zhang et al., 2021c; Li et al., 2019b)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 331, + 294, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 294, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 294, + 342 + ], + "type": "text", + "content": "3.3 LEARNING WITH WEAK SUPERVISION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": "To train our deep network in a weakly supervised setting, we use a multi-task loss defined on three different levels. Specifically, our overall loss function " + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": " consists of three terms: i) an image-wise HOI recognition loss " + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_g" + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": " to adapt CLIP features to the task of human-object interaction detection; ii) a pairwise interaction classification loss " + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_p" + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": " to guide the knowledge transfer towards fine-grained relation-aware representations; and iii) a self-taught relatedness classification loss " + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_b" + }, + { + "bbox": [ + 104, + 350, + 506, + 417 + ], + "type": "text", + "content": " to prune non-interacting human-object combinations. 
Formally, the overall loss is written as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 263, + 422, + 505, + 435 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 422, + 505, + 435 + ], + "spans": [ + { + "bbox": [ + 263, + 422, + 505, + 435 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {g} + \\mathcal {L} _ {p} + \\mathcal {L} _ {b} \\tag {4}", + "image_path": "17ee4c4721f88734b1729db504ae9b2c78e22b394949c4d46d608228666095c8.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": "Image-wise HOI recognition loss " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_g" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": ": Given the HOI scores " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "s_g" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": " and ground-truth HOI categories " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_g" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": " is a standard binary cross-entropy loss for multi-label classification: " + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_g = L_{BCE}(s_g, \\mathcal{R})" + }, + { + "bbox": [ + 104, + 435, + 504, + 460 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": "Pairwise interaction classification loss " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_p" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": ": We adopt a MIL strategy that first aggregates the pairwise interaction scores and supervises this with image-level interaction labels as " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = \\{a^*\\}" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": ". 
Given the image-wise interaction scores " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\tilde{s}_p" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_p" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": " is a standard binary cross-entropy loss for multi-label classification as: " + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_p = L_{BCE}(\\tilde{s}_p, \\mathcal{A})" + }, + { + "bbox": [ + 104, + 464, + 506, + 510 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "content": "Self-taught relatedness classification loss " + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_b" + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "content": ": As human-object associations are not annotated, we devise a novel pseudo relatedness label generation mechanism for training a self-taught binary classifier to identify valid human-object associations. Specifically, we observe that the human-object pairs with confident interaction scores are often associated after a short period of initial training without self-taught classification loss. Motivated by this, we use the interaction scores " + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "inline_equation", + "content": "s_p" + }, + { + "bbox": [ + 104, + 514, + 506, + 582 + ], + "type": "text", + "content": " from the model under training to supervise the relatedness classification." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": "Concretely, we generate pseudo labels " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{B} = \\{b_1,\\dots,b_M\\}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " for all human-object pairs in an image, where " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "b_{m}\\in \\{0,1\\}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " indicates the relatedness for the " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " -th combination. 
To this end, as illustrated in Fig.2(b), we first propose a binary mask " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "Z\\in \\{0,1\\}^{M\\cdot A}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " for all interaction scores " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " with respect to the ground-truth object categories " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{C} = \\{c^*\\}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". For each human-object pair where the object label " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "c_{o}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " is included in " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ", we consider it as a potential interactive combination and thus assign the corresponding row in " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " as 1, and other rows as 0. For the latter, we also immediately set " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "b_{m} = 0" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". Then we generate pairwise scores " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "t^a\\in \\mathbb{R}^M" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " for each ground-truth interaction " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "a^*" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " by selecting the corresponding row from " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "S\\odot Z" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". The pseudo label for the pair with the highest score is assigned as 1, i.e., " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "m_a = \\arg \\max_{m}t^a" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "b_{m_a} = 1" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". We only select one positive pair3 for each " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "a^*" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ". 
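A compact sketch of this pseudo-label generation is given below, assuming the pairwise score matrix S, each pair's detected object class, and the image-level ground-truth object and interaction ids are available; variable names and shapes are illustrative rather than the released code.

```python
import torch

def pseudo_relatedness_labels(S, pair_obj_cls, gt_obj_cls, gt_actions):
    """Sketch of the self-taught label generation: mask out pairs whose object class is not an
    image-level GT object, then for every GT interaction a* mark the highest-scoring remaining
    pair as positive (b_m = 1); all other pairs keep b_m = 0.
    S: (M, A) pairwise interaction scores; pair_obj_cls: (M,) object class id per pair;
    gt_obj_cls / gt_actions: lists of image-level GT object / interaction ids."""
    M, A = S.shape
    b = torch.zeros(M)                                                   # pseudo relatedness labels
    valid = torch.tensor([c in gt_obj_cls for c in pair_obj_cls.tolist()])
    Z = valid.float().unsqueeze(-1).expand(M, A)                         # binary mask Z in {0,1}^{M x A}
    masked = S * Z                                                       # S ⊙ Z
    for a_star in gt_actions:
        t_a = masked[:, a_star]                                          # scores t^a over all pairs
        if valid.any():
            b[t_a.argmax()] = 1.0                                        # one positive pair per GT interaction
    return b   # supervises L_b = sum_m BCE(s_b^m, b_m)
```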
Finally, " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_b" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " is defined as a binary cross-entropy loss: " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_b = \\sum_m L_{BCE}(s_b^m,b_m)" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "s_b^m" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " is the relatedness score for the " + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 586, + 507, + 720 + ], + "type": "text", + "content": " -th pair." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 117, + 720, + 293, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 293, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 293, + 732 + ], + "type": "text", + "content": "3We also explore top-K selection in Appendix F" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 130, + 504, + 280 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 130 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 130 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 130 + ], + "type": "text", + "content": "Table 1: mAP comparison on HICO-DET and V-COCO test set. - denotes the results are not available. * stands for the method we re-evaluate with the correct evaluation protocol (see Appendix.I for details) and †means our re-implementation. For V-COCO, all object detectors are pretrained on MSCOCO dataset by default, and details about the evaluation metrics APS1&2 c.f. Appendix H. IN-1K denotes ImageNet with 1000 classes." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 130, + 504, + 280 + ], + "lines": [ + { + "bbox": [ + 106, + 130, + 504, + 280 + ], + "spans": [ + { + "bbox": [ + 106, + 130, + 504, + 280 + ], + "type": "table", + "html": "
<tr><td>Methods</td><td>Backbone</td><td>Detector</td><td colspan="3">HICO-DET (%)</td><td colspan="2">V-COCO (%)</td></tr>
<tr><td colspan="3"></td><td>Full</td><td>Rare</td><td>Non-Rare</td><td>AP_role (S1)</td><td>AP_role (S2)</td></tr>
<tr><td colspan="8">supervised</td></tr>
<tr><td>iCAN (Gao et al., 2018)</td><td>RN50 (IN-1K&COCO)</td><td>FRCNN (COCO)</td><td>14.84</td><td>10.45</td><td>16.15</td><td>45.30</td><td>52.40</td></tr>
<tr><td>PMFNet (Wan et al., 2019)</td><td>RN50-FPN (IN-1K&COCO)</td><td>FRCNN (COCO)</td><td>17.46</td><td>15.56</td><td>18.00</td><td>52.00</td><td>-</td></tr>
<tr><td>TIN (Li et al., 2019b)</td><td>RN50-FPN (IN-1K&COCO)</td><td>FRCNN (COCO)</td><td>17.22</td><td>13.51</td><td>18.32</td><td>47.80</td><td>54.20</td></tr>
<tr><td>DJ-RN (Li et al., 2020a)</td><td>RN50 (IN-1K&COCO)</td><td>FRCNN (COCO)</td><td>21.34</td><td>18.53</td><td>21.18</td><td>53.30</td><td>60.30</td></tr>
<tr><td>IDN (Li et al., 2020b)</td><td>RN50 (IN-1K&COCO)</td><td>FRCNN (HICO-DET)</td><td>26.29</td><td>22.61</td><td>27.39</td><td>53.30</td><td>60.30</td></tr>
<tr><td>SCG (Zhang et al., 2021c)</td><td>RN50-FPN (IN-1K&HICO-DET)</td><td>FRCNN (HICO-DET)</td><td>31.33</td><td>24.72</td><td>33.31</td><td>54.20</td><td>60.90</td></tr>
<tr><td>HOTR (Kim et al., 2021)</td><td>RN50+Transformer (IN-1K&COCO)</td><td>DETR (HICO-DET)</td><td>25.10</td><td>17.34</td><td>27.42</td><td>55.20</td><td>64.40</td></tr>
<tr><td>QPIC (Tamura et al., 2021)</td><td>RN101+Transformer (IN-1K&COCO)</td><td>DETR (COCO)</td><td>29.90</td><td>23.92</td><td>31.69</td><td>58.30</td><td>60.70</td></tr>
<tr><td>CATN (Dong et al., 2022)</td><td>RN50+Transformer (IN-1K&HICO-DET&COCO)</td><td>DETR (HICO-DET)</td><td>31.86</td><td>25.15</td><td>33.84</td><td>60.10</td><td>-</td></tr>
<tr><td>MSTR (Kim et al., 2022)</td><td>RN50+Transformer (IN-1K&COCO)</td><td>DETR (HICO-DET)</td><td>31.17</td><td>25.31</td><td>33.92</td><td>62.00</td><td>65.20</td></tr>
<tr><td>DisTr (Zhou et al., 2022)</td><td>RN50+Transformer (IN-1K&COCO)</td><td>DETR (HICO-DET)</td><td>31.75</td><td>27.45</td><td>33.03</td><td>66.20</td><td>68.50</td></tr>
<tr><td>SSRT (Iftekhar et al., 2022)</td><td>R101+Transformer (IN-1K&COCO)</td><td>DETR (COCO)</td><td>31.34</td><td>24.31</td><td>33.32</td><td>65.00</td><td>67.10</td></tr>
<tr><td>GEN-VLKT (Liao et al., 2022)</td><td>RN101+Transformer (IN-1K&HICO-DET)</td><td>DETR (HICO-DET)</td><td>34.95</td><td>31.18</td><td>36.08</td><td>63.58</td><td>65.93</td></tr>
<tr><td colspan="8">between supervised & weakly-supervised setting, learning with image-level HOIs and box annotations</td></tr>
<tr><td>AlignFormer (Kilickaya & Smeulders, 2021)</td><td>RN101+Transformer (IN-1K&HICO-DET)</td><td>DETR (HICO-DET)</td><td>20.85</td><td>18.23</td><td>21.64</td><td>15.82</td><td>16.34</td></tr>
<tr><td colspan="8">weakly-supervised</td></tr>
<tr><td>Explanation-HOI* (Baldassarre et al., 2020)</td><td>ResNeXt101 (IN-1K&COCO)</td><td>FRCNN (COCO)</td><td>10.63</td><td>8.71</td><td>11.20</td><td>-</td><td>-</td></tr>
<tr><td>MX-HOI (Kumaraswamy et al., 2021)</td><td>RN101 (IN-1K&COCO)</td><td>FRCNN (COCO)</td><td>16.14</td><td>12.06</td><td>17.50</td><td>-</td><td>-</td></tr>
<tr><td>PPR-FCN† (Zhang et al., 2017)</td><td>RN50 (CLIP dataset)</td><td>FRCNN (COCO)</td><td>17.55</td><td>15.69</td><td>18.41</td><td>-</td><td>-</td></tr>
<tr><td>ours</td><td>RN50 (CLIP dataset)</td><td>FRCNN (COCO)</td><td>22.89</td><td>22.41</td><td>23.03</td><td>42.97</td><td>48.06</td></tr>
<tr><td>ours</td><td>RN101 (CLIP dataset)</td><td>FRCNN (COCO)</td><td>25.70</td><td>24.52</td><td>26.05</td><td>44.74</td><td>49.97</td></tr>
", + "image_path": "1c8c230f9f7169d54dff5b1d6823b1233510ef4f79100b811857bb40bd219d38.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 292, + 201, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 292, + 201, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 292, + 201, + 303 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 313, + 230, + 324 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 313, + 230, + 324 + ], + "spans": [ + { + "bbox": [ + 105, + 313, + 230, + 324 + ], + "type": "text", + "content": "4.1 EXPERIMENTAL SETUP" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": "Datasets: We benchmark our model on two public datasets: HICO-DET and V-COCO. HICO-DET consists of 47776 images (38118 for training and 9658 for test). It has " + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "inline_equation", + "content": "N = 600" + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": " HOI categories, which are composed of " + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "inline_equation", + "content": "C = 80" + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": " common objects (the same as MSCOCO (Lin et al., 2014)) and " + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "inline_equation", + "content": "A = 117" + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": " unique interaction categories. V-COCO is a subset of MSCOCO, consisting of 2533 images for training, 2867 for validation and 4946 for test. It has 16199 human instances, each annotated with binary labels for " + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "inline_equation", + "content": "A = 26" + }, + { + "bbox": [ + 104, + 331, + 506, + 399 + ], + "type": "text", + "content": " interaction categories." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 403, + 504, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 449 + ], + "type": "text", + "content": "Evaluation Metric: Following (Chao et al., 2015), we use mean average precision (mAP) to evaluate HOI detection performance. A human-object pair is considered as positive when both predicted human and object boxes have at least 0.5 IoU with their ground-truth boxes, and the HOI class is classified correctly." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 456, + 250, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 250, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 250, + 467 + ], + "type": "text", + "content": "4.2 IMPLEMENTATION DETAILS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "text", + "content": "We use an off-the-shelf Faster R-CNN (Ren et al., 2015) pretrained on MSCOCO to generate at most 100 object candidates for each image. 
For V-COCO, it is worth noting that we train the object detector by removing the images in MSCOCO that overlap with V-COCO to prevent information leakage. The backbone network is initialized with the visual encoder from CLIP-RN101 model and the feature dimension " + }, + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "inline_equation", + "content": "D = 1024" + }, + { + "bbox": [ + 104, + 472, + 504, + 528 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "type": "text", + "content": "For model learning, we set the detection score weight " + }, + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "type": "inline_equation", + "content": "\\gamma = 2.8" + }, + { + "bbox": [ + 104, + 533, + 506, + 599 + ], + "type": "text", + "content": " as default by following previous works (Zhang et al., 2021c; Li et al., 2019b), then optimize the entire network with AdamW and an initial learning rate of 1e-5 for backbone parameters and 1e-4 for others. We detach the parameters of the knowledge bank on the local branch for better model learning. We train up to 60K iterations with batch-size 24 in each on 4 NVIDIA 2080TI GPUs, and decay the learning rate by 10 times in 12K and 24K iteration." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 602, + 238, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 602, + 238, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 602, + 238, + 613 + ], + "type": "text", + "content": "4.3 QUANTITATIVE RESULTS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "For HICO-DET (Tab.1), our approach outperforms the previous state of the arts on the weakly supervised setting by a clear margin, achieving 22.89 mAP with ResNet-50 and 25.70 mAP with ResNet-101 as backbone. For a fair comparison, we also re-implement PPR-FCN with CLIP visual encoder. The results show that we still outperform PPR-FCN by a sizeable margin, which validates the superiority of our framework. Besides, we even perform comparably with HOTR and IDN under an inferior experimental setting where HOTR adopts a more advanced transformer encoder-decoder architecture, and both methods are trained with strong supervision. Furthermore, the mAP gap between Rare (training annotations " + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "inline_equation", + "content": "< 10" + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": ") and Non-rare HOI classes in our results is much smaller than other methods, demonstrating the superior generalization capability of our HOI representation for solving the long-tailed distribution issue. 
In detail, we achieve a 0.62 mAP gap with ResNet-50" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 126, + 130, + 485, + 236 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "type": "text", + "content": "Table 2: Ablation study on HICO-DET dataset. \"RN50-FPN(COCO)\" denotes the backbone initialized with Faster R-CNN parameters pretrained on MSCOCO dataset while \"CLIP RN50\" stands for the backbone initialized with CLIP visual encoder. Besides, we construct the knowledge bank " + }, + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 89, + 506, + 130 + ], + "type": "text", + "content": " with random initialization, or computing HOI prompts by RoBERTa or CLIP text transformer." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 126, + 130, + 485, + 236 + ], + "lines": [ + { + "bbox": [ + 126, + 130, + 485, + 236 + ], + "spans": [ + { + "bbox": [ + 126, + 130, + 485, + 236 + ], + "type": "table", + "html": "
<tr><td>Methods</td><td colspan="2">Parameter initialization</td><td colspan="4">CLIP Knowledge</td><td colspan="3">mAP (%)</td></tr>
<tr><td></td><td>Backbone</td><td>knowledge bank</td><td>HOI recognition</td><td>KTN</td><td>score fusion</td><td>SRC</td><td>Full</td><td>Rare</td><td>Non-Rare</td></tr>
<tr><td>baseline</td><td>CLIP RN50</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>19.52</td><td>16.58</td><td>20.40</td></tr>
<tr><td>Exp 1</td><td>CLIP RN50</td><td>CLIP Text</td><td>✓</td><td>-</td><td>-</td><td>-</td><td>20.31</td><td>18.34</td><td>20.90</td></tr>
<tr><td>Exp 2</td><td>CLIP RN50</td><td>CLIP Text</td><td>✓ (freeze WT)</td><td>-</td><td>-</td><td>-</td><td>20.09</td><td>18.23</td><td>20.64</td></tr>
<tr><td>Exp 3</td><td>CLIP RN50</td><td>CLIP Text</td><td>✓</td><td>✓</td><td>-</td><td>-</td><td>20.86</td><td>18.40</td><td>21.60</td></tr>
<tr><td>Exp 4</td><td>CLIP RN50</td><td>CLIP Text</td><td>✓</td><td>✓</td><td>✓</td><td>-</td><td>22.40</td><td>20.70</td><td>22.90</td></tr>
<tr><td>Exp 5</td><td>CLIP RN50</td><td>-</td><td>-</td><td>-</td><td>-</td><td>✓</td><td>19.88</td><td>17.45</td><td>20.61</td></tr>
<tr><td>Exp 6</td><td>CLIP RN50</td><td>CLIP Text</td><td>✓</td><td>-</td><td>-</td><td>✓</td><td>20.75</td><td>19.38</td><td>21.16</td></tr>
<tr><td>Exp 7</td><td>CLIP RN50</td><td>CLIP Text</td><td>✓</td><td>✓</td><td>-</td><td>✓</td><td>21.53</td><td>20.05</td><td>21.97</td></tr>
<tr><td>ours</td><td>CLIP RN50</td><td>CLIP Text</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>22.89</td><td>22.41</td><td>23.03</td></tr>
<tr><td>Exp 8</td><td>RN50-FPN (COCO)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>19.44</td><td>16.20</td><td>20.41</td></tr>
<tr><td>Exp 9</td><td>RN50-FPN (COCO)</td><td>random</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>19.61</td><td>15.57</td><td>20.82</td></tr>
<tr><td>Exp 10</td><td>RN50-FPN (COCO)</td><td>RoBERTa</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>20.45</td><td>16.46</td><td>21.65</td></tr>
", + "image_path": "597049b964652d7583c17bbd2d3f5428634b215445100d84dfdc4765fda6c5f7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 257, + 504, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 257, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 257, + 504, + 281 + ], + "type": "text", + "content": "and 1.53 with ResNet-101 backbone, which is much smaller than AlignFormer (3.14) and PPR-FCN (2.64), and supervised methods SSRT (9.01) and GEN-VLKT (4.9)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "content": "For V-COCO dataset, we report the performance of " + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{role}" + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "content": " in both scenario1 and scenario2 for a complete comparison, which are 42.97 / 48.06 " + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{role}" + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "content": " with ResNet-50 and 44.74 / 49.97 " + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "inline_equation", + "content": "\\mathrm{AP}_{role}" + }, + { + "bbox": [ + 104, + 285, + 506, + 331 + ], + "type": "text", + "content": " with ResNet-101 as backbone. As shown in Tab.1, our model achieves significant improvement compared with AlignFormer, and even is comparable with supervised methods TIN and iCAN." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 334, + 211, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 334, + 211, + 344 + ], + "spans": [ + { + "bbox": [ + 105, + 334, + 211, + 344 + ], + "type": "text", + "content": "4.4 ABLATION STUDY" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 349, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 349, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 504, + 373 + ], + "type": "text", + "content": "In this section, we mainly validate the effectiveness of each component with detailed ablation studies on HICO-DET dataset. We use ResNet-50 as the backbone network to reduce experimental costs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "content": "**Baseline:** The baseline adopts the visual encoder from CLIP-RN50 to generate the vanilla HOI representation " + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "inline_equation", + "content": "v_{p}" + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "content": ", which is directly used to predict the interaction scores " + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "inline_equation", + "content": "s_{p}" + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "content": ". 
Only pairwise interaction classification loss " + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{p}" + }, + { + "bbox": [ + 104, + 380, + 505, + 415 + ], + "type": "text", + "content": " is used for model learning." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "text", + "content": "HOI recognition: We augment the baseline with a HOI recognition network and observe the full mAP improves from 19.52 to 20.41, as reported in Exp 1 of Tab. 2. It suggests that the learnable knowledge bank " + }, + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "text", + "content": " serves as a powerful classifier to perform image-level HOI recognition and update the visual encoder for better HOI representation. We visualize the learned parameters of knowledge bank in Appendix D to demonstrate its effectiveness. Furthermore, as in Exp 2, the performance slightly decreases from 20.31 to 20.09 when we freeze the training of the knowledge bank, indicating that joint learning of visual features and the knowledge bank is more appropriate for HOI detection." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 518, + 506, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 518, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 518, + 506, + 575 + ], + "type": "text", + "content": "Knowledge Transfer Network (KTN): KTN explicitly transfers the CLIP meta-knowledge to pairwise HOI features. As a result, it contributes 0.55 Full mAP improvement (Exp 3 v.s. Exp 1) and most of the performance gains come from Non-rare classes. This result shows KTN is capable of extracting discriminative features from the relational knowledge bank to our HOI representation. We also study the effectiveness of the attention mechanism of KTN in Appendix E." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": "Score fusion: In Tab. 2, we largely improve the Full mAP from 20.86 (Exp 3) to 22.40 (Exp 4) by fusing the global HOI scores " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "s_g" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": " to pairwise interaction score " + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "inline_equation", + "content": "s_p" + }, + { + "bbox": [ + 104, + 582, + 506, + 639 + ], + "type": "text", + "content": ". As the HOI recognition network seamlessly inherits the visual-linguistic features from CLIP and directly adopts image labels as supervision, the global interaction scores are pretty accurate and largely enhance the pairwise scores, demonstrating its strong capabilities to cope with long-tailed and fine-grained HOI recognition." 
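To make the fused quantities in the score-fusion ablation concrete, the sketch below mirrors the inference-time composition of Eq. (3). The tensor shapes, the pre-gathering of the global HOI logit per pair's object class, and the choice to run the Softmax over the pair dimension are assumptions for illustration, not the exact released implementation.

```python
import torch

def fuse_scores(s_p, s_b, s_g_pair, s_h, s_o, gamma: float = 2.8):
    """Rough sketch of Eq. (3).
    s_p:      (M, A) pairwise interaction logits for M human-object pairs
    s_b:      (M,)   pairwise relatedness logits
    s_g_pair: (M, A) global HOI logits s_g^{a, c_o}, pre-gathered for each pair's object class
    s_h, s_o: (M,)   detector confidences of the human and object boxes"""
    S_bar = torch.softmax(s_p, dim=0)                       # normalize each interaction over all pairs
    s_tilde = s_p.max(dim=0).values                         # image-wise interaction scores
    e_p = torch.sigmoid(s_tilde).unsqueeze(0) * S_bar       # normalized pairwise interaction scores
    s_hoi = torch.sigmoid(s_g_pair) * e_p * torch.sigmoid(s_b).unsqueeze(-1)   # s_{h,o}^a
    return (s_h * s_o).pow(gamma).unsqueeze(-1) * s_hoi     # overall HOI score R_{h,o}^a, shape (M, A)
```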
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 646, + 505, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 646, + 505, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 646, + 505, + 691 + ], + "type": "text", + "content": "Self-taught Relatedness Classification (SRC): Self-taught classification aims to identify the relatedness between human and objects. The improvements from Exp 4 to ours show the effectiveness of our self-taught strategy, which is capable of figuring out the irrelevant human-object pairs and suppressing their interaction scores during inference." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 734 + ], + "type": "text", + "content": "Combining KTN & SRC: The ablation results of Exp 5-7 in Tab. 2 show the KTN and SRC are able to facilitate each other. In detail, the SRC obtains 0.49 Full mAP improvement when the KTN is introduced (ours v.s. Exp 4), which is only 0.36 without KTN (Exp 5 v.s. baseline). Similarly," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 91, + 206, + 158 + ], + "blocks": [ + { + "bbox": [ + 152, + 81, + 163, + 89 + ], + "lines": [ + { + "bbox": [ + 152, + 81, + 163, + 89 + ], + "spans": [ + { + "bbox": [ + 152, + 81, + 163, + 89 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 91, + 206, + 158 + ], + "lines": [ + { + "bbox": [ + 107, + 91, + 206, + 158 + ], + "spans": [ + { + "bbox": [ + 107, + 91, + 206, + 158 + ], + "type": "image", + "image_path": "3c4dc5eb76fc4028aefbf775852ee762e247e291481c548a837a8639ba17ecdc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 109, + 159, + 149, + 177 + ], + "lines": [ + { + "bbox": [ + 109, + 159, + 149, + 177 + ], + "spans": [ + { + "bbox": [ + 109, + 159, + 149, + 177 + ], + "type": "text", + "content": "wash_motorcycle \nours: 0.18, 0.355 \nbaseline: 0.0189" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 208, + 80, + 305, + 159 + ], + "blocks": [ + { + "bbox": [ + 208, + 80, + 305, + 159 + ], + "lines": [ + { + "bbox": [ + 208, + 80, + 305, + 159 + ], + "spans": [ + { + "bbox": [ + 208, + 80, + 305, + 159 + ], + "type": "image", + "image_path": "64d18385f4e7e147826a9c9ded9896f1574f4ad51300b8e50043ecca7c12edd0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 160, + 285, + 174 + ], + "lines": [ + { + "bbox": [ + 209, + 160, + 285, + 174 + ], + "spans": [ + { + "bbox": [ + 209, + 160, + 285, 
+ 174 + ], + "type": "text", + "content": "hold_horse:0.062,0.397,0.998 ride_horse:0.405,0.966,0.998" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 307, + 89, + 403, + 159 + ], + "blocks": [ + { + "bbox": [ + 351, + 80, + 361, + 89 + ], + "lines": [ + { + "bbox": [ + 351, + 80, + 361, + 89 + ], + "spans": [ + { + "bbox": [ + 351, + 80, + 361, + 89 + ], + "type": "text", + "content": "(c)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 89, + 403, + 159 + ], + "lines": [ + { + "bbox": [ + 307, + 89, + 403, + 159 + ], + "spans": [ + { + "bbox": [ + 307, + 89, + 403, + 159 + ], + "type": "image", + "image_path": "9df9bcc5f1461c0846c4218ef4173e3faefd7d6edad70696dd7e72628870ebc0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 160, + 400, + 167 + ], + "lines": [ + { + "bbox": [ + 307, + 160, + 400, + 167 + ], + "spans": [ + { + "bbox": [ + 307, + 160, + 400, + 167 + ], + "type": "text", + "content": "sit_on_motorcycle: 0.515, 0.033, 0.950" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 406, + 89, + 503, + 159 + ], + "blocks": [ + { + "bbox": [ + 449, + 81, + 459, + 89 + ], + "lines": [ + { + "bbox": [ + 449, + 81, + 459, + 89 + ], + "spans": [ + { + "bbox": [ + 449, + 81, + 459, + 89 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 406, + 89, + 503, + 159 + ], + "lines": [ + { + "bbox": [ + 406, + 89, + 503, + 159 + ], + "spans": [ + { + "bbox": [ + 406, + 89, + 503, + 159 + ], + "type": "image", + "image_path": "3d058fc7e928eaacd2f473320fb34d4c09870479628f97693de6ed7388ecbfea.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 159, + 500, + 174 + ], + "lines": [ + { + "bbox": [ + 406, + 159, + 500, + 174 + ], + "spans": [ + { + "bbox": [ + 406, + 159, + 500, + 174 + ], + "type": "text", + "content": "sit_at_dining_table: 0.006, 0.993, 0.079 \nsit_at_dining_table: 0.232, 0.993, 0.994" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 108, + 178, + 206, + 247 + ], + "blocks": [ + { + "bbox": [ + 108, + 178, + 206, + 247 + ], + "lines": [ + { + "bbox": [ + 108, + 178, + 206, + 247 + ], + "spans": [ + { + "bbox": [ + 108, + 178, + 206, + 247 + ], + "type": "image", + "image_path": "31a3d121ff52f3113f31974f177930970ca7ec73ecd4e03889b2f475115f9c2c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 110, + 247, + 170, + 267 + ], + "lines": [ + { + "bbox": [ + 110, + 247, + 170, + 267 + ], + "spans": [ + { + "bbox": [ + 110, + 247, + 170, + 267 + ], + "type": "text", + "content": "paint_fire_hydrant: \nours: 0.203, 0.505, 0.955 \nbaseline: 0.0027" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 273, + 504, + 324 + ], + "lines": [ + { + "bbox": [ + 104, + 273, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 104, + 273, + 504, + 324 + ], + "type": "text", + "content": "Figure 3: Visualization of HOI detection results on HICO-DET test set. Red scores denote the negative HOI predictions. 
We mainly demonstrate the model's capabilities on four aspects: (a) coping with imbalanced HOI distribution; (b) distinguishing subtle differences among interaction types; (c) suppressing background HOI classes, and (d) pruning irrelevant human-object associations. The numbers reported are normalized pairwise interaction score, global HOI score and relatedness score." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 208, + 178, + 306, + 247 + ], + "blocks": [ + { + "bbox": [ + 208, + 178, + 306, + 247 + ], + "lines": [ + { + "bbox": [ + 208, + 178, + 306, + 247 + ], + "spans": [ + { + "bbox": [ + 208, + 178, + 306, + 247 + ], + "type": "image", + "image_path": "192708158f64b8c7105ece6aeedbf1e1b24fd0ec539a1403e13410348bb7f329.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 208, + 249, + 287, + 263 + ], + "lines": [ + { + "bbox": [ + 208, + 249, + 287, + 263 + ], + "spans": [ + { + "bbox": [ + 208, + 249, + 287, + 263 + ], + "type": "text", + "content": "repair truck: 0.23, 0.055, 0.979 \ninspect truck: 0.48, 0.138, 0.979" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 307, + 177, + 404, + 247 + ], + "blocks": [ + { + "bbox": [ + 307, + 177, + 404, + 247 + ], + "lines": [ + { + "bbox": [ + 307, + 177, + 404, + 247 + ], + "spans": [ + { + "bbox": [ + 307, + 177, + 404, + 247 + ], + "type": "image", + "image_path": "5579212847887188404267a0686fd5fb59b35064bcc9bf3ca9fa886fb6aa1cfd.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 251, + 404, + 258 + ], + "lines": [ + { + "bbox": [ + 307, + 251, + 404, + 258 + ], + "spans": [ + { + "bbox": [ + 307, + 251, + 404, + 258 + ], + "type": "text", + "content": "stand_on_skateboard: 0.009, 0.001, 0.98" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 406, + 178, + 503, + 247 + ], + "blocks": [ + { + "bbox": [ + 406, + 178, + 503, + 247 + ], + "lines": [ + { + "bbox": [ + 406, + 178, + 503, + 247 + ], + "spans": [ + { + "bbox": [ + 406, + 178, + 503, + 247 + ], + "type": "image", + "image_path": "acddb4ab24f2368acd17d4d546335723573c1d65e5ad4a799246606a28382d6a.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 408, + 250, + 479, + 264 + ], + "lines": [ + { + "bbox": [ + 408, + 250, + 479, + 264 + ], + "spans": [ + { + "bbox": [ + 408, + 250, + 479, + 264 + ], + "type": "text", + "content": "hold_kite:0.039,0.892,0.238 hold_kite:0.478,0.892,0.995" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 339, + 504, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 339, + 504, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 504, + 363 + ], + "type": "text", + "content": "the KTN contributes 0.78 Full mAP improvement with SRC (Exp 7 v.s. Exp 6), which is only 0.55 without SRC (Exp 3 v.s. Exp 1)." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 372, + 506, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 506, + 461 + ], + "type": "text", + "content": "Parameter initialization: Our visual encoder and knowledge bank are both initialized from CLIP. We also explore different parameter initialization strategy in Exp 8-10. Specifically, we initialize the visual encoder with a ResNet50-FPN pretrained on COCO detection task for the baseline (Exp 8), and the knowledge bank with random parameters (Exp 9) or embeddings of HOI labels from RoBERTa model (Exp 10) for the final model. We observe severe drops with all these initialization methods compared with ours, demonstrating the effectiveness and generalization ability of CLIP model. It is worth noting that the mAP of Rare classes decreases from 16.20 in Exp 8 to 15.57 in Exp 9, which suggests the randomly initialized knowledge bank even aggravates the imbalance issue in final model." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 475, + 231, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 231, + 487 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 231, + 487 + ], + "type": "text", + "content": "4.5 QUALITATIVE RESULTS" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 494, + 506, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 494, + 506, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 494, + 506, + 616 + ], + "type": "text", + "content": "We show some qualitative results of our method in Fig.3. For each HOI prediction, we report (i) normalized pairwise interaction score, (ii) global HOI score and (iii) relatedness score for ours, and only pairwise interaction score for baseline. In Fig.3(a), ours interaction scores are more confident than baseline in Rare HOI classes, demonstrating the generalization ability of our CLIP-guided HOI representation. Besides, when incorporating relational knowledge bank into pairwise HOI representation, our method is capable of distinguishing the subtle differences among similar HOIs in Fig.3(b) (e.g., repair_truck:0.23 v.s. inspect_truck:0.48 in the bottom figure). Moreover, in Fig.3(c), the global branch suppresses background HOIs by predicting low global scores for them (e.g., the global HOI score is 0.033 for sit_onmotorcycle while the ground-truth is sit_on_bicycle). Finally, in Fig.3(d), our self-taught relatedness classification strategy shows strong capability at recognizing the ambiguous human-object associations (e.g., 0.079 v.s. 0.994 in the upper figure)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 105, + 630, + 195, + 642 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 630, + 195, + 642 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 195, + 642 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 733 + ], + "type": "text", + "content": "In this paper, we propose a bi-level knowledge integration strategy that incorporates the prior knowledge from CLIP for weakly-supervised HOI detection. 
Specifically, we exploit CLIP textual embeddings of HOI labels as a relational knowledge bank, which is adopted to enhance the HOI representation with an image-wise HOI recognition network and a pairwise knowledge transfer network. We further propose the addition of a self-taught binary pairwise relatedness classification loss to overcome ambiguous human-object association. Finally, our approach achieves the new state of the art on both HICO-DET and V-COCO benchmarks under the weakly supervised setting." + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 220, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 220, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 220, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 506, + 140 + ], + "type": "text", + "content": "We acknowledge funding from Flemish Government under the Onderzoeksprogramma Artificiele Intelligentie (AI) Vlaanderen programme, Shanghai Science and Technology Program 21010502700 and Shanghai Frontiers Science Center of Human-centered Artificial Intelligence." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 156, + 212, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 212, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 212, + 168 + ], + "type": "text", + "content": "ETHICS STATEMENT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 180, + 504, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 180, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 504, + 214 + ], + "type": "text", + "content": "Hereby, we consciously assure that our study is original work which has not been previously published elsewhere, and is not currently being considered for publication elsewhere. We do not have ethics risks as mentioned in the author guidelines." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 230, + 269, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 230, + 269, + 243 + ], + "spans": [ + { + "bbox": [ + 105, + 230, + 269, + 243 + ], + "type": "text", + "content": "REPRODUCIBILITY STATEMENT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 255, + 504, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 504, + 278 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 504, + 278 + ], + "type": "text", + "content": "We use publicly available benchmarks, HICO-DET and V-COCO, to validate our method. Code is available at https://github.com/bobwan1995/Weakly-HOI." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 293, + 176, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 176, + 306 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 176, + 306 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 312, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 106, + 312, + 505, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 312, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 312, + 505, + 335 + ], + "type": "text", + "content": "Federico Baldassarre, Kevin Smith, Josephine Sullivan, and Hossein Azizpour. Explanation-based weakly-supervised learning of visual relations with graph networks. In ECCV, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 341, + 505, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 341, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 107, + 341, + 505, + 365 + ], + "type": "text", + "content": "Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 371, + 506, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 371, + 506, + 395 + ], + "spans": [ + { + "bbox": [ + 107, + 371, + 506, + 395 + ], + "type": "text", + "content": "Yu-Wei Chao, Zhan Wang, Yugeng He, Jiaxuan Wang, and Jia Deng. HICO: A benchmark for recognizing human-object interactions in images. In ICCV, 2015." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 400, + 506, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 400, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 107, + 400, + 506, + 423 + ], + "type": "text", + "content": "Yu-Wei Chao, Yunfan Liu, Xieyang Liu, Huayi Zeng, and Jia Deng. Learning to detect human-object interactions. In WACV, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 430, + 506, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 430, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 106, + 430, + 506, + 464 + ], + "type": "text", + "content": "Leizhen Dong, Zhimin Li, Kunlun Xu, Zhijun Zhang, Luxin Yan, Sheng Zhong, and Xu Zou. Category-aware transformer network for better human-object interaction detection. arXiv preprint arXiv:2204.04911, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "text", + "content": "Yu Du, Fangyun Wei, Zihe Zhang, Miaojing Shi, Yue Gao, and Guoqi Li. Learning to prompt for open-vocabulary object detection with vision-language model. arXiv preprint arXiv:2203.14940, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 512, + 504, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 512, + 504, + 535 + ], + "spans": [ + { + "bbox": [ + 107, + 512, + 504, + 535 + ], + "type": "text", + "content": "Chen Gao, Yuliang Zou, and Jia-Bin Huang. ican: Instance-centric attention network for human-object interaction detection. In BMVC, 2018." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 541, + 504, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 541, + 504, + 564 + ], + "spans": [ + { + "bbox": [ + 107, + 541, + 504, + 564 + ], + "type": "text", + "content": "Chen Gao, Jiarui Xu, Yuliang Zou, and Jia-Bin Huang. Drg: Dual relation graph for human-object interaction detection. In ECCV, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 571, + 504, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 571, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 107, + 571, + 504, + 594 + ], + "type": "text", + "content": "Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Open-vocabulary image segmentation. arXiv preprint arXiv:2112.12143, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 601, + 504, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 601, + 504, + 625 + ], + "spans": [ + { + "bbox": [ + 107, + 601, + 504, + 625 + ], + "type": "text", + "content": "Xiuye Gu, Tsung-Yi Lin, Weicheng Kuo, and Yin Cui. Open-vocabulary object detection via vision and language knowledge distillation. In ICLR, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 631, + 506, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 631, + 506, + 653 + ], + "spans": [ + { + "bbox": [ + 107, + 631, + 506, + 653 + ], + "type": "text", + "content": "Saurabh Gupta and Jitendra Malik. Visual semantic role labeling. arXiv preprint arXiv:1505.04474, 2015." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 660, + 504, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 660, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 107, + 660, + 504, + 684 + ], + "type": "text", + "content": "Tanmay Gupta, Alexander Schwing, and Derek Hoiem. No-frills human-object interaction detection: Factorization, layout encodings, and training techniques. In ICCV, 2019." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 107, + 690, + 504, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 690, + 504, + 714 + ], + "spans": [ + { + "bbox": [ + 107, + 690, + 504, + 714 + ], + "type": "text", + "content": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 107, + 719, + 501, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 719, + 501, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 719, + 501, + 732 + ], + "type": "text", + "content": "Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In ICCV2017, 2017." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "text", + "content": "Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 961-970, 2015." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 158 + ], + "type": "text", + "content": "ASM Iftekhar, Hao Chen, Kaustav Kundu, Xinyu Li, Joseph Tighe, and Davide Modolo. What to look at and where: Semantic and spatial refined transformer for detecting human-object interactions. arXiv preprint arXiv:2204.00746, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 163, + 506, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 163, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 107, + 163, + 506, + 189 + ], + "type": "text", + "content": "Maximilian Ilse, Jakub Tomczak, and Max Welling. Attention-based deep multiple instance learning. In ICML, pp. 2127-2136, 2018." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 194, + 506, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 506, + 218 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 506, + 218 + ], + "type": "text", + "content": "Mert Kilickaya and Arnold Smeulders. Human-object interaction detection via weak supervision. arXiv preprint arXiv:2112.00492, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 107, + 225, + 504, + 250 + ], + "type": "text", + "content": "Bumsoo Kim, Junhyun Lee, Jaewoo Kang, Eun-Sol Kim, and Hyunwoo J. Kim. Hotr: End-to-end human-object interaction detection with transformers. In CVPR, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 256, + 506, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 256, + 506, + 289 + ], + "spans": [ + { + "bbox": [ + 107, + 256, + 506, + 289 + ], + "type": "text", + "content": "Bumsoo Kim, Jonghwan Mun, Kyoung-Woon On, Minchul Shin, Junhyun Lee, and Eun-Sol Kim. 
Mstr: Multi-scale transformer for end-to-end human-object interaction detection. arXiv preprint arXiv:2203.14709, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 297, + 504, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 297, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 107, + 297, + 504, + 320 + ], + "type": "text", + "content": "Suresh Kirthi Kumaraswamy, Miaojing Shi, and Ewa Kijak. Detecting human-object interaction with mixed supervision. In WACV, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 327, + 506, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 327, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 506, + 361 + ], + "type": "text", + "content": "Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yan-Feng Wang, and Cewu Lu. Transferable interactiveness prior for human-object interaction detection. In CVPR, 2019a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 369, + 506, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 369, + 506, + 403 + ], + "spans": [ + { + "bbox": [ + 107, + 369, + 506, + 403 + ], + "type": "text", + "content": "Yong-Lu Li, Siyuan Zhou, Xijie Huang, Liang Xu, Ze Ma, Hao-Shu Fang, Yanfeng Wang, and Cewu Lu. Transferable interactiveness knowledge for human-object interaction detection. In CVPR, 2019b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 410, + 504, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 410, + 504, + 434 + ], + "spans": [ + { + "bbox": [ + 107, + 410, + 504, + 434 + ], + "type": "text", + "content": "Yong-Lu Li, Xinpeng Liu, Han Lu, Shiyi Wang, Junqi Liu, Jiefeng Li, and Cewu Lu. Detailed 2d-3d joint representation for human-object interaction. In CVPR, 2020a." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 441, + 504, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 441, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 107, + 441, + 504, + 464 + ], + "type": "text", + "content": "Yong-Lu Li, Xinpeng Liu, Xiaogqian Wu, Yizhuo Li, and Cewu Lu. Hoi analysis: Integrating and decomposing human-object interaction. In NeurIPS, 2020b." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 107, + 471, + 506, + 505 + ], + "type": "text", + "content": "Yue Liao, Aixi Zhang, Miao Lu, Yongliang Wang, Xiaobo Li, and Si Liu. Gen-vlkt: Simplify association and enhance interaction understanding for hoi detection. arXiv preprint arXiv:2203.13954, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 512, + 504, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 512, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 107, + 512, + 504, + 536 + ], + "type": "text", + "content": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dóllár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 543, + 504, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 543, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 107, + 543, + 504, + 578 + ], + "type": "text", + "content": "Wen Liu, Weixin Luo, Dongze Lian, and Shenghua Gao Gao. Future frame prediction for anomaly detection - a new baseline. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 584, + 504, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 584, + 504, + 618 + ], + "spans": [ + { + "bbox": [ + 107, + 584, + 504, + 618 + ], + "type": "text", + "content": "Hitoshi Nishimura, Satoshi Komorita, Yasutomo Kawanishi, and Hiroshi Murase. Sdof-tracker: Fast and accurate multiple human tracking by skipped-detection and optical-flow. arXiv preprint arXiv:2106.14259, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 625, + 504, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 625, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 107, + 625, + 504, + 661 + ], + "type": "text", + "content": "Guansong Pang, Cheng Yan, Chunhua Shen, van den Hengel Anton, and Xiao Bai. Self-trained deep ordinal regression for end-to-end video anomaly detection. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2020." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 667, + 504, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 667, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 107, + 667, + 504, + 691 + ], + "type": "text", + "content": "Alessandro Prest, Cordelia Schmid, and Vittorio Ferrari. Weakly supervised learning of interactions between humans and objects. IEEE TPAMI, 2011." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 697, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 697, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 697, + 504, + 732 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021a." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 567 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 117 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021b." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 146 + ], + "type": "text", + "content": "Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. arXiv preprint arXiv:1506.01497, 2015." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 152, + 504, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 152, + 504, + 176 + ], + "spans": [ + { + "bbox": [ + 107, + 152, + 504, + 176 + ], + "type": "text", + "content": "Masato Tamura, Hiroki Ohashi, and Tomoaki Yoshinaga. Qpic: Query-based pairwise human-object interaction detection with image-wide contextual information. In CVPR, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 182, + 505, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 182, + 505, + 217 + ], + "spans": [ + { + "bbox": [ + 106, + 182, + 505, + 217 + ], + "type": "text", + "content": "Tina, Anmol Kumar Sharma, Siddharth Tomar, and Kapil Gupta. Various approaches of human activity recognition: A review. In International Conference on Computing Methodologies and Communication(ICCMC), 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 504, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 504, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 504, + 246 + ], + "type": "text", + "content": "Oytun Ulutan, A S M Iftekhar, and B. S. Manjunath. Vsgnet: Spatial attention network for detecting human object interactions using graph convolutions. In CVPR, 2020." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 253, + 504, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 253, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 107, + 253, + 504, + 277 + ], + "type": "text", + "content": "Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. 
JMLR, 2008. URL http://jmlr.org/papers/v9/vandermaaten08a.html." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 282, + 504, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 282, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 107, + 282, + 504, + 307 + ], + "type": "text", + "content": "Mrabti Wafae, Baibai Kaoutar, Bellach Benaissa, Oulad Haj Thami Rachid, and Tairi Hamid. Human motion tracking: A comparative study. Procedia Computer Science, 148:145-153, 2019." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 312, + 504, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 312, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 107, + 312, + 504, + 336 + ], + "type": "text", + "content": "Bo Wan, Desen Zhou, Yongfei Liu, Rongjie Li, and Xuming He. Pose-aware multi-level feature network for human object interaction detection. In ICCV, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 342, + 504, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 342, + 504, + 366 + ], + "spans": [ + { + "bbox": [ + 107, + 342, + 504, + 366 + ], + "type": "text", + "content": "Aixi Zhang, Yue Liao, Si Liu, Miao Lu, Yongliang Wang, Chen Gao, and Xiaobo Li. Mining the benefits of two-stage and one-stage hoi detection. NeuIPS, 2021a." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 373, + 506, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 373, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 107, + 373, + 506, + 406 + ], + "type": "text", + "content": "Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Efficient two-stage detection of human-object interactions with a novel unary-pairwise transformer. arXiv preprint arXiv:2112.01838, 2021b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 414, + 504, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 414, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 107, + 414, + 504, + 437 + ], + "type": "text", + "content": "Frederic Z Zhang, Dylan Campbell, and Stephen Gould. Spatially conditioned graphs for detecting human-object interactions. In ICCV, 2021c." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 443, + 504, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 443, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 107, + 443, + 504, + 467 + ], + "type": "text", + "content": "Hanwang Zhang, Zawlin Kyaw, Jinyang Yu, and Shih-Fu Chang. Ppr-fcn: Weakly supervised visual relation detection via parallel pairwise r-fcn. In ICCV, 2017." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 473, + 506, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 473, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 107, + 473, + 506, + 506 + ], + "type": "text", + "content": "Desen Zhou, Zhichao Liu, Jian Wang, Leshan Wang, Tao Hu, Errui Ding, and Jingdong Wang. Human-object interaction detection via disentangled transformer. arXiv preprint arXiv:2204.09290, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 514, + 504, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 514, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 107, + 514, + 504, + 537 + ], + "type": "text", + "content": "Penghao Zhou and Mingmin Chi. Relation parsing neural network for human-object interaction detection. In ICCV, 2019." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 544, + 504, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 544, + 504, + 567 + ], + "spans": [ + { + "bbox": [ + 107, + 544, + 504, + 567 + ], + "type": "text", + "content": "Tianfei Zhou, Wenguan Wang, Siyuan Qi, Haibin Ling, and Jianbing Shen. Cascaded human-object interaction recognition. In CVPR, 2020." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 163, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 163, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 163, + 94 + ], + "type": "text", + "content": "APPENDIX" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 163 + ], + "type": "text", + "content": "In this appendix, we first describe the spatial feature generation, and then supplement more experimental results of different CLIP knowledge integration strategies for weakly-supervised HOI detection. For Explanation-HOI (Baldassarre et al., 2020), we further clarify the difference between their mAP evaluation protocol and the standard one. Finally, we demonstrate the limitations, potential negative societal impacts as well as the result error bars of our method." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 179, + 382, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 179, + 382, + 192 + ], + "spans": [ + { + "bbox": [ + 105, + 179, + 382, + 192 + ], + "type": "text", + "content": "A THE ADVANTAGE OF OUR HOI REPRESENTATION" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "type": "text", + "content": "To verify the improvement obtained with our CLIP-based HOI representation, we visualize the HOI representation " + }, + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "type": "inline_equation", + "content": "\\hat{v}_p" + }, + { + "bbox": [ + 104, + 205, + 506, + 316 + ], + "type": "text", + "content": " in feature space with t-SNE(van der Maaten & Hinton, 2008). For clarity, we randomly sample 80 HOI categories, and collect 50 samples for each category. For comparison, we also demonstrate the object-based HOI representation derived from 'Exp 9' in Tab.2 (i.e., the model without CLIP knowledge and using a random knowledge bank). 
As shown in Fig.4, we observe that CLIP-based HOI representations for different HOI categories are diverse and well separated in feature space, which is better for HOI detection. In contrast, the object-based representations are not well separated in feature space (see the red box region in Fig.4b). Besides, the experimental results in the ablation study (ours v.s. 'Exp 9') also validate the advantage of CLIP-based HOI representation, improving full mAP from 19.61 to 22.89." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 332, + 378, + 345 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 332, + 378, + 345 + ], + "spans": [ + { + "bbox": [ + 105, + 332, + 378, + 345 + ], + "type": "text", + "content": "B ABLATION ON CLIP KNOWLEDGE INTEGRATION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": "To further demonstrate the superiority of our CLIP knowledge integration strategy, we study several proven techniques for CLIP knowledge transfer in Tab. 3. In " + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "inline_equation", + "content": "Abl1" + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": ", for each human-object pair, we directly infer the HOI scores with CLIP by computing the cross-modal similarities between their visual union region and the HOI prompts. Without introducing any HOI priors, the promising results indicate the powerful generalization ability of CLIP and motivate the design of incorporating CLIP knowledge for weakly-supervised HOI detection. In " + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "inline_equation", + "content": "Abl2" + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": ", we duplicate the experiment setting and results from " + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "inline_equation", + "content": "Exp8" + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": " in Tab. 2 of the main paper. It is a simplified baseline model but initializes the visual encoder with a ResNet50-FPN pretrained on COCO detection task. Then we introduce three different CLIP knowledge transfer strategies (Abl 3-4 and ours) based on " + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "inline_equation", + "content": "Abl2" + }, + { + "bbox": [ + 104, + 357, + 506, + 457 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 462, + 504, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 504, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 504, + 485 + ], + "type": "text", + "content": "In Abl 3, we directly enhance baseline scores in Abl 2 with the CLIP similarity scores in Abl 1 on the inference stage. Without bells and whistles, we obtain 1.12 gain in Full mAP." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": "Furthermore, in Abl 4, we adopt a similar knowledge transfer strategy as GEN-VLKT (Liao et al., 2022), where we initialize the HOI classifier " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_P" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " with HOI prompt and regularize the global HOI representation with CLIP image feature " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "v_{g}" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": ". In detail, we first compute the global HOI representation " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "v_{mean}" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " with mean pooling on all pairwise HOI representations, i.e., " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "v_{mean} = MeanPool(\\{v_p^m\\}_{m=1}^M)" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": ". Here " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "v_p^m" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " is the holistic HOI representation (c.f. Sec. 3.2.3 in the main paper) for " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": "-th human-object pair. Then we develop an additional " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "L2" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " loss " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg}" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " to transfer the knowledge from CLIP to HOI representations: " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg} = L2(v_{mean}, v_g)" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": ". The performance even decreases slightly from 19.44 to 19.39, which might be caused by the incompatibility of parameters between backbone network (ResNet50-FPN pretrained on COCO) and " + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_P" + }, + { + "bbox": [ + 104, + 490, + 507, + 628 + ], + "type": "text", + "content": " (HOI prompt embeddings from CLIP). When directly applying the knowledge transfer strategy of GEN-VLKT to a weakly-supervised setting, it is difficult to map the unmatched HOI representation and classification weights to a joint space as the supervisory signals are noisy." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 633, + 504, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 633, + 504, + 656 + ], + "spans": [ + { + "bbox": [ + 104, + 633, + 504, + 656 + ], + "type": "text", + "content": "Finally, our approach achieves the best performance compared with other strategies, demonstrating the effectiveness of our bi-level knowledge integration strategy." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 673, + 293, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 673, + 293, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 673, + 293, + 685 + ], + "type": "text", + "content": "C SPATIAL FEATURE GENERATION" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": "Following (Zhang et al., 2021c), we generate the spatial feature " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "v_{sp} \\in \\mathbb{R}^{D}" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": " for each pair of human-object proposals " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_h, \\mathbf{x}_o)" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": ". Specifically, we first compute the bounding boxes information for " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 698, + 506, + 733 + ], + "type": "text", + "content": " separately, including their center coordinates, widths, heights, aspect ratios and areas, all" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 81, + 304, + 235 + ], + "blocks": [ + { + "bbox": [ + 107, + 81, + 304, + 235 + ], + "lines": [ + { + "bbox": [ + 107, + 81, + 304, + 235 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 304, + 235 + ], + "type": "image", + "image_path": "ec0d2877c25887ff4609d620312019b28f059c4f6a7a3f591043ace767320b71.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 248, + 506, + 270 + ], + "lines": [ + { + "bbox": [ + 104, + 248, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 506, + 270 + ], + "type": "text", + "content": "Figure 4: The t-SNE visualization of CLIP-based HOI representation and object-based 
HOI representation." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 306, + 81, + 503, + 235 + ], + "blocks": [ + { + "bbox": [ + 306, + 81, + 503, + 235 + ], + "lines": [ + { + "bbox": [ + 306, + 81, + 503, + 235 + ], + "spans": [ + { + "bbox": [ + 306, + 81, + 503, + 235 + ], + "type": "image", + "image_path": "9c33688555a1473b9211dc245403d73ceed296cf44e1d2b567f377ee0add3094.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 146, + 307, + 463, + 362 + ], + "blocks": [ + { + "bbox": [ + 136, + 296, + 473, + 307 + ], + "lines": [ + { + "bbox": [ + 136, + 296, + 473, + 307 + ], + "spans": [ + { + "bbox": [ + 136, + 296, + 473, + 307 + ], + "type": "text", + "content": "Table 3: Ablation of different CLIP knowledge integration strategies on HICO-DET dataset." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 146, + 307, + 463, + 362 + ], + "lines": [ + { + "bbox": [ + 146, + 307, + 463, + 362 + ], + "spans": [ + { + "bbox": [ + 146, + 307, + 463, + 362 + ], + "type": "table", + "html": "
Methods | Experimental setting | mAP (%)
Full | Rare | Non-Rare
Abl 1 | CLIP inference score | 11.84 | 13.72 | 11.27
Abl 2 | RN50-FPN (COCO) + FP random init. | 19.44 | 16.20 | 20.41
Abl 3 | RN50-FPN (COCO) + FP random init. + CLIP inference score | 20.56 | 18.19 | 21.27
Abl 4 | RN50-FPN (COCO) + FP HOI prompt init. + CLIP visual regularization | 19.39 | 15.12 | 20.66
ours | CLIP RN50 + HOI recognition + KTN + self-taught relatedness cls. | 22.89 | 22.41 | 23.03
", + "image_path": "fe363dacb48d8989d586e83fb6f3326ad7831f10cb09fb80f80800b75a732ddf.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": "normalized by the corresponding dimension of the image. We also encode their relative spatial relations by estimating the intersection over union (IoU), a ratio of the area of " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ", a directional encoding and the distance between center coordinates of " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_h" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_o" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ". We concatenate all the above-mentioned preliminary spatial cues and obtain a spatial encoding " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathbf{p} \\in \\mathbb{R}_{+}^{18}" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ". To encode the second and higher order combinations of different terms, the spatial encoding is concatenated with its logarithm and then embedded to " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "v_{sp}" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "v_{sp} = \\mathcal{F}_{sp}([p; \\log(p + \\epsilon)])" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": ". Where " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\epsilon > 0" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": " is a small constant to guarantee the numerical stability, and " + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{sp}" + }, + { + "bbox": [ + 104, + 390, + 506, + 468 + ], + "type": "text", + "content": " is a multi-layer fully connected network." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 492, + 379, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 379, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 379, + 506 + ], + "type": "text", + "content": "D VISUALIZATION OF HOI KNOWLEDGE BANK " + }, + { + "bbox": [ + 105, + 492, + 379, + 506 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "type": "text", + "content": "To further understand " + }, + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 522, + 506, + 579 + ], + "type": "text", + "content": ", we visualize the knowledge bank features initialized by CLIP (Fig.5(a)) and learned from scratch (Fig.5(b)) in feature space by t-SNE. It is worth noting that the knowledge bank learned from scratch is derived from 'Exp 9' in Tab.2. As shown in Fig.5, we observe that the knowledge features of HOI classes initialized with CLIP are more discriminative than random initialized, and show a better clustering result (e.g. the HOI classes in red box regions)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 602, + 279, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 602, + 279, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 602, + 279, + 615 + ], + "type": "text", + "content": "E DIFFERENT DESIGNS OF KTN" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 733 + ], + "type": "text", + "content": "To further validate the effectiveness of our attention mechanism in KTN, we compare our design with some variants in Tab. 4. First of all, we directly encode the relation-level features within the union region to enhance the pairwise representation rather than the external knowledge bank. As a result, the mAP even decreases a little bit from 20.75 (Exp 6) to 20.69 (Exp 11). The potential reason is that the union region contains more ambiguous visual relations and background clutters, which are difficult to learn in a weak setting. Besides, we also explore different normalization strategies in KTN. The results in Tab. 4 demonstrate that Softmax operation (ours) performs better than uniform attention (Exp 12) or Sigmoid operation (Exp 13), indicating our attention mechanism is non-trivial and more effective on aggregating the relational cues from HOI knowledge bank." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 82, + 503, + 387 + ], + "blocks": [ + { + "bbox": [ + 107, + 82, + 503, + 387 + ], + "lines": [ + { + "bbox": [ + 107, + 82, + 503, + 387 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 503, + 387 + ], + "type": "image", + "image_path": "2e8f35e8efe169ddf8e8f37e0ef00a2cb7d7406a4aefb85cf71fe3778f5da5f5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "lines": [ + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "type": "text", + "content": "Figure 5: The t-SNE visualization of knowledge bank " + }, + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\mathcal{W}_T" + }, + { + "bbox": [ + 104, + 397, + 504, + 431 + ], + "type": "text", + "content": ". (a) is the knowledge bank distribution in feature space based on our CLIP-based HOI representation while (b) is the knowledge bank learned from scratch (the model in Tab.2-Exp 9) based on object-based HOI representation." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 126, + 462, + 484, + 509 + ], + "blocks": [ + { + "bbox": [ + 167, + 452, + 442, + 462 + ], + "lines": [ + { + "bbox": [ + 167, + 452, + 442, + 462 + ], + "spans": [ + { + "bbox": [ + 167, + 452, + 442, + 462 + ], + "type": "text", + "content": "Table 4: Different network design of Knowledge Transfer Network (KTN)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 126, + 462, + 484, + 509 + ], + "lines": [ + { + "bbox": [ + 126, + 462, + 484, + 509 + ], + "spans": [ + { + "bbox": [ + 126, + 462, + 484, + 509 + ], + "type": "table", + "html": "
Methods | Parameter initialization | CLIP Knowledge | mAP (%)
Backbone | knowledge bank | HOI recognition | KTN | score fusion | SRC | Full | Rare | Non-Rare
Exp 11 | CLIP RN50 | CLIP Text | ✓ (union) | - | 20.69 | 19.55 | 21.04
Exp 12 | CLIP RN50 | CLIP Text | ✓ (uniform) | - | 21.14 | 19.82 | 21.53
Exp 13 | CLIP RN50 | CLIP Text | ✓ (sigmoid) | - | 21.28 | 19.27 | 21.88
ours | CLIP RN50 | CLIP Text | - | 21.53 | 20.05 | 21.97
", + "image_path": "7bd051eb2407505f5d0e91ad521730a1e01709b3b353c897c1f3b46d360b5784.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 522, + 350, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 522, + 350, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 350, + 534 + ], + "type": "text", + "content": "F TOP-K POSITIVE PAIR SELECTION FOR SRC" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 548, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 581 + ], + "type": "text", + "content": "In this section we show the results of selecting top-2 and top-5 pairs as positive in Tab. 5. We notice that there is a small performance drop, which is likely to be caused by mislabeling more negative pairs as positive, resulting in model learning with more noise." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 597, + 342, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 342, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 342, + 609 + ], + "type": "text", + "content": "G THE PROMPT GENERATION FOR V-COCO" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 623, + 504, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 623, + 504, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 623, + 504, + 667 + ], + "type": "text", + "content": "For the V-COCO dataset, each action has two different semantic roles ('instrument' and 'object') for different objects, like 'cut cake' and 'cut with knife'. We use two different prompt templates to convert a HOI label to a language sentence. For the former one, we take template \"a person verb a/an object\", and use \"a person verb with object\" for the latter." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 684, + 316, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 684, + 316, + 696 + ], + "spans": [ + { + "bbox": [ + 105, + 684, + 316, + 696 + ], + "type": "text", + "content": "H EVALUATION METRIC FOR V-COCO" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 731 + ], + "type": "text", + "content": "V-COCO dataset has two scenarios for role AP evaluation. In Tab. 1, APS1&2 refer to 'Average Precision in scenario 1&2'. 
V-COCO dataset has two different annotations for HOIs: the first is a" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 236, + 100, + 375, + 148 + ], + "blocks": [ + { + "bbox": [ + 154, + 89, + 455, + 100 + ], + "lines": [ + { + "bbox": [ + 154, + 89, + 455, + 100 + ], + "spans": [ + { + "bbox": [ + 154, + 89, + 455, + 100 + ], + "type": "text", + "content": "Table 5: Ablation of top-K positive pair selection for SRC on HICO-DET dataset." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 236, + 100, + 375, + 148 + ], + "lines": [ + { + "bbox": [ + 236, + 100, + 375, + 148 + ], + "spans": [ + { + "bbox": [ + 236, + 100, + 375, + 148 + ], + "type": "table", + "html": "
Methods | mAP (%)
Full | Rare | Non-Rare
Top-5 | 22.45 | 21.61 | 22.70
Top-2 | 22.49 | 21.83 | 22.69
ours (Top-1) | 22.89 | 22.41 | 23.03
", + "image_path": "ea948175792d2ed151c0ca1aee49e56a4e3c9c26bc08bbdd021b1dbab82c8ad6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 159, + 310, + 340 + ], + "blocks": [ + { + "bbox": [ + 108, + 159, + 310, + 340 + ], + "lines": [ + { + "bbox": [ + 108, + 159, + 310, + 340 + ], + "spans": [ + { + "bbox": [ + 108, + 159, + 310, + 340 + ], + "type": "image", + "image_path": "5ea9aca00229807cfcbed7f05dd2e53f142325eae021f722c0c643c640bf02ee.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 342, + 289, + 352 + ], + "lines": [ + { + "bbox": [ + 137, + 342, + 289, + 352 + ], + "spans": [ + { + "bbox": [ + 137, + 342, + 289, + 352 + ], + "type": "text", + "content": "(a) Evaluation protocol in Explanation-HOI" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 310, + 159, + 502, + 340 + ], + "blocks": [ + { + "bbox": [ + 310, + 159, + 502, + 340 + ], + "lines": [ + { + "bbox": [ + 310, + 159, + 502, + 340 + ], + "spans": [ + { + "bbox": [ + 310, + 159, + 502, + 340 + ], + "type": "image", + "image_path": "b1ffa02461492b359ad767aae489aa3222ac27a003e81f6fd5b1f59f8392a3fe.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 346, + 342, + 473, + 351 + ], + "lines": [ + { + "bbox": [ + 346, + 342, + 473, + 351 + ], + "spans": [ + { + "bbox": [ + 346, + 342, + 473, + 351 + ], + "type": "text", + "content": "(b) The correct evaluation protocol" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 363, + 504, + 398 + ], + "lines": [ + { + "bbox": [ + 104, + 363, + 504, + 398 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 504, + 398 + ], + "type": "text", + "content": "Figure 6: The screenshot of the evaluation code in Explanation-HOI. (a) is the original code while (b) is the correct one based on the standard evaluation code. We use red rectangle boxes to highlight the most important differences" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "content": "full label of (human location, interaction type, object location, object type), and the second misses target object (also denoted as 'role' in the original paper (Gupta & Malik, 2015)) annotations, and the label only includes (human location, interaction type). 
For the second case, there are two different evaluation protocols (scenarios) when taking a prediction as correct " + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "inline_equation", + "content": "^4" + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "content": ": In scenario 1, it requires the interaction is correct & the overlap between the human boxes is " + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "inline_equation", + "content": "> 0.5" + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "content": " & the corresponding role is empty, which is more restricted; in scenario 2, it only requires the interaction is correct & the overlap between the person boxes is " + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "inline_equation", + "content": "> 0.5" + }, + { + "bbox": [ + 104, + 418, + 504, + 495 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 512, + 312, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 312, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 312, + 525 + ], + "type": "text", + "content": "I EVALUATION OF EXPLANATION-HOI" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 537, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 504, + 616 + ], + "type": "text", + "content": "The Explanation-HOI (Baldassarre et al., 2020) has a misunderstanding of mAP evaluation protocol. As shown in Fig.6(a) L200-L205, the Explanation-HOI only takes some specific predicted HOIs into the evaluation process, which has the same HOI labels as groundtruth HOIs. Thus, they ignore lots of false-positive HOI predictions when calculating mAP, leading to an untrustable high mAP score (reported in their original paper). In Fig.6(b) L204-L208, we evaluate all predicted HOIs, which is the same as the standard evaluation protocol proposed in HICO-DET (Chao et al., 2015). The correct results have already been reported in Tab.1 in the main paper." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 631, + 193, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 631, + 193, + 643 + ], + "spans": [ + { + "bbox": [ + 105, + 631, + 193, + 643 + ], + "type": "text", + "content": "J LIMITATIONS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 656, + 504, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 504, + 712 + ], + "type": "text", + "content": "As described in Sec. 3.1, we adopt an external object detector to generate human-object proposals and then recognize their interactions. Consequently, our method is faced with two limitations brought by erroneous object detection results. Firstly, the positive human-object pairs are not recalled if the human or object proposals are not detected. Secondly, the proposals are kept fixed during learning, which leads to the problem of inaccurate localization and object types." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 246, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 246, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 246, + 732 + ], + "type": "text", + "content": "4https://github.com/s-gupta/v-coco" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 239, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 239, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 239, + 94 + ], + "type": "text", + "content": "K RISK OF USING CLIP" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "content": "For all the methods that adopt CLIP in their model design, there is a potential risk of data leakage as CLIP has seen quite a lot of data during pretraining. For HOI detection task, we cannot get access to CLIP dataset and do not know the exact overlap between CLIP and HOI benchmarks (i.e., HICO-DET and V-COCO), we carefully read Sec. 5 (Data Overlap Analysis) of the CLIP paper (Radford et al., 2021b), including an analysis of the overlap between its dataset with 35 popular datasets (HICO-DET and V-COCO are not included). It shows the overlap is small (median is " + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "inline_equation", + "content": "2.2\\%" + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "content": " and average is " + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "inline_equation", + "content": "3.2\\%" + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "content": ") and the influence is limited (\"overall accuracy is rarely shifted by more than " + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "inline_equation", + "content": "0.1\\%" + }, + { + "bbox": [ + 104, + 106, + 506, + 205 + ], + "type": "text", + "content": " with only 7 datasets above this threshold\"). Besides, the training text accompanying an image in the CLIP dataset is often not related to the HOI annotations. Thus, we think the risk is limited." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 221, + 174, + 233 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 221, + 174, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 221, + 174, + 233 + ], + "type": "text", + "content": "L LICENSE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 246, + 507, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 246, + 507, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 246, + 507, + 281 + ], + "type": "text", + "content": "The licenses of the assets used in our work are listed below, including open-sourced CLIP model, HICO-DET dataset, and V-COCO dataset. As for HICO-DET, we cannot find its license in the paper and the official project page. Thus we provide the official project page instead here for clarity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 129, + 289, + 425, + 330 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 129, + 289, + 351, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 289, + 351, + 300 + ], + "spans": [ + { + "bbox": [ + 129, + 289, + 351, + 300 + ], + "type": "text", + "content": "1. CLIP: https://github.com/openai/CLIP MIT License" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 129, + 304, + 375, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 304, + 375, + 315 + ], + "spans": [ + { + "bbox": [ + 129, + 304, + 375, + 315 + ], + "type": "text", + "content": "2. VCOCO: https://github.com/s-gupta/v-coco/MIT License" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 129, + 319, + 425, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 319, + 425, + 330 + ], + "spans": [ + { + "bbox": [ + 129, + 319, + 425, + 330 + ], + "type": "text", + "content": "3. HICO-DET: http://www-personal.umich.edu/ ywchao/hico/ No license" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2023" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file